removed repoclone scripts (they've been reformed into the RepoMirror repository) and added libvirt redirection

master
brent s. 2 years ago
parent 743edf045b
commit 473833a58f
Signed by: bts
GPG Key ID: 8C004C2F93481F6B
  1. 197
      arch/repoclone.py
  2. 146
      centos/repoclone/centos.dflts.ini
  3. 339
      centos/repoclone/repoclone.py
  4. 9
      centos/repoclone/test.py
  5. 1
      libvirt/README
  6. 223
      libvirt/better_virsh.py

@ -1,197 +0,0 @@
#!/usr/bin/env python3

import argparse
import configparser
import copy
import datetime
import os
import pprint
import subprocess
import sys

# TODO: convert .ini to treat [section]s as repositories, with a [DEFAULT]
# section for URL etc.

# Path to the per-user INI configuration for this script.
cfgfile = os.path.join(os.environ['HOME'],
                       '.config',
                       'optools',
                       'repoclone',
                       'arch.ini')

# Rsync options
# NOTE: sync() inserts a '--bwlimit=N' into this list at runtime when
# throttling is requested.
opts = [
        '--recursive', # recurse into directories
        '--times', # preserve modification times
        '--links', # copy symlinks as symlinks
        '--hard-links', # preserve hard links
        '--quiet', # suppress non-error messages
        '--delete-after', # receiver deletes after transfer, not during
        '--delay-updates', # put all updated files into place at end
        '--copy-links', # transform symlink into referent file/dir
        '--safe-links', # ignore symlinks that point outside the tree
        #'--max-delete', # don't delete more than NUM files
        '--delete-excluded', # also delete excluded files from dest dirs
        '--exclude=.*' # exclude files matching PATTERN
       ]

def sync(args):
    """Sync an upstream Arch Linux rsync mirror to a local tree.

    args is a dict with keys: mirror, destination, lastcheck, mount,
    bwlimit, lockfile, logfile (see parseArgs()/getDefaults()).
    Exits early (via exit()) if the mount check or lock check fails.
    """
    # TODO: this should be a class, probably, instead as there's a lot of
    # shared data across what should be multiple functions.
    # Bail out early if the destination filesystem isn't mounted.
    with open(os.devnull, 'w') as devnull:
        mntchk = subprocess.run(['findmnt', args['mount']],
                                stdout = devnull,
                                stderr = devnull)
    if mntchk.returncode != 0:
        exit('!! BAILING OUT; {0} isn\'t mounted !!'.format(args['mount']))
    if args['bwlimit'] >= 1:
        opts.insert(10, '--bwlimit=' + str(args['bwlimit']))  # limit socket I/O bandwidth
    for k in ('destination', 'logfile', 'lockfile'):
        os.makedirs(os.path.dirname(args[k]), exist_ok = True)
    # Resolve the rsync binary from PATH, defaulting to /usr/bin/rsync.
    rsync = '/usr/bin/rsync'
    for p in os.environ['PATH'].split(':'):
        testpath = os.path.join(p, 'rsync')
        if os.path.isfile(testpath):
            rsync = testpath  # in case rsync isn't in /usr/bin/rsync
            break
    cmd = [rsync]     # the path to the binary
    cmd.extend(opts)  # the arguments
    # TODO: implement repos here?
    # end TODO
    # The https://git.server-speed.net/users/flo/bin/tree/syncrepo.sh script
    # uses http(s) to check for lastupdate. I don't, because not all mirrors
    # *have* http(s).
    check_cmd = copy.deepcopy(cmd)
    check_cmd.append(os.path.join(args['mirror'], 'lastupdate'))
    check_cmd.append(os.path.join(args['destination'], 'lastupdate'))
    update_cmd = copy.deepcopy(cmd)
    update_cmd.append(os.path.join(args['mirror'], 'lastsync'))
    update_cmd.append(os.path.join(args['destination'], 'lastsync'))
    cmd.append(os.path.join(args['mirror'], '.'))       # remote path (full sync)
    cmd.append(os.path.join(args['destination'], '.'))  # local destination (full sync)
    # Simple PID-file lock so overlapping cron runs don't fight each other.
    if os.path.isfile(args['lockfile']):
        with open(args['lockfile'], 'r') as f:
            existingpid = f.read().strip()
        if os.isatty(sys.stdin.fileno()):
            # Running from shell
            exit('!! A repo synchronization seems to already be running (PID: {0}). Quitting. !!'.format(existingpid))
        else:
            exit()  # we're running in cron, shut the hell up.
    with open(args['lockfile'], 'w') as f:
        f.write(str(os.getpid()))
    # Determine if we need to do a full sync by comparing the mirror's
    # lastupdate stamp before/after fetching it.
    # FIX: on the very first run the local lastupdate copy doesn't exist yet;
    # treat that as "never updated" (epoch) instead of crashing with
    # FileNotFoundError.
    lastupdate_path = os.path.join(args['destination'], 'lastupdate')
    if os.path.isfile(lastupdate_path):
        with open(lastupdate_path, 'r') as f:
            oldupdate = datetime.datetime.utcfromtimestamp(int(f.read().strip()))
    else:
        oldupdate = datetime.datetime.utcfromtimestamp(0)
    with open(os.devnull, 'wb') as devnull:
        # TODO: when i clean this up, change this to do error detection
        subprocess.run(check_cmd, stdout = devnull, stderr = devnull)
        subprocess.run(update_cmd, stdout = devnull, stderr = devnull)
    with open(lastupdate_path, 'r') as f:
        newupdate = datetime.datetime.utcfromtimestamp(int(f.read().strip()))
    if newupdate > oldupdate:
        # Upstream changed since our last run; do the full sync.
        with open(args['logfile'], 'a') as log:
            c = subprocess.run(cmd, stdout = log, stderr = subprocess.PIPE)
        now = int(datetime.datetime.timestamp(datetime.datetime.utcnow()))
        with open(os.path.join(args['destination'], 'lastsync'), 'w') as f:
            f.write(str(now) + '\n')
    else:
        # No-op. Stderr should be empty.
        c = subprocess.run(['echo'], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    # Always stamp the lastcheck file, synced or not.
    now = int(datetime.datetime.timestamp(datetime.datetime.utcnow()))
    with open(args['lastcheck'], 'w') as f:
        f.write(str(now) + '\n')
    os.remove(args['lockfile'])
    # Only report errors at the end of the run if we aren't running in cron.
    # Otherwise, log them.
    errors = c.stderr.decode('utf-8').splitlines()
    if os.isatty(sys.stdin.fileno()) and errors:
        print('We encountered some errors:')
        for e in errors:
            if e.startswith('symlink has no referent: '):
                print('Broken upstream symlink: {0}'.format(e.split()[1].replace('"', '')))
            else:
                print(e)
    elif errors:
        with open(args['logfile'], 'a') as f:
            for e in errors:
                f.write('{0}\n'.format(e))
    return()

def getDefaults():
    """Return a ConfigParser seeded with hardcoded defaults.

    Writes a default config file to cfgfile on first run, then reads
    whatever is on disk over the defaults.
    """
    # Hardcoded defaults
    dflt = {'mirror': 'rsync://mirror.square-r00t.net/arch/',
            'repos': 'core,extra,community,multilib,iso/latest',
            'destination': '/srv/repos/arch',
            'lastcheck': '/srv/http/arch.lastcheck',
            'mount': '/',
            'bwlimit': 0,
            'lockfile': '/var/run/repo-sync_arch.lck',
            'logfile': '/var/log/repo/arch.log'}
    realcfg = configparser.ConfigParser(defaults = dflt)
    if not os.path.isfile(cfgfile):
        # FIX: ensure ~/.config/optools/repoclone exists before writing,
        # otherwise the first run dies with FileNotFoundError.
        os.makedirs(os.path.dirname(cfgfile), exist_ok = True)
        with open(cfgfile, 'w') as f:
            realcfg.write(f)
    realcfg.read(cfgfile)
    return(realcfg)

def parseArgs():
    """Build the CLI parser, using config-file values as the defaults."""
    cfg = getDefaults()
    liveopts = cfg['DEFAULT']
    args = argparse.ArgumentParser(description = 'Synchronization for a remote Arch repository to a local one.',
                                   epilog = ('This program will write a default configuration file to {0} ' +
                                             'if one is not found.').format(cfgfile))
    # FIX: the help text was missing the closing paren after the default.
    args.add_argument('-m',
                      '--mirror',
                      dest = 'mirror',
                      default = liveopts['mirror'],
                      help = ('The upstream mirror to sync from, must be an rsync URI ' +
                              '(Default: {0})').format(liveopts['mirror']))
    # TODO: can we do this?
    # We can; we need to .format() a repo in, probably, on the src and dest.
    # Problem is the last updated/last synced files.
    # args.add_argument('-r',
    #                   '--repos',
    #                   dest = 'repos',
    #                   default = liveopts['repos'],
    #                   help = ('The repositories to sync; must be a comma-separated list. ' +
    #                           '(Currently not used.) Default: {0}').format(','.join(liveopts['repos'])))
    args.add_argument('-d',
                      '--destination',
                      dest = 'destination',
                      default = liveopts['destination'],
                      help = 'The destination directory to sync to. Default: {0}'.format(liveopts['destination']))
    args.add_argument('-c', '--last-check',
                      dest = 'lastcheck',
                      default = liveopts['lastcheck'],
                      help = ('The file to update with a timestamp on every run. Per spec, this must be outside the '
                              'repository webroot'))
    args.add_argument('-b',
                      '--bwlimit',
                      dest = 'bwlimit',
                      default = liveopts['bwlimit'],
                      type = int,
                      help = ('The amount, in Kilobytes per second, to throttle the sync to. Default is to not '
                              'throttle (0).'))
    args.add_argument('-l',
                      '--log',
                      dest = 'logfile',
                      default = liveopts['logfile'],
                      help = 'The path to the logfile. Default: {0}'.format(liveopts['logfile']))
    args.add_argument('-L',
                      '--lock',
                      dest = 'lockfile',
                      default = liveopts['lockfile'],
                      help = 'The path to the lockfile. Default: {0}'.format(liveopts['lockfile']))
    args.add_argument('-M',
                      '--mount',
                      dest = 'mount',
                      default = liveopts['mount'],
                      help = 'The mountpoint for your --destination. The script will exit if this point is not mounted. ' +
                             'If you don\'t need mount checking, just use /. Default: {0}'.format(liveopts['mount']))
    return(args)

def main():
    """Entry point: merge CLI args with config defaults and run the sync."""
    parsed = vars(parseArgs().parse_args())
    sync(parsed)
    return()


if __name__ == '__main__':
    main()

@ -1,146 +0,0 @@
# This is an example ~/.config/optools/repoclone/centos.ini.
# You may need to change some options, but they are all commented so you know
# what to replace.
###############################################################################
# SPECIAL VALUES #
# You may recognize some values as used in yum's repo files
# (i.e. /etc/yum.repos.d/*.repo). THEY ARE NOT THE SAME. You CANNOT and SHOULD
# NOT simply copy-and-paste them in here, as they are constructed/used
# differently.
# That said, the following are special values/variables that are generated
# automatically (*case-sensitive*):
#
# {name}: The name of the repo (i.e. what appears in [brackets]).
# It is best that you leave this set in [DEFAULT] the way it
# is.
#
# {cur_arch}: The current hardware architecture of the host running the
# script e.g. x86_64 or i686.
#
# {rel_ver}: The release version. This will substitute for each version
# in the repository's "releases" directive. For example:
#
# destination = /srv/repos/centos/{rel_ver}/os/x86_64
# releases = 6,7
# baseuri = mirrors.centos.org/{rel_ver}/os/x86_64
#
# would clone from the following to the following:
#
# mirrors.centos.org/6/os/x86_64 =>
# /srv/repos/centos/6/os/x86_64
#
# mirrors.centos.org/7/os/x86_64 =>
# /srv/repos/centos/7/os/x86_64
#
# {arch}: Similar to {rel_ver} in that it iterates, but for each item
# in the "arches" directive.
#
# {cur_ver}: *IF* (and *only* if) you are running this script on CentOS
# itself, this will be a "meta-variable" containing the full
# version, major version, minor version, and revision.
# i.e. For CentOS 7.4.1708, {cur_ver} or {cur_ver.full} will
# both replace as "7.4.1708". {cur_ver.maj} will replace as
# "7", {cur.min} will replace as "4", and {cur.rev} will
# replace as "1708". If you use this mechanism and are NOT
# running the script on CentOS, an error will occur.
#
# You can also refer to directives themselves -- see below for an example of
# this.
###############################################################################
# The default section provides... well, defaults. All of these values can be
# overridden in each repository defined. If any of these are not specified,
# either in DEFAULT or in the repo section itself, then an error will occur.
[DEFAULT]

# The name of the repository. By default, this (repo_name) is the name of the
# section.
# {name} will ALWAYS be the section name and cannot be changed.
repo_name = {name}

# Whether we should sync this repository or not.
# To enable a repository, set this to one of: 1, yes, on, true
# To disable a repository, set this to one of: 0, no, off, false
enabled = 0

# The list of architectures to clone, separated by commas (if you have more
# than one). This is iterated over.
arches = i686,{cur_arch}

# The full path to the "base" of the repository where we'll be rsyncing from.
# As shown, if an "$" is in front of curly brackets, you can use another
# directive in the same section. To specify a directive from another section,
# you would use ${section:directive} (e.g. ${base:repo_name})
# Note that this has the potential to generate multiple iterations.
# isomirrors_sort.py should offer package repository mirrors as well, so that
# may be handy to find a fast mirror.
# Note that the default probably will not work for you since you need to be
# whitelisted to use it.
baseuri = mirror.centos.org/centos/{rel_ver}/${repo_name}/{arch}

# Where the clone should go. If you are using iterables, make sure you use them
# here, too, otherwise you'll most likely overwrite parts and end up with a
# totally broken repository!
# The parent directories will be created if necessary (assuming we have proper
# permissions).
destination = ${mount}/centos/{rel_ver}/${repo_name}/{arch}

# Perform a check before we start to make sure this mountpoint has a device
# mounted at it. If you don't store your repository mirrors at a different
# mountpoint, just set this to "/" (without quotes).
mount = /mnt/repos

# If set, throttle the transfer speeds down to this number of Kilobytes per
# second (KB/s, *not* kbps!).
# If it's set to 0, don't perform any throttling.
bwlimit = 0

# The lockfile for the repository. If this file is present, the clone will
# abort to avoid rsync/file differentiation conflicts. The parent directories
# will be created if necessary (assuming we have proper permissions).
lockfile = /var/run/repo-sync_{name}.lck

# The logfile. This setting is actually for all repositories; changing it below
# will have no effect as all entries use the same log file. The parent
# directories will be created if necessary (assuming we have proper
# permissions).
logfile = /var/log/repo/centos.log

# The releases to clone for. Note that we follow symlinks, so you should *NOT*
# include e.g. both 7 and 7.4.1708. This is also an iterable in the form of a
# comma-separated list (if you have more than one).
releases = 6,{cur_ver.maj}

# A comma-separated list of paths/patterns on the rsync server to exclude (if
# you have more than one). Leave empty for no excludes (this should be fine if
# your "baseuri"s are explicit enough).
excludes =

# This would be equivalent to cloning the [base] repository found in
# /etc/yum.repos.d/CentOS-Base.repo
[base]
repo_name = os
enabled = 1

# Likewise with [base], but with [updates] instead, etc.
[updates]
enabled = 1

[extras]
enabled = 1

[centosplus]
enabled = 1

# /etc/yum.repos.d/epel.repo (assuming you installed the epel-release package)
# Just like CentOS mirrors, you probably need to change this since they run a
# whitelist.
[epel]
enabled = 1
baseuri = dl.fedoraproject.org::fedora-{name}0/{rel_ver}/{arch}
destination = ${mount}/centos/{name}/{rel_ver}/{arch}

# It even works with non-RedHat-supplied repositories, too! As long as they
# offer rsync access. I *highly* recommend you check IUS out: https://ius.io/
[ius]
enabled = 0
baseuri = dl.iuscommunity.org/{name}/stable/CentOS/{rel_ver}/{arch}

@ -1,339 +0,0 @@
#!/usr/bin/env python3

import configparser
import copy
import datetime
import importlib.util
import os
import platform
import re
import socket
import subprocess
import sys

# Path to the per-user INI configuration for this script.
cfgfile = os.path.join(os.environ['HOME'],
                       '.config',
                       'optools',
                       'repoclone',
                       'centos.ini')

# Set up the logger.
# The logger module lives at <repo root>/lib/python/logger.py, three levels
# up from this file, so it is imported by explicit file path rather than via
# sys.path.
_selfpath = os.path.abspath(os.path.realpath(os.path.expanduser(__file__)))
_logmodpath = os.path.abspath(os.path.join(_selfpath,
                                           '..', '..', '..',
                                           'lib',
                                           'python',
                                           'logger.py'))
log_spec = importlib.util.spec_from_file_location('logger', _logmodpath)
logger = importlib.util.module_from_spec(log_spec)
log_spec.loader.exec_module(logger)

# Default log verbosity; flip the comment to 'debug' when hacking on this.
_loglevel = 'warning'
#_loglevel = 'debug'

class cur_ver(object):
    """Version info for the CentOS host this script runs on.

    Exposes .full (e.g. "7.4.1708"), .maj (int), .min (int) and .rev (str)
    for use as the {cur_ver} meta-variable in the config.
    Raises ValueError if the host isn't CentOS.
    """

    def __init__(self):
        _distname, _distver = self._host_dist()
        if not re.search('^CentOS( Linux)?$', _distname, re.IGNORECASE):
            raise ValueError(('You have specified "{cur_ver}" in your ' +
                              'config, but you are not running this script ' +
                              'on CentOS!'))
        self.full = _distver
        _ver = _distver.split('.')
        self.maj = int(_ver[0])
        # Some sources (e.g. os-release VERSION_ID) only carry the major
        # version; default the missing components rather than crashing.
        self.min = int(_ver[1]) if len(_ver) > 1 else 0
        self.rev = _ver[2] if len(_ver) > 2 else '0'

    @staticmethod
    def _host_dist():
        # FIX: platform.dist()/linux_distribution() were removed in Python
        # 3.8; fall back to parsing /etc/os-release when they're gone.
        # TODO: switch to distro? https://pypi.org/project/distro
        if hasattr(platform, 'dist'):
            _d = platform.dist()
            return(_d[0], _d[1])
        _info = {}
        try:
            with open('/etc/os-release', 'r') as f:
                for line in f:
                    line = line.strip()
                    if '=' in line and not line.startswith('#'):
                        k, v = line.split('=', 1)
                        _info[k] = v.strip('"\'')
        except FileNotFoundError:
            pass
        return(_info.get('NAME', ''), _info.get('VERSION_ID', ''))

    def __str__(self):
        return(self.full)

# Rsync options
# NOTE: _repo_sync() derives its per-repo option list from this one and may
# add a '--bwlimit=N' flag at runtime.
opts = [
        '--recursive', # recurse into directories
        '--times', # preserve modification times
        '--links', # copy symlinks as symlinks
        '--hard-links', # preserve hard links
        '--quiet', # suppress non-error messages
        '--delete-after', # receiver deletes after transfer, not during
        '--delay-updates', # put all updated files into place at end
        '--copy-links', # transform symlink into referent file/dir
        '--safe-links', # ignore symlinks that point outside the tree
        #'--max-delete', # don't delete more than NUM files
        '--delete-excluded', # also delete excluded files from dest dirs
       ]

# Hardcoded fallback configuration mirroring centos.dflts.ini; used both as
# ConfigParser defaults and by MirrorMgr.gen_cfg() to write a starter config.
# NOTE(review): some values here are lists/ints/bools/None while configparser
# normally expects strings -- confirm the generated INI round-trips cleanly.
dflts = {'DEFAULT': {'repo_name': '{name}',
                     'enabled': False,
                     'arches': ['i686', 'x86_64'],
                     'baseuri': ('mirror.centos.org/centos/{rel_ver}/' +
                                 '${repo_name}/{arch}'),
                     'destination': ('${mount}/centos/{rel_ver}/' +
                                     '${repo_name}/{arch}'),
                     'mount': '/mnt/repos',
                     'bwlimit': 0,
                     'lockfile': '/var/run/repo-sync_{name}.lck',
                     'logfile': '/var/log/repo/centos.log',
                     'releases': [6, 7],
                     'excludes': None},
          'base': {'repo_name': 'os',
                   'enabled': True},
          'updates': {'enabled': True},
          'extras': {'enabled': True},
          'centosplus': {'enabled': True},
          'epel': {'enabled': True,
                   'baseuri': ('dl.fedoraproject.org::fedora-{name}0/' +
                               '{rel_ver}/{arch}'),
                   'destination': '${mount}/centos/{name}/{rel_ver}/{arch}'},
          'ius': {'enabled': False,
                  'baseuri': ('dl.iuscommunity.org/{name}/stable/CentOS/' +
                              '{rel_ver}/{arch}')}}

class MirrorMgr(object):
    """Clone manager for the CentOS repositories described in cfgfile.

    Reads the INI config (generating a starter one on first run), expands
    the {name}/{arch}/{rel_ver}/... template variables, sanity-checks each
    repo (mountpoint, rsync reachability, destination dirs), and rsyncs it.
    """

    def __init__(self):
        self.cfg = configparser.ConfigParser(
            interpolation = configparser.ExtendedInterpolation(),
            defaults = dflts['DEFAULT'],
            allow_no_value = True)
        # Live substitution values for the template variables documented in
        # centos.dflts.ini. cur_ver is only populated lazily if the config
        # actually references {cur_ver} -- see chk_cur_ver().
        self.strvars = {'cur_ver': None,
                        'name': None,
                        'arches': [],
                        'releases': [],
                        'cur_arch': platform.machine(),
                        'rel_ver': None,
                        'arch': None}
        if not os.path.isfile(cfgfile):
            self.gen_cfg()  # writes a starter config and exits the script
        self.get_cfg()
        self.chk_cur_ver()
        self.parse_cfg()
        self.log = logger.log(logfile = os.path.abspath(os.path.expanduser(
                                        self.cfg['DEFAULT']['logfile'])),
                              logname = 'optools.repoclone.centos',
                              loglvl = _loglevel)

    def get_cfg(self):
        """Slurp the raw config text; parsed later by parse_cfg()."""
        with open(cfgfile, 'r') as f:
            self.cfg_in = f.read()
        return()

    def chk_cur_ver(self):
        """Instantiate cur_ver() only if the config references {cur_ver}.

        cur_ver() raises unless we're actually running on CentOS, so avoid
        constructing it when the config never asks for it.
        """
        for line in self.cfg_in.splitlines():
            _line = line
            # Strip out inline comments -- this is disabled by default(?).
            #_line = re.sub(r'\s*(#|;).*$', '', line)
            # Skip empty lines/comments.
            if re.search(r'^\s*((#|;).*)?$', line):
                continue
            # Check to see if cur_ver is referenced.
            if re.search('^.*{cur_ver}.*$', _line):
                self.strvars['cur_ver'] = cur_ver()
                break
        return()

    def gen_cfg(self):
        """Write a stripped-down default config to cfgfile, then exit."""
        cfg = configparser.ConfigParser(
            interpolation = configparser.ExtendedInterpolation(),
            defaults = dflts['DEFAULT'],
            allow_no_value = True)
        for i in dflts.keys():
            if i != 'DEFAULT':
                cfg[i] = copy.deepcopy(dflts[i])
        # FIX: make sure the config's parent directory exists before writing.
        os.makedirs(os.path.dirname(cfgfile), exist_ok = True)
        with open(cfgfile, 'w') as f:
            cfg.write(f)
        # And add the comment about how it's a stripped down default conf.
        with open(cfgfile, 'r+') as f:
            cfgdata = f.read()
            f.seek(0, 0)
            cmnt = ('# This is an autogenerated configuration file for ' +
                    'r00t^s\'s OpTools CentOS\n# mirror script.\n# You ' +
                    'should reference the fully commented version ' +
                    'distributed with the script,\n# "centos.dflts.ini".\n\n')
            f.write(cmnt + cfgdata)
        print(('A configuration file has been automatically generated for ' +
               'you at {0}. You should review and customize it, because it ' +
               'most likely will not work out of the box.').format(cfgfile))
        exit('Exiting to give you the chance to customize it...')
        return()

    def parse_cfg(self):
        """Parse the previously slurped config text into self.cfg."""
        self.cfg.read_string(self.cfg_in)
        return()

    def sync(self):
        """Sync every enabled repo for each (arch, release) combination."""
        for repo in self.cfg.sections():
            # Skip disabled repos.
            if not self.cfg.getboolean(repo, 'enabled'):
                continue
            self.repo = copy.deepcopy(dict(self.cfg[repo]))
            self.strvars['name'] = repo
            # This should be safe since the only thing that makes sense here
            # is {cur_arch}, which we populate in __init__().
            self.strvars['arches'] = [i.strip() for i in
                                      self.repo['arches'].format(
                                          **self.strvars).split(',')]
            self.strvars['releases'] = [i.strip() for i in
                                        self.repo['releases'].format(
                                            **self.strvars).split(',')]
            for arch in self.strvars['arches']:
                for rel_ver in self.strvars['releases']:
                    # FIX: _clear_tpl() already runs _repo_chk(); the second
                    # explicit call here was redundant (it probed the rsync
                    # server twice per iteration) and has been dropped.
                    self._clear_tpl(repo, arch, rel_ver)
                    self._repo_sync(repo)
        return()

    def _clear_tpl(self, repo, arch, rel_ver):
        """Reset self.repo/self.strvars for a fresh (arch, release) pass."""
        self.repo = copy.deepcopy(dict(self.cfg[repo]))
        self.strvars['name'] = repo
        # This should be safe since the only thing that makes sense here is
        # {cur_arch}, which we populate in __init__().
        self.strvars['arches'] = [i.strip() for i in
                                  self.repo['arches'].format(
                                      **self.strvars).split(',')]
        self.strvars['releases'] = [i.strip() for i in
                                    self.repo['releases'].format(
                                        **self.strvars).split(',')]
        self.strvars['arch'] = arch
        self.strvars['rel_ver'] = rel_ver
        self._repo_chk(repo)
        return()

    def _repo_sync(self, repo):
        """Run rsync for the currently templated repo and report errors."""
        # FIX: work on a *copy* of the module-level option list. The old
        # code aliased it, so each repo's --bwlimit flag accumulated onto
        # every later repo's command line.
        self.opts = copy.deepcopy(opts)
        self.repo['bwlimit'] = float(self.repo['bwlimit'])
        if self.repo['bwlimit'] > 0.0:
            # limit socket I/O bandwidth
            self.opts.append('--bwlimit=' + str(self.repo['bwlimit']))
        cmd = ['rsync']        # Set up a cmd list for subprocess
        cmd.extend(self.opts)  # The arguments for rsync
        # The path on the remote mirror
        _path = os.path.join('rsync://{0}'.format(self.repo['baseuri']), '.')
        cmd.append(_path)
        # The local destination
        cmd.append(os.path.join(self.repo['destination'], '.'))
        # PID-file lock so overlapping cron runs don't fight each other.
        if os.path.isfile(self.repo['lockfile']):
            with open(self.repo['lockfile'], 'r') as f:
                existingpid = f.read().strip()
            if os.isatty(sys.stdin.fileno()):
                # Running from shell
                exit(('!! A repo synchronization seems to already be ' +
                      'running (PID: {0}). Quitting. !!').format(existingpid))
            else:
                exit()  # We're running in cron, shut the hell up.
        with open(self.repo['lockfile'], 'w') as f:
            f.write(str(os.getpid()))
        with open(self.repo['logfile'], 'a') as log:
            c = subprocess.run(cmd, stdout = log, stderr = subprocess.PIPE)
        now = int(datetime.datetime.utcnow().timestamp())
        with open(os.path.join(self.repo['destination'],
                               'lastsync'), 'w') as f:
            f.write(str(now) + '\n')
        os.remove(self.repo['lockfile'])
        # Only report errors at the end of the run if we aren't running in
        # cron. Otherwise, log them.
        errors = c.stderr.decode('utf-8').splitlines()
        # CentOS 7 main doesn't have an i686.
        # FIX: rel_ver is parsed out of the config as a *string*, so the old
        # "== 7" integer comparison never matched; compare as strings.
        if str(self.strvars['rel_ver']) == '7':
            rgx = re.compile(r'^rsync: change_dir.*/i[36]86(/|").*'
                             r'failed:\s*No\s+such\s+file\s+or\s+'
                             r'directory.*$')
            errors = [e for e in errors if not rgx.search(e)]
        # Drop the generic trailer rsync emits whenever anything failed.
        errors = [e for e in errors
                  if not e.startswith(('rsync error: some files/attrs were not ' +
                                       'transferred (see previous errors)'))]
        if os.isatty(sys.stdin.fileno()) and errors:
            print('[{0}] We encountered some errors:'.format(repo))
            for e in errors:
                if e.startswith('symlink has no referent: '):
                    print(('Broken upstream symlink: ' +
                           '{0}').format(e.split()[1].replace('"', '')))
                else:
                    print(e)
        elif errors:
            # FIX: the old code wrote to "log" here, but that file object
            # was already closed by its "with" block above; reopen it.
            with open(self.repo['logfile'], 'a') as log:
                for e in errors:
                    log.write('{0}\n'.format(e))
        return()

    def _repo_chk(self, repo):
        """Render templates and sanity-check mount/rsync/dest for a repo."""
        def chkmnt():
            # Render the mountpoint and confirm something is mounted there.
            self.repo['mount'] = os.path.abspath(
                os.path.expanduser(
                    self.repo['mount'].format(
                        **self.strvars)))
            with open(os.devnull, 'w') as devnull:
                mntchk = subprocess.run(['findmnt',
                                         self.repo['mount']],
                                        stdout = devnull,
                                        stderr = devnull)
            if mntchk.returncode != 0:
                raise RuntimeError(('!! BAILING OUT; {0} isn\'t ' +
                                    'mounted !!').format(self.repo['mount']))
            return()
        def chkrsync():
            # Render baseuri and verify the rsync port is actually reachable.
            _port = 873  # rsyncd default
            _open = False
            # FIX: re.sub()'s 4th positional arg is *count*, not flags; the
            # old call passed re.IGNORECASE as count. Use flags= explicitly.
            self.repo['baseuri'] = re.sub(r'^\s*rsync://',
                                          '',
                                          self.repo['baseuri'].format(
                                              **self.strvars),
                                          flags = re.IGNORECASE)
            _raw_srv = self.repo['baseuri'].split('/')[0]
            _split_srv = re.sub('::.*$', '', _raw_srv).split(':')
            if len(_split_srv) >= 2:
                # FIX: socket.connect_ex() needs a numeric port.
                _port = int(_split_srv[1])
            for proto in (socket.AF_INET, socket.AF_INET6):
                s = socket.socket(proto, socket.SOCK_STREAM)
                s.settimeout(10)  # don't hang forever in cron
                try:
                    chk = s.connect_ex((_split_srv[0], _port))
                finally:
                    s.close()  # FIX: don't leak the socket
                if chk == 0:
                    _open = True
                    break
            if not _open:
                # Unreachable server: be loud on a tty, silent in cron.
                if os.isatty(sys.stdin.fileno()):
                    raise RuntimeError(('Rsync on host {0}:{1} is not ' +
                                        'accessible!').format(_split_srv[0],
                                                              _port))
                else:
                    exit()
            return()
        def chkdest():
            # Render the destination path and make sure it exists.
            _dest = os.path.abspath(
                os.path.expanduser(
                    self.cfg[repo]['destination'].format(
                        **self.strvars)))
            self.repo['destination'] = _dest
            os.makedirs(self.repo['destination'], exist_ok = True)
            return()
        def chkdest_files():
            # Render log/lock paths and create their parent directories.
            for f in ('logfile', 'lockfile'):
                _dest = os.path.abspath(
                    os.path.expanduser(
                        self.repo[f].format(**self.strvars)))
                self.repo[f] = _dest
                os.makedirs(os.path.dirname(self.repo[f]), exist_ok = True)
            return()
        def chkmisc():
            # Odds and ends. (placeholder)
            pass
        # The Business-End(TM)
        chkmnt()
        chkrsync()
        chkdest()
        chkdest_files()
        chkmisc()
        return()

def main():
    """Entry point: build the manager (loads/validates config) and sync."""
    mgr = MirrorMgr()
    mgr.sync()


if __name__ == '__main__':
    main()

@ -1,9 +0,0 @@
#!/usr/bin/env python3

import os

# Resolve this script's own absolute path.
selfpath = os.path.abspath(os.path.expanduser(__file__))
print(selfpath)

# Resolve the repo's lib/python directory, three levels up from here.
_updirs = ('..', '..', '..', 'lib', 'python')
logmodpath = os.path.abspath(os.path.join(selfpath, *_updirs))
print(logmodpath)

@ -0,0 +1 @@
These projects/scripts have been moved to https://git.square-r00t.net/LibvirtTools/.

@ -1,223 +0,0 @@
#!/usr/bin/env python3

import argparse
# import os
# import getpass
import re
##
import libvirt
# from lxml import etree

# NOTE: docs URLS are super long. Extrapolate using following:
# docsurl = 'https://libvirt.org/docs/libvirt-appdev-guide-python/en-US/html'

# TODO: flesh this out. only supports guests atm
# TODO: use openAuth?
# {docsurl}/libvirt_application_development_guide_using_python-Connections.html#idp13928160

# I would like to take the moment to point out that I did in three hours with exactly NO prior knowledge of the libvirt
# API what Red Hat couldn't do in four YEARS. https://bugzilla.redhat.com/show_bug.cgi?id=1244093


def libvirt_callback(userdata, err):
    """No-op libvirt error handler; silences libvirt's default stderr spew.

    fucking worst design decision.
    https://stackoverflow.com/a/45543887/733214
    """
    return None


# fucking worst design decision.
# https://stackoverflow.com/a/45543887/733214
# Install the no-op handler globally so libvirt errors surface only as the
# libvirtError exceptions we catch, not as noise on stderr.
libvirt.registerErrorHandler(f = libvirt_callback, ctx = None)


class LV(object):
    """Thin convenience wrapper around a libvirt connection.

    Supports name/regex-targeted list/start/stop/restart of guests (the
    internals also know about networks and storage pools, though the CLI
    currently only exposes guests).
    """

    def __init__(self, uri, *args, **kwargs):
        # uri: libvirt connection URI (e.g. qemu:///system). The connection
        # itself is opened lazily by startConn().
        self.uri = uri
        self.conn = None
        self._args = args
        self._kwargs = kwargs

    def _getTargets(self, target, regex = False, ttype = 'guest',
                    state = None, nocase = False, *args, **kwargs):
        """Return a name-sorted list of libvirt objects matching target.

        target: exact name (default) or a regex pattern (regex = True).
        ttype:  'guest', 'net', or 'storage'.
        state:  'active', 'inactive', or None for both.
        nocase: match the name case-insensitively.
        """
        targets = []
        # TODO: ..._RUNNING as well? can add multiple flags
        # Per-type (active, inactive) listing flags.
        state_flags = {'guest': (libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE,
                                 libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE),
                       'net': (libvirt.VIR_CONNECT_LIST_NETWORKS_ACTIVE,
                               libvirt.VIR_CONNECT_LIST_NETWORKS_INACTIVE),
                       'storage': (libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE,
                                   libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_INACTIVE)}
        re_flags = re.UNICODE  # The default
        if nocase:
            re_flags |= re.IGNORECASE
        if not self.conn:
            self.startConn()
        search_funcs = {'guest': self.conn.listAllDomains,
                        'net': self.conn.listAllNetworks,
                        'storage': self.conn.listAllStoragePools}
        if not regex:
            # NOTE(review): the literal name isn't re.escape()d, so names
            # containing regex metacharacters may mis-match -- confirm.
            ptrn = r'^{0}$'.format(target)
        else:
            ptrn = target
        ptrn = re.compile(ptrn, re_flags)
        if state == 'active':
            flag = state_flags[ttype][0]
        elif state == 'inactive':
            flag = state_flags[ttype][1]
        else:
            flag = 0  # 0 = no state filtering; list everything
        for t in search_funcs[ttype](flag):
            if ptrn.search(t.name()):
                targets.append(t)
        targets.sort(key = lambda i: i.name())
        return(targets)

    def list(self, target, verbose = False, *args, **kwargs):
        """Return matching target names, or detail dicts when verbose.

        {docsurl}/libvirt_application_development_guide_using_python-Guest_Domains-Information-Info.html
        """
        if not self.conn:
            self.startConn()
        targets = self._getTargets(target, **kwargs)
        results = []
        # Each attr is a tuple; the name of the attribute and the key name
        # the result should use (if defined).
        attr_map = {'str': (('name', None),
                            ('OSType', 'os'),
                            ('UUIDString', 'uuid'),
                            ('hostname', None)),
                    'bool': (('autostart', None),
                             ('hasCurrentSnapshot', 'current_snapshot'),
                             ('hasManagedSaveImage', 'managed_save_image'),
                             ('isActive', 'active'),
                             ('isPersistent', 'persistent'),
                             ('isUpdated', 'updated')),
                    'int': (('ID', 'id'),
                            ('maxMemory', 'max_memory_KiB'),
                            ('maxVcpus', 'max_vCPUs'))}
        for t in targets:
            if not verbose:
                results.append(t.name())
            else:
                r = {}
                # Attributes that may be unsupported for a given object
                # raise libvirtError; report those as unavailable/None.
                for attrname, newkey in attr_map['str']:
                    keyname = (newkey if newkey else attrname)
                    try:
                        r[keyname] = str(getattr(t, attrname)())
                    except libvirt.libvirtError:
                        r[keyname] = '(N/A)'
                for attrname, newkey in attr_map['bool']:
                    keyname = (newkey if newkey else attrname)
                    try:
                        r[keyname] = bool(getattr(t, attrname)())
                    except (libvirt.libvirtError, ValueError):
                        r[keyname] = None
                for attrname, newkey in attr_map['int']:
                    keyname = (newkey if newkey else attrname)
                    try:
                        r[keyname] = int(getattr(t, attrname)())
                        if r[keyname] == -1:  # libvirt's "not applicable"
                            r[keyname] = None
                    except (libvirt.libvirtError, ValueError):
                        r[keyname] = None
                results.append(r)
        return(results)

    def restart(self, target, *args, **kwargs):
        """Stop then start the matching target(s)."""
        # FIX: stop()/start() pass an explicit state= to _getTargets();
        # forwarding one here too made _getTargets() receive "state" twice
        # and blow up with a TypeError. They filter by state themselves.
        self.stop(target, **kwargs)
        self.start(target, **kwargs)
        return()

    def start(self, target, **kwargs):
        """Start (create) every inactive target matching the name/pattern."""
        if not self.conn:
            self.startConn()
        kwargs.pop('state', None)  # we always filter for inactive here
        targets = self._getTargets(target, state = 'inactive', **kwargs)
        for t in targets:
            t.create()
        return()

    def stop(self, target, force = False, *args, **kwargs):
        """Shut down (or force-destroy) every active matching target."""
        if not self.conn:
            self.startConn()
        kwargs.pop('state', None)  # we always filter for active here
        targets = self._getTargets(target, state = 'active', **kwargs)
        for t in targets:
            if not force:
                t.shutdown()  # polite ACPI shutdown
            else:
                t.destroy()   # hard poweroff
        return()

    def startConn(self):
        """Open the libvirt connection for self.uri."""
        self.conn = libvirt.open(self.uri)
        return()

    def stopConn(self):
        """Close the libvirt connection, if open."""
        if self.conn:
            self.conn.close()
            self.conn = None
        return()


def parseArgs():
    """Build the CLI: start/restart/stop/list subcommands sharing a common
    set of target-selection flags (attached via parents=)."""
    parser = argparse.ArgumentParser(description = 'Some better handling of libvirt guests')
    # Flags every subcommand understands.
    common = argparse.ArgumentParser(add_help = False)
    common.add_argument('-u', '--uri',
                        dest = 'uri',
                        default = 'qemu:///system',
                        help = 'The URI for the libvirt to connect to. Default: qemu:///system')
    common.add_argument('-r', '--regex',
                        action = 'store_true',
                        help = 'If specified, use a regex pattern for TARGET instead of exact match')
    common.add_argument('-i', '--case-insensitive',
                        action = 'store_true',
                        dest = 'nocase',
                        help = 'If specified, match the target name/regex pattern case-insensitive')
    common.add_argument('-T', '--target-type',
                        # choices = ['guest', 'net', 'storage'],
                        choices = ['guest'],
                        default = 'guest',
                        dest = 'ttype',
                        help = 'The type of TARGET')
    common.add_argument('-t', '--target',
                        dest = 'target',
                        metavar = 'TARGET',
                        default = '.*',
                        help = ('The guest, network, etc. to manage. '
                                'If not specified, operate on all (respecting other filtering)'))
    subcmds = parser.add_subparsers(help = 'Operation to perform',
                                    dest = 'oper',
                                    metavar = 'OPERATION',
                                    required = True)
    subcmds.add_parser('start', help = 'Start the target(s)', parents = [common])
    subcmds.add_parser('restart', help = 'Restart the target(s)', parents = [common])
    stop_sub = subcmds.add_parser('stop', help = 'Stop ("destroy") the target(s)', parents = [common])
    stop_sub.add_argument('-f', '--force',
                          dest = 'force',
                          action = 'store_true',
                          help = 'Hard poweroff instead of send a shutdown/ACPI powerdown signal')
    list_sub = subcmds.add_parser('list', help = 'List the target(s)', parents = [common])
    list_sub.add_argument('-v', '--verbose',
                          dest = 'verbose',
                          action = 'store_true',
                          help = 'Display more output')
    list_sub.add_argument('-s', '--state',
                          dest = 'state',
                          choices = ['active', 'inactive'],
                          default = None,
                          help = 'Filter results by state. Default is all states')
    return(parser)


def main():
    """Entry point: parse the CLI, dispatch to the requested LV method, and
    pretty-print results for the 'list' operation."""
    parsed = parseArgs().parse_args()
    kwargs = vars(parsed)
    mgr = LV(**kwargs)
    result = getattr(mgr, parsed.oper)(**kwargs)
    if parsed.oper == 'list':
        if parsed.verbose:
            import json
            print(json.dumps(result, indent = 4, sort_keys = True))
        else:
            print('\n'.join(result))
    return()


if __name__ == '__main__':
    main()
Loading…
Cancel
Save