I... should probably be using xs:ID?

This commit is contained in:
brent s 2019-10-31 18:32:56 -04:00
parent c4386d55d1
commit 1ea84cbac0
7 changed files with 160 additions and 35 deletions

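On the question in the commit message: xs:ID would make every id unique across the whole document and would allow xs:IDREF cross-references (for example, a pv's vg attribute pointing at a vg's id), while the xs:unique constraints added in this commit only guarantee uniqueness within each selector's scope; xs:ID values also have to be NCNames, which ids like "vg1" already are. Either way, libxml2 (via lxml) enforces these identity constraints at schema-validation time. A minimal sketch, assuming aif.xsd and an example aif.xml sit in the working directory (file names are illustrative):

from lxml import etree

# Duplicate ids surface here as validation errors, whether they are caught by
# the xs:unique selectors below or by a future switch to xs:ID.
schema = etree.XMLSchema(etree.parse('aif.xsd'))
doc = etree.parse('aif.xml')

if not schema.validate(doc):
    for err in schema.error_log:
        print('{0}: {1}'.format(err.line, err.message))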
aif.xsd
View File

@@ -408,39 +408,102 @@
</xs:element>
</xs:sequence>
</xs:complexType>
<xs:unique name="uniq_luks_name">
<xs:selector xpath="aif:luksDev"/>
<xs:field xpath="@name"/>
</xs:unique>
</xs:element>
<!-- END LUKS -->
<!-- BEGIN LVM -->
<xs:element name="lvm" minOccurs="0" maxOccurs="1">
<xs:complexType>
<xs:sequence minOccurs="1" maxOccurs="unbounded">
<xs:element name="lvmGroup" minOccurs="1" maxOccurs="unbounded">
<xs:all minOccurs="1" maxOccurs="1">
<xs:element name="physicals" minOccurs="1" maxOccurs="1">
<xs:complexType>
<xs:sequence minOccurs="1" maxOccurs="unbounded">
<xs:element name="lvmLogical" minOccurs="1" maxOccurs="unbounded">
<xs:element name="pv">
<xs:complexType>
<xs:attribute name="id" type="aif:t_nonempty"
use="required"/>
<xs:attribute name="source" type="aif:t_nonempty"
use="required"/>
<xs:attribute name="vg" type="aif:t_nonempty"
use="required"/>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
<xs:unique name="uniq_pv_id">
<xs:selector xpath="aif:pv"/>
<xs:field xpath="@id"/>
</xs:unique>
<xs:unique name="uniq_pv_src">
<xs:selector xpath="aif:pv"/>
<xs:field xpath="@source"/>
</xs:unique>
</xs:element>
<xs:element name="volumes" minOccurs="1" maxOccurs="1">
<xs:complexType>
<xs:sequence minOccurs="1" maxOccurs="unbounded">
<xs:element name="vg">
<xs:complexType>
<xs:all>
<xs:element name="tags" minOccurs="0" maxOccurs="1">
<xs:complexType>
<xs:sequence minOccurs="1"
maxOccurs="unbounded">
<xs:element name="tag"
minOccurs="1"
maxOccurs="unbounded"
type="aif:t_nonempty"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="logicals" minOccurs="1"
maxOccurs="1">
<xs:complexType>
<xs:sequence minOccurs="1"
maxOccurs="unbounded">
<xs:element name="lv" minOccurs="1"
maxOccurs="unbounded">
<xs:complexType>
<xs:attribute name="id"
type="aif:t_nonempty"
use="required"/>
<xs:attribute name="name"
type="aif:t_nonempty"
use="required"/>
<xs:attribute name="size"
type="aif:t_disksize"
use="required"/>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:all>
<xs:attribute name="id" type="aif:t_nonempty"
use="required"/>
<xs:attribute name="name" type="aif:t_nonempty"
use="required"/>
<xs:attribute name="source" type="aif:t_nonempty"
use="required"/>
</xs:complexType>
<xs:unique name="uniq_lv">
<xs:selector xpath="aif:lvmLogical"/>
<xs:field xpath="@name"/>
<xs:unique name="uniq_vg_tag">
<xs:selector xpath="aif:tags"/>
<xs:field xpath="tag"/>
</xs:unique>
</xs:element>
</xs:sequence>
<xs:attribute name="id" type="aif:t_nonempty" use="required"/>
<xs:attribute name="name" type="aif:t_nonempty" use="required"/>
</xs:complexType>
<xs:unique name="uniq_vg">
<xs:selector xpath="aif:lvmGroup"/>
<xs:unique name="uniq_vg_id">
<xs:selector xpath="aif:vg"/>
<xs:field xpath="@id"/>
</xs:unique>
<xs:unique name="uniq_vg_name">
<xs:selector xpath="aif:vg"/>
<xs:field xpath="@name"/>
</xs:unique>
</xs:element>
</xs:sequence>
</xs:all>
</xs:complexType>
</xs:element>
<!-- END LVM -->
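For orientation, a rough sketch (not part of this commit; the function, its arguments, and the namespace handling are assumptions) of how the new physicals/volumes layout could be walked into the PV/VG/LV stubs that aif.disk.lvm grows below, using the same constructor signatures as those stubs:

from aif.disk.lvm import PV, VG, LV

def parse_lvm(lvm_xml, partitions):
    # lvm_xml is the parsed <lvm> element; partitions maps partition ids to
    # aif.disk.block.Partition objects built earlier. Namespace prefixes are
    # omitted for brevity.
    pvs = {}  # vg id -> list of PV objects, keyed off each pv's vg attribute
    for pv_xml in lvm_xml.findall('physicals/pv'):
        pv = PV(partitions[pv_xml.attrib['source']])
        pvs.setdefault(pv_xml.attrib['vg'], []).append(pv)
    vgs = {}
    for vg_xml in lvm_xml.findall('volumes/vg'):
        vg_id = vg_xml.attrib['id']
        lvs = [LV(lv_xml, pvs.get(vg_id, []))
               for lv_xml in vg_xml.findall('logicals/lv')]
        vgs[vg_id] = VG(vg_xml, lvs)
    return(vgs)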
@@ -474,6 +537,14 @@
</xs:element>
</xs:sequence>
</xs:complexType>
<xs:unique name="uniq_array_id">
<xs:selector xpath="array"/>
<xs:field xpath="@id"/>
</xs:unique>
<xs:unique name="uniq_array_name">
<xs:selector xpath="array"/>
<xs:field xpath="@name"/>
</xs:unique>
</xs:element>
<!-- END MDADM -->
<!-- BEGIN MOUNTPOINTS -->

View File

@@ -20,6 +20,8 @@ import psutil
##
from aif.utils import xmlBool, size

# TODO: https://serverfault.com/questions/356534/ssd-erase-block-size-lvm-pv-on-raw-device-alignment


PARTED_FSTYPES = sorted(list(dict(vars(parted.filesystem))['fileSystemType'].keys()))
PARTED_FLAGS = sorted(list(parted.partition.partitionFlag.values()))
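The TODO above is about PV and partition alignment; as a rough illustration of the arithmetic involved (not this commit's code), start sectors are normally rounded up to a 1 MiB boundary so LVM physical volumes and SSD erase blocks line up:

def align_up(sector, sector_size = 512, boundary = 1024 * 1024):
    # Round a start sector up to the next boundary (2048 sectors for 1 MiB at
    # 512-byte sectors).
    per_boundary = boundary // sector_size
    return(((sector + per_boundary - 1) // per_boundary) * per_boundary)

# e.g. align_up(63) == 2048, the usual first usable sector on 1 MiB-aligned disks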

View File

@@ -6,7 +6,7 @@ import psutil
##
from aif.disk.block import Partition
from aif.disk.luks import LUKS
from aif.disk.lvm import Group as LVMGroup
from aif.disk.lvm import LV as LVMVolume
from aif.disk.mdadm import Array as MDArray

# I wish there was a better way of doing this.
@@ -44,11 +44,11 @@ for i in os.listdir(_mod_dir):
class FS(object):
def __init__(self, fs_xml, sourceobj):
self.xml = fs_xml
if not isinstance(sourceobj, (Partition, LUKS, LVMGroup, MDArray)):
if not isinstance(sourceobj, (Partition, LUKS, LVMVolume, MDArray)):
raise ValueError(('sourceobj must be of type '
'aif.disk.block.Partition, '
'aif.disk.luks.LUKS, '
'aif.disk.lvm.Group, or'
'aif.disk.lvm.LV, or '
'aif.disk.mdadm.Array'))
self.source = sourceobj
self.devpath = sourceobj.devpath
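A hedged usage sketch of the change: fs_xml below stands for the <fs .../> element of a mountpoint and lv for an aif.disk.lvm.LV instance (still a stub in this commit); the variable names are illustrative.

fs = FS(fs_xml, lv)
# FS takes its device path from whichever source object it is handed:
# fs.devpath == lv.devpath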

View File

@@ -1,4 +1,8 @@
from aif.disk.block import Disk, Partition
from aif.disk.lvm import LV
from aif.disk.mdadm import Array

class LUKS(object):
def __init__(self):
def __init__(self, partobj):
self.devpath = None
pass
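A sketch of where this stub is presumably headed (assumptions only, not this commit's code): wrap the cryptsetup CLI the same way aif.disk.mdadm wraps mdadm. The attribute and method names are guesses; the cryptsetup invocations are standard.

import subprocess

class LUKS(object):
    def __init__(self, partobj, name, passphrase):
        self.source = partobj
        self.name = name
        self.passphrase = passphrase
        self.devpath = '/dev/mapper/{0}'.format(name)

    def format(self):
        # -q skips the confirmation prompt; --key-file=- reads the passphrase
        # from stdin.
        subprocess.run(['cryptsetup', '-q', 'luksFormat', '--key-file=-', self.source.devpath],
                       input = self.passphrase.encode('utf-8'))
        return()

    def unlock(self):
        subprocess.run(['cryptsetup', 'luksOpen', '--key-file=-', self.source.devpath, self.name],
                       input = self.passphrase.encode('utf-8'))
        return()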

View File

@@ -1,13 +1,29 @@
try:
import dbus
has_mod = True
except ImportError:
# This is inefficient; the native dbus module is preferred.
# In Arch, this can be installed via the 'extra' repository package "python-dbus".
import subprocess
has_mod = False
##
from aif.disk.block import Disk, Partition
from aif.disk.luks import LUKS
from aif.disk.mdadm import Array


class PV(object):
def __init__(self, partobj):
self.devpath = None
pass

class LV(object):
def __init__(self, lv_xml, pv_objs):
pass

class Group(object):
class VG(object):
def __init__(self, vg_xml, lv_objs):
self.devpath = None
pass


class LV(object):
def __init__(self, lv_xml, pv_objs):
pass
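A sketch of what the subprocess fallback for these stubs could look like once fleshed out (assumptions, not this commit's code; the dbus branch would talk to lvmdbusd instead, and the method names are guesses):

import subprocess

class PV(object):
    def __init__(self, partobj):
        self.source = partobj
        self.devpath = partobj.devpath

    def prepare(self):
        # Initialize the underlying block device as an LVM physical volume.
        subprocess.run(['pvcreate', self.devpath])
        return()


class LV(object):
    def __init__(self, lv_xml, pv_objs):
        self.name = lv_xml.attrib['name']
        self.size = lv_xml.attrib['size']
        self.pvs = pv_objs
        self.devpath = None

    def create(self, vg_name):
        # Percentage sizes map to lvcreate -l (extents), fixed sizes to -L.
        if self.size.endswith('%'):
            cmd = ['lvcreate', '-l', '{0}FREE'.format(self.size), '-n', self.name, vg_name]
        else:
            cmd = ['lvcreate', '-L', self.size, '-n', self.name, vg_name]
        subprocess.run(cmd)
        self.devpath = '/dev/{0}/{1}'.format(vg_name, self.name)
        return()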

View File

@@ -7,8 +7,9 @@ import uuid
##
import mdstat
##
from aif.disk.block import Disk
from aif.disk.block import Partition
from aif.disk.block import Disk, Partition
from aif.disk.luks import LUKS
from aif.disk.lvm import LV


SUPPORTED_LEVELS = (0, 1, 4, 5, 6, 10)
@@ -173,8 +174,10 @@ class Array(object):
self.devname = self.xml.attrib['name']
self.devpath = devpath
self.updateStatus()
self.homehost = homehost
self.members = []
self.state = None
self.info = None

def addMember(self, memberobj):
if not isinstance(memberobj, Member):
@@ -183,20 +186,18 @@ class Array(object):
self.members.append(memberobj)
return()

def assemble(self, scan = False):
def start(self, scan = False):
if not any((self.members, self.devpath)):
raise RuntimeError('Cannot assemble an array with no members (for hints) or device path')

cmd = ['mdadm', '--assemble', self.devpath]
if not scan:
for m in self.members:
cmd.append(m.devpath)
else:
cmd.extend([''])
cmd.append('--scan')
# TODO: logging!
subprocess.run(cmd)

pass
self.state = 'assembled'
return()

def create(self):
@@ -206,6 +207,7 @@ class Array(object):
'--level={0}'.format(self.level),
'--metadata={0}'.format(self.metadata),
'--chunk={0}'.format(self.chunksize),
'--homehost={0}'.format(self.homehost),
'--raid-devices={0}'.format(len(self.members))]
if self.layout:
cmd.append('--layout={0}'.format(self.layout))
@@ -214,13 +216,14 @@ class Array(object):
cmd.append(m.devpath)
# TODO: logging!
subprocess.run(cmd)

pass
self.writeConf()
self.state = 'new'
return()

def stop(self):
# TODO: logging
subprocess.run(['mdadm', '--stop', self.devpath])
self.state = 'disassembled'
return()

def updateStatus(self):
@@ -232,4 +235,22 @@ class Array(object):
return()

def writeConf(self, conf = '/etc/mdadm.conf'):
pass
with open(conf, 'r') as fh:
conflines = fh.read().splitlines()
# TODO: logging
arrayinfo = subprocess.run(['mdadm', '--detail', '--brief', self.devpath],
stdout = subprocess.PIPE).stdout.decode('utf-8').strip()
if arrayinfo not in conflines:
r = re.compile(r'^ARRAY\s+{0}'.format(self.devpath))
nodev = True
for l in conflines:
if r.search(l):
nodev = False
# TODO: logging?
# and/or raise an exception here;
# an array already exists with that name but not with the same opts/GUID/etc.
break
if nodev:
with open(conf, 'a') as fh:
fh.write('{0}\n'.format(arrayinfo))
return()
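A usage sketch tying the new methods together; the Array and Member constructor arguments here are guesses based on the attributes set in __init__ and the isinstance() check in addMember(), so treat the call signatures as illustrative only.

arr = Array(array_xml, homehost = 'aif-target')
for part in member_partitions:          # aif.disk.block.Partition objects (illustrative)
    arr.addMember(Member(part, arr))    # Member is whatever addMember() expects
arr.create()              # mdadm --create ...; also appends the array to mdadm.conf
arr.stop()                # mdadm --stop
arr.start(scan = True)    # mdadm --assemble <devpath> --scan, using the conf entry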

View File

@@ -55,9 +55,20 @@
</luksDev>
</luks>
<lvm>
<lvmGroup id="vg1" name="GroupName">
<lvmLogical id="lv1" name="LogicalName" source="lvm_member1"/>
</lvmGroup>
<physicals>
<!-- "size" refers to the size used of the LV for this VG. -->
<pv id="pv1" source="lvm_member1" vg="vg1"/>
</physicals>
<volumes>
<vg id="vg1" name="group1">
<tags>
<tag>data</tag>
</tags>
<logicals>
<lv id="lv1" name="logical1" size="100%"/>
</logicals>
</vg>
</volumes>
</lvm>
<mdadm>
<!-- level can be 0, 1, 4, 5, 6, or 10. RAID 1+0 (which is different from mdadm RAID10) would be done by
@@ -91,7 +102,7 @@
</mount>
<mount source="boot" target="/mnt/aif/boot"/>
<mount source="swap" target="swap"/>
<mount source="vg1" target="/mnt/aif/mnt/pool"/>
<mount source="lv1" target="/mnt/aif/mnt/pool"/>
<mount source="mdadm1" target="/mnt/aif/mnt/raid"/>
</mountPoints>
</storage>
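One consequence visible in the mountPoints change above: mounts now reference an LV's id rather than its VG's, so the mount phase can resolve every source attribute through a single id-to-device map. A sketch under assumptions (the variable names and ids are illustrative):

devices = {}
devices.update(partitions)   # {'boot': Partition, 'swap': Partition, ...}
devices.update(luks_devs)    # {'luks1': LUKS, ...}
devices.update(lvs)          # {'lv1': LV, ...}
devices.update(arrays)       # {'mdadm1': Array, ...}

for mount_xml in storage_xml.findall('mountPoints/mount'):
    src = devices[mount_xml.attrib['source']]
    target = mount_xml.attrib['target']
    # src.devpath is what actually gets mounted (or used as swap) at target.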