Builds successfully. However, still working through some bugs with iPXE.

This commit is contained in:
brent s. 2016-12-13 23:43:53 -05:00
parent 3c46d85683
commit b4a5e40b8f
25 changed files with 228 additions and 388 deletions

.gitignore (vendored): 2 changes
View File

@@ -1,6 +1,8 @@
 # We don't want local build settings in case someone's using
 # the git dir as a place to store their build.ini
 /build.ini
+/dist.build.ini
+*.bak

 # These are user-controlled.
 #/overlay/*

View File

@@ -105,6 +105,8 @@ def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
 os.fchdir(real_root)
 os.chroot('.')
 os.close(real_root)
+if not os.path.isfile('{0}/sbin/init'.format(chrootdir)):
+os.symlink('../lib/systemd/systemd', '{0}/sbin/init'.format(chrootdir))
 return(chrootdir)

 def chrootUnmount(chrootdir):
@@ -123,8 +125,8 @@ def chrootTrim(build):
 if os.path.isdir(dbdir):
 print("{0}: [CHROOT] Compressing {1}'s cache ({2})...".format(
 datetime.datetime.now(),
-chrootdir,
-a))
+chrootdir + '/root.' + a,
+i))
 if os.path.isfile(tarball):
 os.remove(tarball)
 with tarfile.open(name = tarball, mode = 'w:xz') as tar: # if this complains, use x:xz instead
@@ -136,6 +138,9 @@ def chrootTrim(build):
 humanize.naturalsize(
 os.path.getsize(tarball)),
 dbdir))
+for d in ('etc/pacman.d/gnupg', 'var/empty/.gnupg'):
+if os.path.isdir('{0}/root.{1}/{2}'.format(chrootdir, a, d)):
+shutil.rmtree('{0}/root.{1}/{2}'.format(chrootdir, a, d))
 # TODO: move the self-cleanup in pre-build.sh to here.
 delme = ['/root/.gnupg',
 '/root/.bash_history',

View File

@@ -24,13 +24,19 @@ if __name__ == '__main__':
 bchroot.chrootUnmount(conf['build']['chrootdir'] + '/root.' + a)
 prep.postChroot(conf['build'])
 bchroot.chrootTrim(conf['build'])
-build.genImg(conf['build'], conf['bdisk'])
+build.genImg(conf)
 build.genUEFI(conf['build'], conf['bdisk'])
 fulliso = build.genISO(conf)
+build.signIMG(fulliso['Main']['file'], conf)
 build.displayStats(fulliso)
 if conf['build']['ipxe']:
 bSSL.sslPKI(conf)
 iso = ipxe.buildIPXE(conf)
+if iso:
+for x in iso.keys():
+if x != 'name':
+path = iso[x]['file']
+build.signIMG(path, conf)
 build.displayStats(iso)
 bsync.http(conf)
 bsync.tftp(conf)
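The signIMG() loop above walks the result dictionary returned by buildIPXE() and genISO(). As a rough illustration only (the 'name', 'file', and 'fmt' keys appear in the diffs below; the concrete values here are invented), the structure looks something like:

# Hypothetical result-dict layout, for illustration; real values come from genISO()/buildIPXE().
iso = {
    'name': ['Main'],
    'Main': {
        'file': '/var/tmp/BDisk/iso/example.iso',  # invented path
        'sha': 'e3b0c442...',                      # sha256 hex digest
        'size': '1.2 GB',                          # humanized size
        'type': 'Full',                            # example label
        'fmt': 'Hybrid ISO',
    },
}
# The loop in the commit skips the 'name' key and signs every listed image file:
for x in iso.keys():
    if x != 'name':
        build.signIMG(iso[x]['file'], conf)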

View File

@@ -138,7 +138,7 @@ def rsync(conf):
 '-z',
 locpath,
 '{0}@{1}:{2}/.'.format(user, server, path)]
-#if sync['http']:
+#if sync['http']: # TODO: rsync:http to enable this
 # cmd[4] = conf['http']['path']
 # print('{0}: Syncing {1} to {2}. Please wait...'.format(
 # datetime.datetime.now(),
@@ -161,6 +161,7 @@ def rsync(conf):
 subprocess.call(cmd)
 cmd[4] = '{0}/boot'.format(build['tempdir'])
 subprocess.call(cmd)
+if conf['rsync']['iso']:
 cmd[4] = isodir
 print('{0}: [RSYNC] {1} => {2}...'.format(
 datetime.datetime.now(),
@@ -174,15 +175,8 @@ def rsync(conf):
 '{0}/root/iso.pkgs.both'.format(prebuild_dir)]
 for x in rsync_files:
 cmd[4] = x
-print('{0}: [RSYNC] {1} => {2}...'.format(
-datetime.datetime.now(),
-cmd[4],
-server))
 subprocess.call(cmd)
 # And we grab the remaining, since we need to rename them.
-print('{0}: [RSYNC] (Informational files) => {1}...'.format(
-datetime.datetime.now(),
-server))
 for a in arch:
 cmd[4] = '{0}/{1}/root/packages.arch'.format(prebuild_dir, a)
 cmd[5] = '{0}@{1}:{2}/packages.{3}'.format(user, server, path, a)

View File

@@ -4,13 +4,16 @@ import shutil
 import glob
 import subprocess
 import hashlib
+import gnupg
 import jinja2
 import humanize
 import datetime
 from urllib.request import urlopen


-def genImg(build, bdisk):
+def genImg(conf):
+bdisk = conf['bdisk']
+build = conf['build']
 arch = build['arch']
 chrootdir = build['chrootdir']
 archboot = build['archboot']
@@ -19,6 +22,7 @@ def genImg(build, bdisk):
 hashes = {}
 hashes['sha256'] = {}
 hashes['md5'] = {}
+squashfses = []
 for a in arch:
 if a == 'i686':
 bitness = '32'
@@ -64,6 +68,7 @@ def genImg(build, bdisk):
 f.write("{0} airootfs.sfs".format(hashes['sha256'][a].hexdigest()))
 with open(airoot + 'airootfs.md5', 'w+') as f:
 f.write("{0} airootfs.sfs".format(hashes['md5'][a].hexdigest()))
+squashfses.append('{0}'.format(squashimg))
 print("{0}: [BUILD] Hash checksums complete.".format(datetime.datetime.now()))
 # Logo
 os.makedirs(tempdir + '/boot', exist_ok = True)
@@ -79,6 +84,8 @@ def genImg(build, bdisk):
 bootfiles['initrd'] = ['initramfs-linux-{0}.img'.format(bdisk['name']), '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
 for x in ('kernel', 'initrd'):
 shutil.copy2('{0}/root.{1}/boot/{2}'.format(chrootdir, a, bootfiles[x][0]), '{0}/boot/{1}'.format(tempdir, bootfiles[x][1]))
+for i in squashfses:
+signIMG(i, conf)


 def genUEFI(build, bdisk):
@@ -258,9 +265,9 @@ def genISO(conf):
 sysl_tmp = tempdir + '/isolinux/'
 ver = bdisk['ver']
 if len(arch) == 1:
-isofile = '{0}-{1}-{2}.iso'.format(bdisk['uxname'], bdisk['ver'], arch[0])
+isofile = '{0}-{1}-{2}-{3}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'], arch[0])
 else:
-isofile = '{0}-{1}.iso'.format(bdisk['uxname'], bdisk['ver'])
+isofile = '{0}-{1}-{2}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
 isopath = build['isodir'] + '/' + isofile
 arch = build['arch']
 # In case we're building a single-arch ISO...
@@ -384,6 +391,36 @@ def genISO(conf):
 iso['Main']['fmt'] = 'Hybrid ISO'
 return(iso)

+def signIMG(file, conf):
+if conf['build']['gpg']:
+# If we enabled GPG signing, we need to figure out if we
+# are using a personal key or the automatically generated one.
+if conf['gpg']['mygpghome'] != '':
+gpghome = conf['gpg']['mygpghome']
+else:
+gpghome = conf['build']['dlpath'] + '/.gnupg'
+if conf['gpg']['mygpgkey'] != '':
+keyid = conf['gpg']['mygpgkey']
+else:
+keyid = False
+gpg = gnupg.GPG(gnupghome = gpghome, use_agent = True)
+# And if we didn't specify one manually, we'll pick the first one we find.
+# This way we can use the automatically generated one from prep.
+if not keyid:
+keyid = gpg.list_keys(True)[0]['keyid']
+print('{0}: [BUILD] Signing {1} with {2}...'.format(
+datetime.datetime.now(),
+file,
+keyid))
+# TODO: remove this warning when upstream python-gnupg fixes
+print('\t\t\t If you see a "ValueError: Unknown status message: \'KEY_CONSIDERED\'" error, ' +
+'it can be safely ignored.')
+print('\t\t\t If this is taking a VERY LONG time, try installing haveged and starting it. ' +
+'This can be done safely in parallel with the build process.')
+with open(file, 'rb') as fh:
+gpg.sign_file(fh, keyid = keyid, detach = True,
+clearsign = False, output = '{0}.sig'.format(file))
+
 def displayStats(iso):
 for i in iso['name']:
 print("{0}: == {1} {2} ==".format(datetime.datetime.now(), iso[i]['type'], iso[i]['fmt']))

View File

@@ -67,13 +67,27 @@ def parseConfig(confs):
 config_dict = {s:dict(config.items(s)) for s in config.sections()}
 # Convert the booleans to pythonic booleans in the dict...
 config_dict['bdisk']['user'] = config['bdisk'].getboolean('user')
+config_dict['build']['gpg'] = config['build'].getboolean('gpg')
 config_dict['build']['i_am_a_racecar'] = config['build'].getboolean('i_am_a_racecar')
+config_dict['build']['ipxe'] = config['build'].getboolean('ipxe')
 config_dict['build']['multiarch'] = (config_dict['build']['multiarch']).lower()
+config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso')
+config_dict['ipxe']['usb'] = config['ipxe'].getboolean('usb')
+config_dict['sync']['git'] = config['sync'].getboolean('git')
+config_dict['sync']['http'] = config['sync'].getboolean('http')
+config_dict['sync']['rsync'] = config['sync'].getboolean('rsync')
+config_dict['sync']['tftp'] = config['sync'].getboolean('tftp')
+config_dict['rsync']['iso'] = config['rsync'].getboolean('iso')
 # Get the version...
 if config_dict['bdisk']['ver'] == '':
 repo = git.Repo(config_dict['build']['basedir'])
 refs = repo.git.describe(repo.head.commit).split('-')
+if len(refs) >= 3:
 config_dict['bdisk']['ver'] = refs[0] + 'r' + refs[2]
+elif len(refs) == 2:
+config_dict['bdisk']['ver'] = refs[0] + 'r' + refs[1]
+else:
+config_dict['bdisk']['ver'] = refs[0]
 for i in ('http', 'tftp', 'rsync', 'git'):
 config_dict['sync'][i] = config['sync'].getboolean(i)
 # And the build number.
@@ -88,12 +102,10 @@ def parseConfig(confs):
 # But logically we should start the build over at 0 if we don't have any existing ISO's.
 if os.path.isdir(config_dict['build']['isodir']):
 if os.listdir(config_dict['build']['isodir']) == []:
-conf_dict['build']['buildnum'] = 0
-# ...or if we don't have any previous builds for this ISO version.
-elif not glob.glob('{0}/*v{1}r*.iso'.format(config_dict['build']['isodir'], config_dict['build']['ver'])):
 config_dict['build']['buildnum'] = 0
-config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso')
-config_dict['ipxe']['usb'] = config['ipxe'].getboolean('usb')
+# ...or if we don't have any previous builds for this ISO version.
+elif not glob.glob('{0}/*v{1}r*.iso'.format(config_dict['build']['isodir'], config_dict['bdisk']['ver'])):
+config_dict['build']['buildnum'] = 0
 # and build a list of arch(es) we want to build
 if config_dict['build']['multiarch'] in ('','yes','true','1'):
 config_dict['build']['arch'] = ['x86_64','i686']
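A small aside (not from the commit): the getboolean() calls added above rely on configparser's built-in coercion, which is what lets the ini accept yes/no, true/false, or 1/0 interchangeably. A self-contained sketch:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[build]
gpg = yes
ipxe = 0
""")
print(config['build'].getboolean('gpg'))    # True
print(config['build'].getboolean('ipxe'))   # False
# Options that are missing entirely need a fallback to avoid an exception:
print(config['build'].getboolean('i_am_a_racecar', fallback=False))  # False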

View File

@@ -24,9 +24,11 @@ def buildIPXE(conf):
 embedscript = build['dlpath'] + '/EMBED'
 ipxe_src = srcdir + '/ipxe'
 img_path = build['isodir'] + '/'
-ipxe_usb = '{0}-{1}.usb.img'.format(bdisk['uxname'], bdisk['ver'])
-ipxe_mini = '{0}-{1}.mini.iso'.format(bdisk['uxname'], bdisk['ver'])
+ipxe_usb = '{0}-{1}-{2}.usb.img'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
+ipxe_mini = '{0}-{1}-{2}.mini.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
+ipxe_emini = '{0}-{1}-{2}.mini.eiso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
 usb_file = '{0}/{1}'.format(img_path, ipxe_usb)
+emini_file = '{0}{1}'.format(img_path, ipxe_emini)
 mini_file = '{0}{1}'.format(img_path, ipxe_mini)
 ipxe_git_uri = 'git://git.ipxe.org/ipxe.git'
 patches_git_uri = 'https://github.com/eworm-de/ipxe.git'
@@ -43,6 +45,9 @@ def buildIPXE(conf):
 env = jinja2.Environment(loader = tpl_loader)
 patches = ipxe_repo.create_remote('eworm', patches_git_uri)
 patches.fetch()
+# TODO: per http://ipxe.org/download#uefi, it builds efi *binaries* now.
+# we can probably skip the commit patching from eworm and the iso/eiso
+# (and even usb) generation, and instead use the same method we use in genISO
 eiso_commit = '189652b03032305a2db860e76fb58e81e3420c4d'
 nopie_commit = '58557055e51b2587ad3843af58075de916e5399b'
 # patch files
@@ -101,11 +106,22 @@ def buildIPXE(conf):
 #modenv['CERT'] = '{0},{1}'.format(ipxe_ssl_ca, ipxe_ssl_crt) # TODO: test these
 #modenv['PRIVKEY'] = ipxe_ssl_ckey # TODO: test these
 build_cmd = {}
+build_cmd['base'] = ['/usr/bin/make',
+'all',
+'EMBED="{0}"'.format(embedscript)]
+# TODO: copy the UNDI stuff/chainloader to tftpboot, if enabled
+build_cmd['undi'] = ['/usr/bin/make',
+'bin/ipxe.pxe',
+'EMBED="{0}"'.format(embedscript)]
 build_cmd['efi'] = ['/usr/bin/make',
 'bin-i386-efi/ipxe.efi',
-'bin-x86_64-efi/ipxe.efi']
+'bin-x86_64-efi/ipxe.efi',
+'EMBED="{0}"'.format(embedscript)]
 # Command to build the actual USB and Mini images
-build_cmd['img'] = ['/usr/bin/make']
+build_cmd['iso'] = ['/usr/bin/make',
+'bin/ipxe.liso',
+'bin/ipxe.eiso',
+'EMBED="{0}"'.format(embedscript)]
 # Now we call the commands.
 DEVNULL = open(os.devnull, 'w')
 if os.path.isfile(build['dlpath'] + '/ipxe.log'):
@@ -114,35 +130,40 @@ def buildIPXE(conf):
 datetime.datetime.now(),
 ipxe_src,
 build['dlpath']))
-if mini and not usb:
-build_cmd['img'].insert(1, 'bin/ipxe.eiso')
-elif usb and not mini:
-build_cmd['img'].insert(1, 'bin/ipxe.usb')
-elif usb and mini:
-build_cmd['img'].insert(1, 'bin/ipxe.eiso')
-build_cmd['img'].insert(2, 'bin/ipxe.usb')
 with open('{0}/ipxe.log'.format(build['dlpath']), 'a') as f:
+subprocess.call(build_cmd['base'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
+subprocess.call(build_cmd['undi'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
 subprocess.call(build_cmd['efi'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
-subprocess.call(build_cmd['img'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
+if mini:
+subprocess.call(build_cmd['iso'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
 print('{0}: [IPXE] Built iPXE image(s) successfully.'.format(datetime.datetime.now()))
 os.chdir(cwd)
 # move the files to the results dir
+# TODO: grab ipxe.pxe here too.
+if usb:
 os.rename('{0}/src/bin/ipxe.usb'.format(ipxe_src), usb_file)
-os.rename('{0}/src/bin/ipxe.eiso'.format(ipxe_src), mini_file)
+if mini:
+os.rename('{0}/src/bin/ipxe.eiso'.format(ipxe_src), emini_file)
+os.rename('{0}/src/bin/ipxe.iso'.format(ipxe_src), mini_file)
 # Get size etc. of build results
 iso = {}
 stream = {}
 iso['name'] = []
-for t in ('USB', 'Mini'): # TODO: do this programmatically based on config
+for t in ('usb', 'mini'): # TODO: do this programmatically based on config
+if t == 'usb':
+imgname = 'USB'
+elif t == 'mini':
+imgname = 'Mini'
 iso['name'].append(t)
 iso[t] = {}
 shasum = False
 shasum = hashlib.sha256()
-if t == 'USB':
+if t == 'usb':
 isopath = usb_file
-elif t == 'Mini':
+elif t == 'mini':
 isopath = mini_file
 stream = False
+if os.path.isfile(isopath):
 with open(isopath, 'rb') as f:
 while True:
 stream = f.read(65536) # 64kb chunks
@@ -152,9 +173,9 @@ def buildIPXE(conf):
 iso[t]['sha'] = shasum.hexdigest()
 iso[t]['file'] = isopath
 iso[t]['size'] = humanize.naturalsize(os.path.getsize(isopath))
-iso[t]['type'] = 'iPXE {0}'.format(t)
+iso[t]['type'] = 'iPXE {0}'.format(imgname)
-if t == 'USB':
+if t == 'usb':
 iso[t]['fmt'] = 'Image'
-elif t == 'Mini':
+elif t == 'mini':
 iso[t]['fmt'] = 'ISO'
 return(iso)
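For reference (not part of the diff), the 64 KiB read loop above is the usual hashlib idiom for hashing large images without holding them in memory. A standalone sketch with a placeholder path:

import hashlib

def sha256_of(path, chunk = 65536):
    # Hash the file in 64 KiB chunks so memory use stays flat.
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(chunk), b''):
            digest.update(block)
    return digest.hexdigest()

print(sha256_of('/var/tmp/BDisk/iso/example.mini.iso'))  # placeholder path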

View File

@@ -45,11 +45,11 @@ def downloadTarball(build):
 if build['mirrorgpgsig'] != '':
 # we don't want to futz with the user's normal gpg.
 gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg')
-print("\n{0}: [PREP] Generating a GPG key...".format(datetime.datetime.now()))
+print("{0}: [PREP] Generating a GPG key...".format(datetime.datetime.now()))
 # python-gnupg 0.3.9 spits this error in Arch. it's harmless, but ugly af.
 # TODO: remove this when the error doesn't happen anymore.
-print("\t\t\t If you see a \"ValueError: Unknown status message: 'KEY_CONSIDERED'\" error,\n\t\t\t it can be safely ignored.")
-print("\t\t\t If this is taking a VERY LONG time, try installing haveged and starting it.\n\t\t\t This can be " +
+print("\t\t\t If you see a \"ValueError: Unknown status message: 'KEY_CONSIDERED'\" error, it can be safely ignored.")
+print("\t\t\t If this is taking a VERY LONG time, try installing haveged and starting it. This can be " +
 "done safely in parallel with the build process.\n")
 input_data = gpg.gen_key_input(name_email = 'tempuser@nodomain.tld', passphrase = 'placeholder_passphrase')
 key = gpg.gen_key(input_data) # this gives the "error"
@@ -121,7 +121,7 @@ def unpackTarball(tarball_path, build, keep = False):
 # Open and extract the tarball
 if not keep:
 for a in build['arch']:
-print("{0}: [PREP] Extracting tarball {1} ({2}). Please wait...".format(
+print("{0}: [PREP] Extracting tarball {1} ({2})...".format(
 datetime.datetime.now(),
 tarball_path[a],
 humanize.naturalsize(

View File

@@ -4,6 +4,7 @@
 -TFTP, HTTP, RSYNC, git
 -UPDATE THE README!
 -sizes of build iso files
+-GPG sigs on built files

 ## General ##

@@ -17,6 +18,7 @@
 -set up automatic exporting to PDF of the user manual server-side. https://pypi.python.org/pypi/unoconv/0.6
 -There *has* to be a better way of handling package installation in the chroots.
 -maybe remove lxde, firefox, chrome and replace with enlightenment/midori?
+-custom repo? https://brainwreckedtech.wordpress.com/2013/01/27/making-your-own-arch-linux-repository/


 ## NETWORKING ##

View File

@@ -58,8 +58,9 @@ ver =
 dev = r00t^2

 ; Your email address.
-; This is only used for the commit message if you enable
-; sync:git.
+; This is only used for commit messages (sync:git),
+; or GPG-signing the releases (see the associated build
+; section items).
 email = bts@square-r00t.net

 ; What this distribution/project is used for.
@@ -196,6 +197,13 @@ gpgkey = 7F2D434B9741E8AC
 ; 1.) The default is probably fine.
 gpgkeyserver = pgp.mit.edu

+; Should we sign our release files? (See the GPG section)
+; 0.) Only accepts (case-insensitive):
+; yes|no
+; true|false
+; 1|0
+gpg = yes
+
 ; Where should we save the bootstrap tarballs?
 ; 0.) No whitespace
 ; 1.) Will be created if it doesn't exist
@@ -263,7 +271,7 @@ multiarch = yes
 ; true|false
 ; 1|0
 ; If it is undefined, it is assumed to be no.
-ipxe = yes
+ipxe =

 ; This option should only be enabled if you are on a fairly
 ; powerful, multicore system with plenty of RAM. It will
@@ -278,6 +286,30 @@ ipxe = yes
 i_am_a_racecar = yes


+#---------------------------------------------------------#
+# This section controls settings for signing our release
+# files. This is only used if build:gpg is
+# yes/true/etc.
+#---------------------------------------------------------#
+[gpg]
+
+; What is a valid key ID that we should use to
+; *sign* our release files?
+; 0.) You will be prompted for a passphrase if your
+; key has one/you don't have an open gpg-agent
+; session.
+; 1.) If you leave this blank we will use the key
+; we generate automatically earlier in the build
+; process.
+; 2.) We will generate one if this is blank and you
+; have selected sign as yes.
+mygpgkey =
+
+; What directory should we use for the above GPG key?
+; Make sure it contains your private key.
+mygpghome =
+
+
 #---------------------------------------------------------#
 # This section controls what we should do with the
 # resulting build and how to handle uploads, if we
@@ -466,11 +498,11 @@ ssl_key = ${ssldir}/main.key
 [rsync]

 ; This is the rsync destination host.
-host = bdisk.square-r00t.net
+host =

 ; This is the remote user we should use when performing the
 ; rsync push.
-user = root
+user =

 ; This is the remote destination path we should use for
 ; pushing via rsync.
@@ -479,4 +511,12 @@ user = root
 ; 2.) The path MUST be writable by rsync:user
 ; RECOMMENDED: you'll probably want to set http:(user|group)
 ; to what it'll need to be on the destination.
-path = /srv/http/${bdisk:uxname}_ipxe
+path =
+
+; Should we rsync over the ISO files too, or just the boot
+; files?
+; 0.) Only accepts (case-insensitive):
+; yes|no
+; true|false
+; 1|0
+iso = yes
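One assumption worth noting (not stated in the diff): the ${ssldir}/main.key and ${bdisk:uxname} references in this file look like configparser's ExtendedInterpolation syntax, so the sketch below assumes the config is loaded that way; the section and value names are illustrative only.

import configparser

ini = '''
[bdisk]
uxname = bdisk

[rsync]
path = /srv/http/${bdisk:uxname}_ipxe
'''
config = configparser.ConfigParser(interpolation = configparser.ExtendedInterpolation())
config.read_string(ini)
print(config['rsync']['path'])  # /srv/http/bdisk_ipxe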

View File

@@ -1,7 +1,6 @@
-# Server list generated by rankmirrors on 2016-07-09
 Server = http://mirror.us.leaseweb.net/archlinux/$repo/os/$arch
-Server = http://mirrors.advancedhosters.com/archlinux/$repo/os/$arch
-Server = http://ftp.osuosl.org/pub/archlinux/$repo/os/$arch
-Server = http://mirrors.rutgers.edu/archlinux/$repo/os/$arch
 Server = http://mirror.jmu.edu/pub/archlinux/$repo/os/$arch
-Server = http://arch.mirrors.ionfish.org/$repo/os/$arch
+Server = http://mirror.metrocast.net/archlinux/$repo/os/$arch
+Server = http://mirror.vtti.vt.edu/archlinux/$repo/os/$arch
+Server = http://arch.mirrors.pair.com/$repo/os/$arch
+Server = http://mirrors.advancedhosters.com/archlinux/$repo/os/$arch

View File

@@ -15,7 +15,7 @@ BINARIES="/usr/bin/memdiskfind"
 # FILES
 # This setting is similar to BINARIES above, however, files are added
 # as-is and are not parsed in any way. This is useful for config files.
-FILES=""
+FILES="/usr/bin/pkill"

 # HOOKS
 # This is the most important setting in this file. The HOOKS control the
@@ -50,7 +50,9 @@ FILES=""
 # usr, fsck and shutdown hooks.
 #HOOKS="base udev autodetect modconf block filesystems keyboard fsck"
 #HOOKS="base udev memdisk archiso_shutdown archiso modconf net ssh archiso_loop_mnt archiso_pxe_common archiso_pxe_nbd archiso_pxe_http archiso_pxe_nfs archiso_kms block pcmcia filesystems keyboard livecd"
-HOOKS="base udev memdisk archiso_shutdown archiso-custom modconf net ssh archiso_loop_mnt archiso_pxe_common archiso_pxe_nbd archiso_http_custom archiso_pxe_nfs archiso_kms block pcmcia filesystems keyboard livecd"
+#HOOKS="base udev memdisk archiso_shutdown archiso-custom modconf net ssh archiso_loop_mnt archiso_pxe_common archiso_pxe_nbd archiso_http_custom archiso_pxe_nfs archiso_kms block pcmcia filesystems keyboard livecd"
+HOOKS="base udev memdisk archiso_shutdown archiso modconf net ssh archiso_loop_mnt archiso_pxe_common archiso_pxe_nbd archiso_pxe_http archiso_pxe_nfs archiso_kms block pcmcia filesystems keyboard livecd"
+#HOOKS="base memdisk systemd archiso_shutdown archiso modconf ssh archiso_loop_mnt archiso_pxe_common archiso_pxe_nbd archiso_pxe_http archiso_pxe_nfs archiso_kms block pcmcia filesystems keyboard livecd"

 # COMPRESSION
 # Use this to compress the initramfs image. By default, gzip compression

View File

@@ -1,6 +1,6 @@
-Server = http://mirrors.advancedhosters.com/archlinux/$repo/os/$arch
-Server = http://ftp.osuosl.org/pub/archlinux/$repo/os/$arch
-Server = http://arch.mirrors.ionfish.org/$repo/os/$arch
-Server = http://mirror.vtti.vt.edu/archlinux/$repo/os/$arch
-Server = http://mirror.metrocast.net/archlinux/$repo/os/$arch
+Server = http://mirror.us.leaseweb.net/archlinux/$repo/os/$arch
 Server = http://mirror.jmu.edu/pub/archlinux/$repo/os/$arch
+Server = http://mirror.metrocast.net/archlinux/$repo/os/$arch
+Server = http://mirror.vtti.vt.edu/archlinux/$repo/os/$arch
+Server = http://arch.mirrors.pair.com/$repo/os/$arch
+Server = http://mirrors.advancedhosters.com/archlinux/$repo/os/$arch

View File

@@ -2,8 +2,8 @@

 for i in pacman apacman;
 do
-if [ -f /usr/local/${i}.db.tar.xz ];
+if [ -f /usr/local/${i}/${i}.db.tar.xz ];
 then
-/usr/bin/tar -Jxf /usr/local/${i}.db.tar.xz -C /var/lib/${i}/
+/usr/bin/tar -Jxf /usr/local/${i}/${i}.db.tar.xz -C /var/lib/${i}/
 fi
 done

View File

@@ -14,6 +14,7 @@
 #CacheDir = /var/cache/pacman/pkg/
 #LogFile = /var/log/pacman.log
 #GPGDir = /etc/pacman.d/gnupg/
+#HookDir = /etc/pacman.d/hooks/
 HoldPkg = pacman glibc
 #XferCommand = /usr/bin/curl -C - -f %u > %o
 #XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
@@ -34,11 +35,11 @@ Color
 TotalDownload
 CheckSpace
 VerbosePkgLists
+#ILoveCandy


 # By default, pacman accepts packages signed by keys that its local keyring
 # trusts (see pacman-key and its man page), as well as unsigned packages.
-#SigLevel = Required DatabaseOptional #RE-ENABLE ME WHEN A NEW SNAPSHOT IS RELEASED WITH FIXED GPG
-SigLevel = Never
+SigLevel = Required DatabaseOptional
 LocalFileSigLevel = Optional
 #RemoteFileSigLevel = Required
@@ -89,7 +90,3 @@ Include = /etc/pacman.d/mirrorlist
 #[custom]
 #SigLevel = Optional TrustAll
 #Server = file:///home/custompkgs
-
-#[archlinuxfr]
-#SigLevel = Never
-#Server = http://repo.archlinux.fr/$arch

View File

@@ -45,6 +45,9 @@ pacman -Syy
 cleanPacorigs
 # Install some prereqs
 pacman -S --noconfirm --needed sed
+sed -i.bak -e 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf
+pacman -S --noconfirm --needed filesystem
+mv /etc/pacman.conf.bak /etc/pacman.conf
 pacman -S --noconfirm --needed base syslinux wget rsync unzip jshon sudo abs xmlto bc docbook-xsl git
 locale-gen
 # And get rid of files it wants to replace

View File

@@ -1,179 +0,0 @@
# args: source, newroot, mountpoint
_mnt_fs() {
local img="${1}"
local newroot="${2}"
local mnt="${3}"
local img_fullname="${img##*/}";
local img_name="${img_fullname%%.*}"
local dm_snap_name="${dm_snap_prefix}_${img_name}"
local ro_dev ro_dev_size rw_dev

ro_dev=$(losetup --find --show --read-only "${img}")
echo ${ro_dev} >> /run/archiso/used_block_devices
ro_dev_size=$(blockdev --getsz ${ro_dev})

if [[ "${cow_persistent}" == "P" ]]; then
if [[ -f "/run/archiso/cowspace/${cow_directory}/${img_name}.cow" ]]; then
msg ":: Found '/run/archiso/cowspace/${cow_directory}/${img_name}.cow', using as persistent."
else
msg ":: Creating '/run/archiso/cowspace/${cow_directory}/${img_name}.cow' as persistent."
truncate -s "${cowfile_size}" "/run/archiso/cowspace/${cow_directory}/${img_name}.cow"
fi
else
if [[ -f "/run/archiso/cowspace/${cow_directory}/${img_name}.cow" ]]; then
msg ":: Found '/run/archiso/cowspace/${cow_directory}/${img_name}.cow' but non-persistent requested, removing."
rm -f "/run/archiso/cowspace/${cow_directory}/${img_name}.cow"
fi
msg ":: Creating '/run/archiso/cowspace/${cow_directory}/${img_name}.cow' as non-persistent."
truncate -s "${cowfile_size}" "/run/archiso/cowspace/${cow_directory}/${img_name}.cow"
fi

rw_dev=$(losetup --find --show "/run/archiso/cowspace/${cow_directory}/${img_name}.cow")
echo ${rw_dev} >> /run/archiso/used_block_devices

dmsetup create ${dm_snap_name} --table "0 ${ro_dev_size} snapshot ${ro_dev} ${rw_dev} ${cow_persistent} 8"

_mnt_dev "/dev/mapper/${dm_snap_name}" "${newroot}${mnt}" "-w"
echo $(readlink -f /dev/mapper/${dm_snap_name}) >> /run/archiso/used_block_devices
}

# args: /path/to/image_file, mountpoint
_mnt_sfs() {
local img="${1}"
local mnt="${2}"
local img_fullname="${img##*/}"
local sfs_dev

if [[ "${copytoram}" == "y" ]]; then
msg -n ":: Copying squashfs image to RAM..."
#if ! cp "${img}" "/run/archiso/copytoram/${img_fullname}" ; then
if ! pv -pterabT "${img}" > "/run/archiso/copytoram/${img_fullname}" ; then
echo "ERROR: while copy '${img}' to '/run/archiso/copytoram/${img_fullname}'"
launch_interactive_shell
fi
img="/run/archiso/copytoram/${img_fullname}"
msg "done."
fi
sfs_dev=$(losetup --find --show --read-only "${img}")
echo ${sfs_dev} >> /run/archiso/used_block_devices
_mnt_dev "${sfs_dev}" "${mnt}" "-r"
}

# args: device, mountpoint, flags
_mnt_dev() {
local dev="${1}"
local mnt="${2}"
local flg="${3}"

mkdir -p "${mnt}"

msg ":: Mounting '${dev}' to '${mnt}'"

while ! poll_device "${dev}" 30; do
echo "ERROR: '${dev}' device did not show up after 30 seconds..."
echo " Falling back to interactive prompt"
echo " You can try to fix the problem manually, log out when you are finished"
launch_interactive_shell
done

if mount "${flg}" "${dev}" "${mnt}"; then
msg ":: Device '${dev}' mounted successfully."
else
echo "ERROR; Failed to mount '${dev}'"
echo " Falling back to interactive prompt"
echo " You can try to fix the problem manually, log out when you are finished"
launch_interactive_shell
fi
}

_verify_checksum() {
local _status
cd "/run/archiso/bootmnt/${archisobasedir}/${arch}"
md5sum -c airootfs.md5 > /tmp/checksum.log 2>&1
_status=$?
cd "${OLDPWD}"
return ${_status}
}

run_hook() {
[[ -z "${arch}" ]] && arch="$(uname -m)"
[[ -z "${cowspace_size}" ]] && cowspace_size="75%"
[[ -z "${copytoram_size}" ]] && copytoram_size="75%"
[[ -z "${archisobasedir}" ]] && archisobasedir="arch"
[[ -z "${dm_snap_prefix}" ]] && dm_snap_prefix="arch"
[[ -z "${archisodevice}" ]] && archisodevice="/dev/disk/by-label/${archisolabel}"
if [[ -z "${cowfile_size}" ]]; then
cowfile_size="256M"
fi

if [[ -n "${cow_label}" ]]; then
cow_device="/dev/disk/by-label/${cow_label}"
[[ -z "${cow_persistent}" ]] && cow_persistent="P"
elif [[ -n "${cow_device}" ]]; then
[[ -z "${cow_persistent}" ]] && cow_persistent="P"
else
cow_persistent="N"
fi

[[ -z "${cow_directory}" ]] && cow_directory="persistent_${archisolabel}/${arch}"

# set mount handler for archiso
mount_handler="archiso_mount_handler"
}

# This function is called normally from init script, but it can be called
# as chain from other mount handlers.
# args: /path/to/newroot
archiso_mount_handler() {
local newroot="${1}"

if ! mountpoint -q "/run/archiso/bootmnt"; then
_mnt_dev "${archisodevice}" "/run/archiso/bootmnt" "-r"
if [[ "${copytoram}" != "y" ]]; then
echo $(readlink -f ${archisodevice}) >> /run/archiso/used_block_devices
fi
fi

if [[ "${checksum}" == "y" ]]; then
if [[ -f "/run/archiso/bootmnt/${archisobasedir}/${arch}/airootfs.md5" ]]; then
msg -n ":: Self-test requested, please wait..."
if _verify_checksum; then
msg "done. Checksum is OK, continue booting."
else
echo "ERROR: one or more files are corrupted"
echo "see /tmp/checksum.log for details"
launch_interactive_shell
fi
else
echo "ERROR: checksum=y option specified but ${archisobasedir}/${arch}/airootfs.md5 not found"
launch_interactive_shell
fi
fi

if [[ "${copytoram}" == "y" ]]; then
msg ":: Mounting /run/archiso/copytoram (tmpfs) filesystem, size=${copytoram_size}"
mkdir -p /run/archiso/copytoram
mount -t tmpfs -o "size=${copytoram_size}",mode=0755 copytoram /run/archiso/copytoram
fi

if [[ -n "${cow_device}" ]]; then
_mnt_dev "${cow_device}" "/run/archiso/cowspace" "-r"
echo $(readlink -f ${cow_device}) >> /run/archiso/used_block_devices
mount -o remount,rw "/run/archiso/cowspace"
else
msg ":: Mounting /run/archiso/cowspace (tmpfs) filesystem, size=${cowspace_size}..."
mkdir -p /run/archiso/cowspace
mount -t tmpfs -o "size=${cowspace_size}",mode=0755 cowspace /run/archiso/cowspace
fi
mkdir -p "/run/archiso/cowspace/${cow_directory}"

_mnt_sfs "/run/archiso/bootmnt/${archisobasedir}/${arch}/airootfs.sfs" "/run/archiso/sfs/airootfs"
#_mnt_fs "/run/archiso/sfs/airootfs/airootfs.img" "${newroot}" "/"
mount --bind "/run/archiso/sfs/airootfs" "/new_root"

if [[ "${copytoram}" == "y" ]]; then
umount /run/archiso/bootmnt
fi
}

# vim:ft=sh:ts=4:sw=4:et:

View File

@@ -1,49 +0,0 @@
# vim: set ft=sh:

run_hook() {
if [[ -n "${ip}" && -n "${archiso_http_srv}" ]]; then

archiso_http_srv=$(eval echo ${archiso_http_srv})
[[ -z "${archiso_http_spc}" ]] && archiso_http_spc="75%"

mount_handler="archiso_pxe_http_mount_handler"
fi
}

# Fetch a file with CURL
#
# $1 URL
# $2 Destination directory inside httpspace/${archisobasedir}
_curl_get() {
local _url="${1}"
local _dst="${2}"

msg ":: Downloading image"
if ! curl -L -f -o "/run/archiso/httpspace/${archisobasedir}${_dst}/${_url##*/}" --create-dirs "${_url}"; then
echo "ERROR: Downloading failed."
#echo " Falling back to interactive prompt"
#echo " You can try to fix the problem manually, log out when you are finished"
#launch_interactive_shell
sleep 4
reboot -f
fi
}

archiso_pxe_http_mount_handler () {
newroot="${1}"

msg ":: Mounting /run/archiso/httpspace (tmpfs) filesystem, size='${archiso_http_spc}'"
mkdir -p "/run/archiso/httpspace"
mount -t tmpfs -o size="${archiso_http_spc}",mode=0755 httpspace "/run/archiso/httpspace"

_curl_get "${archiso_http_srv}${archisobasedir}/${arch}/airootfs.sfs" "/${arch}"

if [[ "${checksum}" == "y" ]]; then
_curl_get "${archiso_http_srv}${archisobasedir}/${arch}/airootfs.md5" "/${arch}"
fi

mkdir -p "/run/archiso/bootmnt"
mount -o bind /run/archiso/httpspace /run/archiso/bootmnt

archiso_mount_handler ${newroot}
}

View File

@@ -1,18 +1,7 @@
 # vim: set ft=sh:

 run_cleanuphook () {
-msg ":: Mounting OverlayFS on / with tmpfs=rw, ${root}=ro ..."
-modprobe overlay
-
-echo "Now attempting overlay mount..."
-
-#mkdir /new_root.hw
-mkdir -p /run/archiso/cowspace/upperdir /run/archiso/cowspace/workdir
-#mount --move /new_root /new_root.hw
-#mkdir /dev/shm
-#mount -t tmpfs none /dev/shm
-#mount -t overlay overlay -olowerdir=/new_root.hw,upperdir=/new_root,workdir=/dev/shm -o noatime
-umount -l /new_root > /dev/null 2>&1
-mount -t overlay -o lowerdir=/run/archiso/sfs/airootfs,upperdir=/run/archiso/cowspace/upperdir,workdir=/run/archiso/cowspace/workdir airootfs /new_root
+msg ":: Adding SSL and SSH support..."
 pkill -9 dropbear #kill SSH (in preparation for the live system starting ssh on port 22
 }

View File

@@ -1,24 +0,0 @@
#!/bin/bash

build() {
add_module "cdrom"
add_module "loop"
add_module "dm-snapshot"

add_runscript

add_binary /usr/lib/udev/cdrom_id
add_binary blockdev
add_binary dmsetup
add_binary losetup
add_binary mountpoint
add_binary truncate
add_binary pv

add_file /usr/lib/udev/rules.d/60-cdrom_id.rules
add_file /usr/lib/udev/rules.d/10-dm.rules
add_file /usr/lib/udev/rules.d/95-dm-notify.rules
add_file /usr/lib/initcpio/udev/11-dm-initramfs.rules /usr/lib/udev/rules.d/11-dm-initramfs.rules
}

# vim: set ft=sh ts=4 sw=4 et:

View File

@@ -1,18 +0,0 @@
#!/bin/bash

build() {
add_runscript

add_binary curl

add_full_dir /etc/ssl
add_full_dir /etc/ca-certificates
}

help() {
cat<<HELPEOF
This hook loads the necessary modules for boot via PXE and HTTP.
HELPEOF
}

# vim: set ft=sh ts=4 sw=4 et:

View File

@@ -6,8 +6,11 @@ build()
 add_module 'loop'
 add_module 'overlay'

-add_binary "sed"
-add_binary "pkill"
+add_binary "/usr/bin/sed"
+add_binary "/usr/bin/pkill"
+add_binary "/usr/bin/curl"
+add_full_dir /etc/ssl
+add_full_dir /etc/ca-certificates

 add_runscript

@@ -16,6 +19,6 @@ build()
 help()
 {
 cat <<HELPEOF
-Mount a squashed flat-file directory with OverlayFS on /
+Mount a squashed flat-file directory with OverlayFS on /, add SSL support
 HELPEOF
 }

View File

@@ -68,9 +68,10 @@ build ()
 [ -e "${TMPDIR}/passwd" ] && ( grep -q -e '^root:' "${TMPDIR}/passwd" ) || make_etc_passwd

 add_checked_modules "/drivers/net/"
-add_binary "rm"
-add_binary "dropbear"
-add_binary "killall"
+add_binary "/usr/bin/rm"
+add_binary "/usr/bin/dropbear"
+add_binary "/usr/bin/killall"
+add_binary "/usr/bin/pkill"

 echo '/bin/ash' > "${TMPDIR}"/shells
 add_file "${TMPDIR}/shells" "/etc/shells"

View File

@@ -14,6 +14,7 @@
 #CacheDir = /var/cache/pacman/pkg/
 #LogFile = /var/log/pacman.log
 #GPGDir = /etc/pacman.d/gnupg/
+#HookDir = /etc/pacman.d/hooks/
 HoldPkg = pacman glibc
 #XferCommand = /usr/bin/curl -C - -f %u > %o
 #XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
@@ -34,11 +35,11 @@ Color
 TotalDownload
 CheckSpace
 VerbosePkgLists
+ILoveCandy


 # By default, pacman accepts packages signed by keys that its local keyring
 # trusts (see pacman-key and its man page), as well as unsigned packages.
-#SigLevel = Required DatabaseOptional #RE-ENABLE ME WHEN A NEW SNAPSHOT IS RELEASED WITH FIXED GPG
-SigLevel = Never
+SigLevel = Required DatabaseOptional
 LocalFileSigLevel = Optional
 #RemoteFileSigLevel = Required
@@ -98,7 +99,3 @@ Include = /etc/pacman.d/mirrorlist
 #[custom]
 #SigLevel = Optional TrustAll
 #Server = file:///home/custompkgs
-
-#[archlinuxfr]
-#SigLevel = Never
-#Server = http://repo.archlinux.fr/$arch

View File

@@ -56,7 +56,7 @@ then
 echo -n "${DEFROUTEIF} (${HWADDR}) is: ${IPADDR}"
 fi
 echo
-echo -n "tun0 is:"
+echo -n "tun0 is: "
 ifconfig tun0 | grep inet | grep -v "inet6" | awk '{print $2}'
 echo
 echo "http://bdisk.square-r00t.net/"