minor tweaks, testing syncing... if it passes, it gets a 3.10 tag
parent: ad539b4254
commit: 60ee17bf9d
@@ -16,7 +16,7 @@ def verifyCert(cert, key, CA = None):
 return(False)
 exit(("{0}: {1} does not match {2}!".format(datetime.datetime.now(), key, cert)))
 else:
-print("{0}: {1} verified against {2} successfully.".format(datetime.datetime.now(), key, cert))
+print("{0}: [SSL] Verified {1} against {2} successfully.".format(datetime.datetime.now(), key, cert))
 return(True)
 # This is disabled because there doesn't seem to currently be any way
 # to actually verify certificates against a given CA.
@@ -39,7 +39,7 @@ def sslCAKey(conf):
 keyfile))
 else:
 key = OpenSSL.crypto.PKey()
-print("{0}: Generating SSL CA key...".format(datetime.datetime.now()))
+print("{0}: [SSL] Generating SSL CA key...".format(datetime.datetime.now()))
 key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
 with open(keyfile, 'wb') as f:
 f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
@@ -117,7 +117,7 @@ def sslCKey(conf):
 keyfile))
 else:
 key = OpenSSL.crypto.PKey()
-print("{0}: Generating SSL Client key...".format(datetime.datetime.now()))
+print("{0}: [SSL] Generating SSL Client key...".format(datetime.datetime.now()))
 key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
 with open(keyfile, 'wb') as f:
 f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
@@ -177,6 +177,8 @@ def sslSign(conf, ca, key, csr):

 def sslPKI(conf):
 # run checks for everything, gen what's missing
+ssldir = conf['ipxe']['ssldir']
+os.makedirs(ssldir, exist_ok = True)
 certfile = conf['ipxe']['ssl_crt']
 key = sslCAKey(conf)
 ca = sslCA(conf, key = key)
@@ -95,8 +95,10 @@ def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
 for m in ('chroot', 'resolv', 'proc', 'sys', 'efi', 'dev', 'pts', 'shm', 'run', 'tmp'):
 if cmounts[m]:
 subprocess.call(cmounts[m])
-print("{0}: Performing '{1}' in chroot for {2}...".format(datetime.datetime.now(), cmd, chrootdir))
-print("\t\t\t You can view the progress via:\n\t\t\t tail -f {0}/var/log/chroot_install.log".format(chrootdir))
+print("{0}: [CHROOT] Running '{1}' ({2}). PROGRESS: tail -f {2}/var/log/chroot_install.log ...".format(
+datetime.datetime.now(),
+cmd,
+chrootdir))
 real_root = os.open("/", os.O_RDONLY)
 os.chroot(chrootdir)
 os.system('/root/pre-build.sh')
@@ -119,7 +121,7 @@ def chrootTrim(build):
 tarball = '{0}/root.{1}/usr/local/{2}/{2}.db.tar.xz'.format(chrootdir, a, i)
 dbdir = '{0}/root.{1}/var/lib/{2}/local'.format(chrootdir, a, i)
 if os.path.isdir(dbdir):
-print("{0}: Now compressing the {1} cache ({2}). Please wait...".format(
+print("{0}: [CHROOT] Compressing {1}'s cache ({2})...".format(
 datetime.datetime.now(),
 chrootdir,
 a))
@@ -128,7 +130,7 @@ def chrootTrim(build):
 with tarfile.open(name = tarball, mode = 'w:xz') as tar: # if this complains, use x:xz instead
 tar.add(dbdir, arcname = os.path.basename(dbdir))
 shutil.rmtree(dbdir, ignore_errors = True)
-print("{0}: Done creating {1} ({2}).\n\t\t\t {3} cleared.".format(
+print("{0}: [CHROOT] Created {1} ({2}). {3} cleared.".format(
 datetime.datetime.now(),
 tarball,
 humanize.naturalsize(
@@ -20,7 +20,7 @@ def http(conf):
 archboot = build['archboot']
 # remove the destination if it exists
 if os.path.isdir(httpdir):
-print('{0}: Removing {1} in preparation of syncing. Please wait...'.format(
+print('{0}: [HTTP] Removing {1}...'.format(
 datetime.datetime.now(),
 httpdir))
 shutil.rmtree(httpdir)
@@ -28,7 +28,7 @@ def http(conf):
 os.makedirs(httpdir)
 # here we build a dict of files to copy and their destination paths.
 httpfiles = {}
-print('{0}: Now syncing files to {1}. Please wait...'.format(
+print('{0}: [HTTP] (Boot files) => {1}...'.format(
 datetime.datetime.now(),
 httpdir))
 for a in arch:
@@ -36,8 +36,8 @@ def http(conf):
 httpfiles['{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)] = '{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)
 httpfiles['VERSION_INFO.txt'] = 'VERSION_INFO.txt'
 if 'x86_64' in arch:
-httpfiles['boot/{0}.64.kern'.format(bdisk['uxname'])] = '{0}.kern'.format(bdisk['uxname'])
+httpfiles['boot/{0}.64.kern'.format(bdisk['uxname'])] = '{0}.64.kern'.format(bdisk['uxname'])
-httpfiles['boot/{0}.64.img'.format(bdisk['uxname'])] = '{0}.img'.format(bdisk['uxname'])
+httpfiles['boot/{0}.64.img'.format(bdisk['uxname'])] = '{0}.32.img'.format(bdisk['uxname'])
 if 'i686' in arch:
 httpfiles['boot/{0}.32.kern'.format(bdisk['uxname'])] = '{0}.32.kern'.format(bdisk['uxname'])
 httpfiles['boot/{0}.32.img'.format(bdisk['uxname'])] = '{0}.32.img'.format(bdisk['uxname'])
@@ -68,15 +68,15 @@ def tftp(conf):
 tftpdir = tftp['path']
 # remove the destination if it exists
 if os.path.isdir(tftpdir):
-print('{0}: Removing {1} in preparation of syncing. Please wait...'.format(
+print('{0}: [TFTP] Removing {1}...'.format(
 datetime.datetime.now(),
 tftpdir))
 shutil.rmtree(tftpdir)
 # and we make it again
-os.makedirs(httpdir)
+os.makedirs(tftpdir)
 # and make a dict of the files etc.
 tftpfiles = {}
-print('{0}: Now syncing files to {1}. Please wait...'.format(
+print('{0}: [TFTP] (Boot files) => {1}...'.format(
 datetime.datetime.now(),
 tftpdir))
 for a in arch:
@@ -108,17 +108,22 @@ def git(conf):
 git_name = conf['bdisk']['dev']
 git_email = conf['bdisk']['email']
 if conf['sync']['git']:
-print('{0}: Creating a commit. Please wait...'.format(datetime.datetime.now()))
+print('{0}: [GIT] Creating commit...'.format(datetime.datetime.now()))
 repo = git.Repo(build['basedir'])
 repo.git.add('--all')
 repo.index.commit("automated commit from BDisk (git:sync)")
-print('{0}: Pushing to the remote. Please wait...'.format(datetime.datetime.now()))
+print('{0}: [GIT] Pushing to remote...'.format(datetime.datetime.now()))
 repo.remotes.origin.push()


 def rsync(conf):
+# TODO: just copy tftpbooting pxelinux.cfg (to be generated) if tftp,
+# and do nothing if http- copying over three copies of the squashed filesystems
+# is a waste of time, bandwidth, and disk space on target.
 build = conf['build']
 tempdir = build['tempdir']
+isodir = build['isodir']
+arch = build['arch']
 rsync = conf['rsync']
 sync = conf['sync']
 server = rsync['host']
@@ -130,54 +135,58 @@ def rsync(conf):
 cmd = ['/usr/bin/rsync',
 '-a',
 '-q',
+'-z',
 locpath,
 '{0}@{1}:{2}/.'.format(user, server, path)]
-if sync['http']:
+#if sync['http']:
-cmd[3] = conf['http']['path']
+# cmd[4] = conf['http']['path']
-print('{0}: Syncing {1} to {2}. Please wait...'.format(
+# print('{0}: Syncing {1} to {2}. Please wait...'.format(
-datetime.datetime.now(),
+# datetime.datetime.now(),
-cmd[4],
+# cmd[4],
-server))
+# server))
-subprocess.call(cmd)
+# subprocess.call(cmd)
-if sync['tftp']:
+#if sync['tftp']:
-cmd[3] = conf['tftp']['path']
+# cmd[4] = conf['tftp']['path']
-print('{0}: Syncing {1} to {2}. Please wait...'.format(
+# print('{0}: Syncing {1} to {2}. Please wait...'.format(
-datetime.datetime.now(),
+# datetime.datetime.now(),
-cmd[4],
+# cmd[4],
-server))
+# server))
-subprocess.call(cmd)
+# subprocess.call(cmd)
 if conf['ipxe']:
-cmd[3] = build['archboot']
+cmd[4] = build['archboot']
-print('{0}: Syncing {1} to {2}. Please wait...'.format(
+print('{0}: [RSYNC] {1} => {2}...'.format(
 datetime.datetime.now(),
 cmd[4],
 server))
 subprocess.call(cmd)
-cmd[3] = isodir
+cmd[4] = '{0}/boot'.format(build['tempdir'])
-print('{0}: Syncing {1} to {2}. Please wait...'.format(
+subprocess.call(cmd)
+cmd[4] = isodir
+print('{0}: [RSYNC] {1} => {2}...'.format(
 datetime.datetime.now(),
 cmd[4],
 server))
 subprocess.call(cmd)
 # Now we copy some extra files.
-prebuild_dir = '{0}/extra/pre-build.d/'
+prebuild_dir = '{0}/extra/pre-build.d'.format(build['basedir'])
 rsync_files = ['{0}/VERSION_INFO.txt'.format(tempdir),
 '{0}/root/packages.both'.format(prebuild_dir),
 '{0}/root/iso.pkgs.both'.format(prebuild_dir)]
 for x in rsync_files:
-cmd[3] = x
+cmd[4] = x
-print('{0}: Syncing {1} to {2}. Please wait...'.format(
+print('{0}: [RSYNC] {1} => {2}...'.format(
 datetime.datetime.now(),
 cmd[4],
 server))
+subprocess.call(cmd)
 # And we grab the remaining, since we need to rename them.
-print('{0}: Syncing some extra files to {1}. Please wait...'.format(
+print('{0}: [RSYNC] (Informational files) => {1}...'.format(
 datetime.datetime.now(),
 server))
 for a in arch:
-cmd[3] = '{0}/{1}/root/packages.arch'.format(prebuild_dir, a)
+cmd[4] = '{0}/{1}/root/packages.arch'.format(prebuild_dir, a)
-cmd[4] = '{0}@{1}:{2}/packages.{3}'.format(user, server, path, a)
+cmd[5] = '{0}@{1}:{2}/packages.{3}'.format(user, server, path, a)
 subprocess.call(cmd)
-cmd[3] = '{0}/{1}/root/iso.pkgs.arch'.format(prebuild_dir, a)
+cmd[4] = '{0}/{1}/root/iso.pkgs.arch'.format(prebuild_dir, a)
-cmd[4] = '{0}@{1}:{2}/iso.pkgs.{3}'.format(user, server, path, a)
+cmd[5] = '{0}@{1}:{2}/iso.pkgs.{3}'.format(user, server, path, a)
 subprocess.call(cmd)
@@ -28,7 +28,7 @@ def genImg(build, bdisk):
 airoot = archboot + '/' + a + '/'
 squashimg = airoot + 'airootfs.sfs'
 os.makedirs(airoot, exist_ok = True)
-print("{0}: Generating squashed filesystem image for {1}. Please wait...".format(
+print("{0}: [BUILD] Squashing filesystem ({1})...".format(
 datetime.datetime.now(),
 chrootdir + '/root.' + a))
 # TODO: use stdout and -progress if debugging is enabled. the below subprocess.call() just redirects to
@@ -41,13 +41,13 @@ def genImg(build, bdisk):
 '-noappend',
 '-comp', 'xz']
 subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
-print("{0}: Generated {1} ({2}).".format(
+print("{0}: [BUILD] Generated {1} ({2}).".format(
 datetime.datetime.now(),
 squashimg,
 humanize.naturalsize(
 os.path.getsize(squashimg))))
 # Generate the checksum files
-print("{0}: Generating SHA256 and MD5 hash checksum files for {1}. Please wait...".format(
+print("{0}: [BUILD] Generating SHA256, MD5 checksums ({1})...".format(
 datetime.datetime.now(),
 squashimg))
 hashes['sha256'][a] = hashlib.sha256()
@@ -64,7 +64,7 @@ def genImg(build, bdisk):
 f.write("{0} airootfs.sfs".format(hashes['sha256'][a].hexdigest()))
 with open(airoot + 'airootfs.md5', 'w+') as f:
 f.write("{0} airootfs.sfs".format(hashes['md5'][a].hexdigest()))
-print("{0}: Hash checksums complete.".format(datetime.datetime.now()))
+print("{0}: [BUILD] Hash checksums complete.".format(datetime.datetime.now()))
 # Logo
 os.makedirs(tempdir + '/boot', exist_ok = True)
 if not os.path.isfile('{0}/extra/{1}.png'.format(basedir, bdisk['uxname'])):
@@ -101,7 +101,7 @@ def genUEFI(build, bdisk):
 # For UEFI 2.3+ (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=UEFI_Shell)
 if not os.path.isfile(tempdir + '/EFI/shellx64_v2.efi'):
 shell2_path = tempdir + '/EFI/shellx64_v2.efi'
-print("{0}: You are missing {1}. We'll download it for you.".format(datetime.datetime.now(), shell2_path))
+print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell2_path))
 shell2_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/ShellBinPkg/UefiShell/X64/Shell.efi'
 shell2_fetch = urlopen(shell2_url)
 with open(shell2_path, 'wb+') as dl:
@@ -111,13 +111,13 @@ def genUEFI(build, bdisk):
 # TODO: is there an Arch package for this? can we just install that in the chroot and copy the shell binaries?
 if not os.path.isfile(tempdir + '/EFI/shellx64_v1.efi'):
 shell1_path = tempdir + '/EFI/shellx64_v1.efi'
-print("{0}: You are missing {1}. We'll download it for you.".format(datetime.datetime.now(), shell1_path))
+print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell1_path))
 shell1_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/EdkShellBinPkg/FullShell/X64/Shell_Full.efi'
 shell1_fetch = urlopen(shell1_url)
 with open(shell1_path, 'wb+') as dl:
 dl.write(shell1_fetch.read())
 shell1_fetch.close()
-print("{0}: Configuring UEFI bootloading...".format(datetime.datetime.now()))
+print("{0}: [BUILD] Building UEFI support...".format(datetime.datetime.now()))
 ## But wait! That's not all! We need more binaries.
 # http://blog.hansenpartnership.com/linux-foundation-secure-boot-system-released/
 shim_url = 'http://blog.hansenpartnership.com/wp-uploads/2013/'
@@ -176,7 +176,7 @@ def genUEFI(build, bdisk):
 fname = os.path.join(path, file)
 sizetotal += os.path.getsize(fname)
 # And now we create the EFI binary filesystem image/binary...
-print("{0}: Creating a {1} EFI ESP image at {2}. Please wait...".format(
+print("{0}: [BUILD] Creating EFI ESP image {2} ({1})...".format(
 datetime.datetime.now(),
 humanize.naturalsize(sizetotal),
 efiboot_img))
@@ -239,7 +239,7 @@ def genUEFI(build, bdisk):
 cmd = ['/bin/umount', mountpt]
 subprocess.call(cmd)
 efisize = humanize.naturalsize(os.path.getsize(efiboot_img))
-print('{0}: Built EFI binary.'.format(datetime.datetime.now()))
+print('{0}: [BUILD] Built EFI binary.'.format(datetime.datetime.now()))
 return(efiboot_img)

 def genISO(conf):
@@ -257,6 +257,9 @@ def genISO(conf):
 syslinuxdir = build['chrootdir'] + '/root.' + arch[0] + '/usr/lib/syslinux/bios/'
 sysl_tmp = tempdir + '/isolinux/'
 ver = bdisk['ver']
+if len(arch) == 1:
+isofile = '{0}-{1}-{2}.iso'.format(bdisk['uxname'], bdisk['ver'], arch[0])
+else:
 isofile = '{0}-{1}.iso'.format(bdisk['uxname'], bdisk['ver'])
 isopath = build['isodir'] + '/' + isofile
 arch = build['arch']
@@ -288,7 +291,7 @@ def genISO(conf):
 usbfile = '{0}-{1}-mini.usb.img'.format(bdisk['uxname'], bdisk['ver'])
 minipath = build['isodir'] + '/' + usbfile
 # Copy isolinux files
-print("{0}: Staging some files for ISO preparation. Please wait...".format(datetime.datetime.now()))
+print("{0}: [BUILD] Staging ISO preparation...".format(datetime.datetime.now()))
 isolinux_files = ['isolinux.bin',
 'vesamenu.c32',
 'linux.c32',
@@ -317,7 +320,7 @@ def genISO(conf):
 f.write(tpl_out)
 # And we need to build the ISO!
 # TODO: only include UEFI support if we actually built it!
-print("{0}: Generating the full ISO at {1}. Please wait.".format(datetime.datetime.now(), isopath))
+print("{0}: [BUILD] Building full ISO ({1})...".format(datetime.datetime.now(), isopath))
 if efi:
 cmd = ['/usr/bin/xorriso',
 '-as', 'mkisofs',
@@ -383,10 +386,10 @@ def genISO(conf):

 def displayStats(iso):
 for i in iso['name']:
-print("{0}: == {1} {2} ==".format(datetime.datetime.now(), iso[i]['type'], iso[i]['fmt']))
+print("{0}:\t\t\t == {1} {2} ==".format(datetime.datetime.now(), iso[i]['type'], iso[i]['fmt']))
 print('\t\t\t = Size: {0}'.format(iso[i]['size']))
 print('\t\t\t = SHA256: {0}'.format(iso[i]['sha']))
-print('\t\t\t = Location: {0}\n'.format(iso[i]['file']))
+print('\t\t\t = Location: {0}'.format(iso[i]['file']))

 def cleanUp():
 # TODO: clear out all of tempdir?
@@ -30,7 +30,7 @@ def buildIPXE(conf):
 mini_file = '{0}{1}'.format(img_path, ipxe_mini)
 ipxe_git_uri = 'git://git.ipxe.org/ipxe.git'
 patches_git_uri = 'https://github.com/eworm-de/ipxe.git'
-print('{0}: Preparing and fetching sources for iPXE. Please wait...'.format(
+print('{0}: [IPXE] Prep/fetch sources...'.format(
 datetime.datetime.now()))
 # Get the source and apply some cherrypicks
 if os.path.isdir(ipxe_src):
@@ -110,8 +110,8 @@ def buildIPXE(conf):
 DEVNULL = open(os.devnull, 'w')
 if os.path.isfile(build['dlpath'] + '/ipxe.log'):
 os.remove(build['dlpath'] + '/ipxe.log')
-print(('{0}: Building iPXE in {1}. Please wait...\n\t\t\t You can view progress' +
+print(('{0}: [IPXE] Building iPXE ({1})...\n\t\t\t PROGRESS: ' +
-' via:\n\t\t\t tail -f {2}/ipxe.log').format(
+'tail -f {2}/ipxe.log').format(
 datetime.datetime.now(),
 ipxe_src,
 build['dlpath']))
@@ -125,7 +125,7 @@ def buildIPXE(conf):
 with open('{0}/ipxe.log'.format(build['dlpath']), 'a') as f:
 subprocess.call(build_cmd['efi'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
 subprocess.call(build_cmd['img'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
-print('{0}: Built iPXE image(s) successfully.'.format(datetime.datetime.now()))
+print('{0}: [IPXE] Built iPXE image(s) successfully.'.format(datetime.datetime.now()))
 os.chdir(cwd)
 # move the files to the results dir
 os.rename('{0}/src/bin/ipxe.usb'.format(ipxe_src), usb_file)
@@ -45,15 +45,15 @@ def downloadTarball(build):
 if build['mirrorgpgsig'] != '':
 # we don't want to futz with the user's normal gpg.
 gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg')
-print("\n{0}: Generating a GPG key. Please wait...".format(datetime.datetime.now()))
+print("\n{0}: [PREP] Generating a GPG key...".format(datetime.datetime.now()))
 # python-gnupg 0.3.9 spits this error in Arch. it's harmless, but ugly af.
 # TODO: remove this when the error doesn't happen anymore.
 print("\t\t\t If you see a \"ValueError: Unknown status message: 'KEY_CONSIDERED'\" error,\n\t\t\t it can be safely ignored.")
 print("\t\t\t If this is taking a VERY LONG time, try installing haveged and starting it.\n\t\t\t This can be " +
 "done safely in parallel with the build process.\n")
 input_data = gpg.gen_key_input(name_email = 'tempuser@nodomain.tld', passphrase = 'placeholder_passphrase')
-key = gpg.gen_key(input_data)
+key = gpg.gen_key(input_data) # this gives the "error"
-keyid = build['gpgkey']
+keyid = build['gpgkey'] # this gives the "error" as well
 gpg.recv_keys(build['gpgkeyserver'], keyid)
 for a in arch:
 pattern = re.compile('^.*' + a + '\.tar(\.(gz|bz2|xz))?$')
@@ -63,7 +63,7 @@ def downloadTarball(build):
 pass
 else:
 # fetch the tarball...
-print("{0}: Fetching the tarball for {1} architecture, please wait...".format(
+print("{0}: [PREP] Fetching tarball ({1} architecture)...".format(
 datetime.datetime.now(),
 a))
 #dl_file = urllib.URLopener()
@@ -71,18 +71,18 @@ def downloadTarball(build):
 with open(tarball_path[a], 'wb') as f:
 f.write(tarball_dl.read())
 tarball_dl.close()
-print("{0}: Done fetching {1} ({2}).".format(
+print("{0}: [PREP] Done fetching {1} ({2}).".format(
 datetime.datetime.now(),
 tarball_path[a],
 humanize.naturalsize(
 os.path.getsize(tarball_path[a]))))
-print("{0}: Checking that the hash checksum for {1}\n\t\t\t matches {2}, please wait...".format(
+print("{0}: [PREP] Checking hash checksum {1} against {2}...".format(
 datetime.datetime.now(),
-tarball_path[a],
+sha1,
-sha1))
+tarball_path[a]))
 tarball_hash = hashlib.sha1(open(tarball_path[a], 'rb').read()).hexdigest()
 if tarball_hash != sha1:
-exit(("{0}: {1} either did not download correctly or a wrong (probably old) version exists on the filesystem.\n" +
+exit(("{0}: {1} either did not download correctly\n\t\t\t or a wrong (probably old) version exists on the filesystem.\n\t\t\t " +
 "Please delete it and try again.").format(datetime.datetime.now(), tarball))
 elif build['mirrorgpgsig'] != '':
 # okay, so the sha1 matches. let's verify the signature.
@@ -121,7 +121,7 @@ def unpackTarball(tarball_path, build, keep = False):
 # Open and extract the tarball
 if not keep:
 for a in build['arch']:
-print("{0}: Extracting tarball {1} ({2}). Please wait...".format(
+print("{0}: [PREP] Extracting tarball {1} ({2}). Please wait...".format(
 datetime.datetime.now(),
 tarball_path[a],
 humanize.naturalsize(
@@ -129,7 +129,7 @@ def unpackTarball(tarball_path, build, keep = False):
 tar = tarfile.open(tarball_path[a], 'r:gz')
 tar.extractall(path = chrootdir)
 tar.close()
-print("{0}: Extraction for {1} finished.".format(datetime.datetime.now(), tarball_path[a]))
+print("{0}: [PREP] Extraction for {1} finished.".format(datetime.datetime.now(), tarball_path[a]))

 def buildChroot(build, keep = False):
 dlpath = build['dlpath']
|
|||||||
; 2.) If specified, it will be created if it doesn't exist
|
; 2.) If specified, it will be created if it doesn't exist
|
||||||
; 3.) If it does exist, it will be deleted first- MAKE SURE
|
; 3.) If it does exist, it will be deleted first- MAKE SURE
|
||||||
; you do not store files here that you want to keep.
|
; you do not store files here that you want to keep.
|
||||||
path = ${build:tempdir}/http
|
path = ${build:dlpath}/http
|
||||||
|
|
||||||
; What user and group, if applicable, should the HTTP files
|
; What user and group, if applicable, should the HTTP files
|
||||||
; be owned as? This is most likely going to be either 'http',
|
; be owned as? This is most likely going to be either 'http',
|
||||||
; 'nginx', or 'apache'.
|
; 'nginx', or 'apache'.
|
||||||
; 0.) No whitespace
|
; 0.) No whitespace
|
||||||
; 1.) User must exist on system
|
; 1.) User must exist on system
|
||||||
; 2.) If sync:httpdir is blank, they will not be used
|
; 2.) If path is blank, they will not be used
|
||||||
user = http
|
user = http
|
||||||
group = http
|
group = http
|
||||||
|
|
||||||
@ -359,7 +359,7 @@ group = http
|
|||||||
; 2.) If specified, it will be created if it doesn't exist
|
; 2.) If specified, it will be created if it doesn't exist
|
||||||
; 3.) If it does exist, it will be deleted first- MAKE SURE
|
; 3.) If it does exist, it will be deleted first- MAKE SURE
|
||||||
; you do not store files here that you want to keep.
|
; you do not store files here that you want to keep.
|
||||||
path = ${build:tempdir}/tftpboot
|
path = ${build:dlpath}/tftpboot
|
||||||
|
|
||||||
; What user and group, if applicable, should the TFTP files
|
; What user and group, if applicable, should the TFTP files
|
||||||
; be owned as? This is most likely going to be either 'tftp'
|
; be owned as? This is most likely going to be either 'tftp'
|
||||||
|
@@ -1,10 +0,0 @@
-## Enable IPv6 support
-s/^#undef([[:space:]]*NET_PROTO_IPV6)/#define\1/g
-## Enable HTTPS
-s/^#undef([[:space:]]*DOWNLOAD_PROTO_HTTPS)/#define\1/g
-s@^//(#define[[:space:]]*IMAGE_TRUST_CMD@\1@g
-## Enable FTP
-s/^#undef([[:space:]]*DOWNLOAD_PROTO_FTP)/#define\1/g
-## Currently broken for EFI building
-#s@^//(#define[[:space:]]*CONSOLE_CMD)@\1@g
-#s@^//(#define[[:space:]]*IMAGE_PNG@\1@g

@@ -1,2 +0,0 @@
-## Currently broken on EFI systems
-#s@^//(#define[[:space:]]*CONSOLE_VESAFB)@\1@g

@@ -1,8 +0,0 @@
-#!ipxe
-
-dhcp
-## TODO: signed kernel and initrd
-#imgtrust --permanent
-#imgverify vmlinuz path/to/vmlinuz.sig
-#imgverify initrd path/to/initrd.sig
-chain https://bdisk.square-r00t.net

@@ -1,8 +0,0 @@
-Thanks to "eworm" for his work on the AUR iPXE-git package:
-https://aur.archlinux.org/packages/ipxe-git/
-
-and specifically the following patches:
-http://www.eworm.de/download/linux/ipxe-0001-git-version.patch
-http://www.eworm.de/download/linux/ipxe-0002-banner.patch
-http://www.eworm.de/download/linux/ipxe-0003-iso-efi.patch
-http://www.eworm.de/download/linux/ipxe-0004-fix-no-pie-workaround.patch
@@ -1,32 +0,0 @@
-From 4c139ece028b5dd6c4e5f46ce2bf8134c390de90 Mon Sep 17 00:00:00 2001
-From: Christian Hesse <mail@eworm.de>
-Date: Thu, 12 Feb 2015 08:59:37 +0100
-Subject: [PATCH] git version
-
-Signed-off-by: Christian Hesse <mail@eworm.de>
----
-src/Makefile | 7 ++++---
-1 file changed, 4 insertions(+), 3 deletions(-)
-
-diff --git a/src/Makefile b/src/Makefile
-index cf9cfd2..38ebb3d 100644
---- a/src/Makefile
-+++ b/src/Makefile
-@@ -194,10 +194,11 @@ VERSION_MINOR = 0
-VERSION_PATCH = 0
-EXTRAVERSION = +
-MM_VERSION = $(VERSION_MAJOR).$(VERSION_MINOR)
--VERSION = $(MM_VERSION).$(VERSION_PATCH)$(EXTRAVERSION)
-ifneq ($(wildcard ../.git),)
--GITVERSION := $(shell git describe --always --abbrev=1 --match "" 2>/dev/null)
--VERSION += ($(GITVERSION))
-+GITVERSION := $(shell git describe --tags --long 2>/dev/null)
-+VERSION = $(GITVERSION)
-+else
-+VERSION = $(MM_VERSION).$(VERSION_PATCH)$(EXTRAVERSION)
-endif
-version :
-@$(ECHO) "$(VERSION)"
---
-2.3.0
-
@@ -1,36 +0,0 @@
-diff --git a/src/usr/autoboot.c b/src/usr/autoboot.c
-index e93b015..758e187 100644
---- a/src/usr/autoboot.c
-+++ b/src/usr/autoboot.c
-@@ -71,6 +71,7 @@ static int ( * is_autoboot_device ) ( struct net_device *netdev );
-#define NORMAL "\033[0m"
-#define BOLD "\033[1m"
-#define CYAN "\033[36m"
-+#define BLUE "\033[34m"
-
-/** The "scriptlet" setting */
-const struct setting scriptlet_setting __setting ( SETTING_MISC, scriptlet ) = {
-@@ -521,7 +522,6 @@ static int shell_banner ( void ) {
-* @ret rc Return status code
-*/
-int ipxe ( struct net_device *netdev ) {
-- struct feature *feature;
-struct image *image;
-char *scriptlet;
-int rc;
-@@ -538,11 +538,11 @@ int ipxe ( struct net_device *netdev ) {
-* do so.
-*
-*/
-- printf ( NORMAL "\n\n" PRODUCT_NAME "\n" BOLD PRODUCT_SHORT_NAME " %s"
-+ printf ( NORMAL "\n\n" PRODUCT_NAME "\n" BOLD PRODUCT_SHORT_NAME
-NORMAL " -- " PRODUCT_TAG_LINE " -- "
-- CYAN PRODUCT_URI NORMAL "\nFeatures:", product_version );
-- for_each_table_entry ( feature, FEATURES )
-- printf ( " %s", feature->name );
-+ CYAN PRODUCT_URI NORMAL "\n"
-+ BOLD "BDisk" BLUE "LiveDistro" NORMAL " -- Welp, Yer Boned!(TM) -- "
-+ BOLD BLUE "https://bdisk.square-r00t.net/" NORMAL "\n" );
-printf ( "\n" );
-
-/* Boot system */
@@ -1,125 +0,0 @@
-From d2092664b3cf866b2ab338fe056149d3266d0acc Mon Sep 17 00:00:00 2001
-From: Christian Hesse <mail@eworm.de>
-Date: Sun, 19 Apr 2015 13:16:09 +0200
-Subject: [PATCH 1/1] allow to build ISO image with EFI support (ipxe.eiso)
-
-Signed-off-by: Christian Hesse <mail@eworm.de>
----
-src/arch/i386/Makefile.pcbios | 6 +++++
-src/util/geniso | 52 +++++++++++++++++++++++++++++++++----------
-2 files changed, 46 insertions(+), 12 deletions(-)
-
-diff --git a/src/arch/i386/Makefile.pcbios b/src/arch/i386/Makefile.pcbios
-index ff82373..c7a58eb 100644
---- a/src/arch/i386/Makefile.pcbios
-+++ b/src/arch/i386/Makefile.pcbios
-@@ -59,6 +59,12 @@ NON_AUTO_MEDIA += iso
-$(QM)$(ECHO) " [GENISO] $@"
-$(Q)ISOLINUX_BIN=$(ISOLINUX_BIN) VERSION="$(VERSION)" bash util/geniso -o $@ $<
-
-+# rule to make a non-emulation ISO boot image with EFI support
-+NON_AUTO_MEDIA += eiso
-+%eiso: %lkrn bin-i386-efi/ipxe.efi bin-x86_64-efi/ipxe.efi util/geniso
-+ $(QM)$(ECHO) " [GENISO] $@"
-+ $(Q)ISOLINUX_BIN=$(ISOLINUX_BIN) VERSION="$(VERSION)" bash util/geniso -e -o $@ $<
-+
-# rule to make a floppy emulation ISO boot image
-NON_AUTO_MEDIA += liso
-%liso: %lkrn util/geniso
-diff --git a/src/util/geniso b/src/util/geniso
-index 521c929..9e8588c 100755
---- a/src/util/geniso
-+++ b/src/util/geniso
-@@ -6,16 +6,21 @@ function help() {
-echo "usage: ${0} [OPTIONS] foo.lkrn [bar.lkrn,...]"
-echo
-echo "where OPTIONS are:"
-+ echo " -e build image with EFI support"
-echo " -h show this help"
-echo " -l build legacy image with floppy emulation"
-echo " -o FILE save iso image to file"
-}
-
-+EFI=0
-LEGACY=0
-FIRST=""
-
--while getopts "hlo:" opt; do
-+while getopts "ehlo:" opt; do
-case ${opt} in
-+ e)
-+ EFI=1
-+ ;;
-h)
-help
-exit 0
-@@ -37,17 +42,25 @@ if [ -z "${OUT}" ]; then
-exit 1
-fi
-
--# There should either be mkisofs or the compatible genisoimage program
--for command in genisoimage mkisofs; do
-- if ${command} --version >/dev/null 2>/dev/null; then
-- mkisofs=(${command})
-- break
-- fi
--done
--
--if [ -z "${mkisofs}" ]; then
-- echo "${0}: mkisofs or genisoimage not found, please install or set PATH" >&2
-+# We need xorriso (from libisoburn) for EFI support, so try that first.
-+if xorriso --version >/dev/null 2>/dev/null; then
-+ mkisofs=(xorriso -as mkisofs)
-+elif [ ${EFI} -eq 1 ]; then
-+ echo "${0}: xorriso not found, but required for EFI support. Please install." >&2
-exit 1
-+else
-+ # fall back to mkisofs or the compatible genisoimage program
-+ for command in genisoimage mkisofs; do
-+ if ${command} --version >/dev/null 2>/dev/null; then
-+ mkisofs=(${command})
-+ break
-+ fi
-+ done
-+
-+ if [ -z "${mkisofs}" ]; then
-+ echo "${0}: mkisofs or genisoimage not found, please install or set PATH" >&2
-+ exit 1
-+ fi
-fi
-
-dir=$(mktemp -d bin/iso.dir.XXXXXX)
-@@ -122,6 +135,21 @@ case "${LEGACY}" in
-# copy isolinux bootloader
-cp ${ISOLINUX_BIN} ${dir}
-
-+ mkisofs+=(-b isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table)
-+
-+ if [ "${EFI}" -eq 1 ]; then
-+ # generate EFI image
-+ img=${dir}/efiboot.img
-+
-+ mformat -f 2880 -C -i ${img} ::
-+ mmd -i ${img} "::/EFI"
-+ mmd -i ${img} "::/EFI/BOOT"
-+ mcopy -m -i ${img} bin-x86_64-efi/ipxe.efi "::EFI/BOOT/BOOTX64.EFI"
-+ mcopy -m -i ${img} bin-i386-efi/ipxe.efi "::EFI/BOOT/BOOTIA32.EFI"
-+
-+ mkisofs+=(-eltorito-alt-boot -e efiboot.img -isohybrid-gpt-basdat -no-emul-boot)
-+ fi
-+
-# syslinux 6.x needs a file called ldlinux.c32
-LDLINUX_C32=$(dirname ${ISOLINUX_BIN})/ldlinux.c32
-if [ -s ${LDLINUX_C32} ]; then
-@@ -129,7 +157,7 @@ case "${LEGACY}" in
-fi
-
-# generate the iso image
-- "${mkisofs[@]}" -b isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -output ${OUT} ${dir}
-+ "${mkisofs[@]}" -output ${OUT} ${dir}
-
-# isohybrid will be used if available
-if isohybrid --version >/dev/null 2>/dev/null; then
---
-2.3.5
-
@@ -1,120 +0,0 @@
-From 189652b03032305a2db860e76fb58e81e3420c4d Mon Sep 17 00:00:00 2001
-From: Christian Hesse <mail@eworm.de>
-Date: Wed, 24 Feb 2016 09:16:51 +0100
-Subject: [PATCH] allow to build ISO image with EFI support (ipxe.eiso)
-
----
-src/arch/x86/Makefile.pcbios | 6 +++++
-src/util/geniso | 52 ++++++++++++++++++++++++++++++++++----------
-2 files changed, 46 insertions(+), 12 deletions(-)
-
-diff --git a/src/arch/x86/Makefile.pcbios b/src/arch/x86/Makefile.pcbios
-index f8c2253..1e01636 100644
---- a/src/arch/x86/Makefile.pcbios
-+++ b/src/arch/x86/Makefile.pcbios
-@@ -86,6 +86,12 @@ NON_AUTO_MEDIA += iso
-$(Q)ISOLINUX_BIN=$(ISOLINUX_BIN) LDLINUX_C32=$(LDLINUX_C32) \
-VERSION="$(VERSION)" bash util/geniso -o $@ $<
-
-+# rule to make a non-emulation ISO boot image with EFI support
-+NON_AUTO_MEDIA += eiso
-+%eiso: %lkrn bin-i386-efi/ipxe.efi bin-x86_64-efi/ipxe.efi util/geniso
-+ $(QM)$(ECHO) " [GENISO] $@"
-+ $(Q)ISOLINUX_BIN=$(ISOLINUX_BIN) VERSION="$(VERSION)" bash util/geniso -e -o $@ $<
-+
-# rule to make a floppy emulation ISO boot image
-NON_AUTO_MEDIA += liso
-%liso: %lkrn util/geniso
-diff --git a/src/util/geniso b/src/util/geniso
-index ff090d4..7694036 100755
---- a/src/util/geniso
-+++ b/src/util/geniso
-@@ -6,16 +6,21 @@ function help() {
-echo "usage: ${0} [OPTIONS] foo.lkrn [bar.lkrn,...]"
-echo
-echo "where OPTIONS are:"
-+ echo " -e build image with EFI support"
-echo " -h show this help"
-echo " -l build legacy image with floppy emulation"
-echo " -o FILE save iso image to file"
-}
-
-+EFI=0
-LEGACY=0
-FIRST=""
-
--while getopts "hlo:" opt; do
-+while getopts "ehlo:" opt; do
-case ${opt} in
-+ e)
-+ EFI=1
-+ ;;
-h)
-help
-exit 0
-@@ -37,17 +42,25 @@ if [ -z "${OUT}" ]; then
-exit 1
-fi
-
--# There should either be mkisofs or the compatible genisoimage program
--for command in genisoimage mkisofs; do
-- if ${command} --version >/dev/null 2>/dev/null; then
-- mkisofs=(${command})
-- break
-- fi
--done
--
--if [ -z "${mkisofs}" ]; then
-- echo "${0}: mkisofs or genisoimage not found, please install or set PATH" >&2
-+# We need xorriso (from libisoburn) for EFI support, so try that first.
-+if xorriso --version >/dev/null 2>/dev/null; then
-+ mkisofs=(xorriso -as mkisofs)
-+elif [ ${EFI} -eq 1 ]; then
-+ echo "${0}: xorriso not found, but required for EFI support. Please install." >&2
-exit 1
-+else
-+ # fall back to mkisofs or the compatible genisoimage program
-+ for command in genisoimage mkisofs; do
-+ if ${command} --version >/dev/null 2>/dev/null; then
-+ mkisofs=(${command})
-+ break
-+ fi
-+ done
-+
-+ if [ -z "${mkisofs}" ]; then
-+ echo "${0}: mkisofs or genisoimage not found, please install or set PATH" >&2
-+ exit 1
-+ fi
-fi
-
-dir=$(mktemp -d bin/iso.dir.XXXXXX)
-@@ -122,13 +135,28 @@ case "${LEGACY}" in
-# copy isolinux bootloader
-cp ${ISOLINUX_BIN} ${dir}
-
-+ mkisofs+=(-b isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table)
-+
-+ if [ "${EFI}" -eq 1 ]; then
-+ # generate EFI image
-+ img=${dir}/efiboot.img
-+
-+ mformat -f 2880 -C -i ${img} ::
-+ mmd -i ${img} "::/EFI"
-+ mmd -i ${img} "::/EFI/BOOT"
-+ mcopy -m -i ${img} bin-x86_64-efi/ipxe.efi "::EFI/BOOT/BOOTX64.EFI"
-+ mcopy -m -i ${img} bin-i386-efi/ipxe.efi "::EFI/BOOT/BOOTIA32.EFI"
-+
-+ mkisofs+=(-eltorito-alt-boot -e efiboot.img -isohybrid-gpt-basdat -no-emul-boot)
-+ fi
-+
-# syslinux 6.x needs a file called ldlinux.c32
-if [ -n "${LDLINUX_C32}" -a -s "${LDLINUX_C32}" ]; then
-cp ${LDLINUX_C32} ${dir}
-fi
-
-# generate the iso image
-- "${mkisofs[@]}" -b isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -output ${OUT} ${dir}
-+ "${mkisofs[@]}" -output ${OUT} ${dir}
-
-# isohybrid will be used if available
-if isohybrid --version >/dev/null 2>/dev/null; then
@@ -1,34 +0,0 @@
-From a4f7e3ba395af4cd0a706df635309d4ef837ecf8 Mon Sep 17 00:00:00 2001
-From: Christian Hesse <mail@eworm.de>
-Date: Wed, 8 Apr 2015 09:51:41 +0200
-Subject: [PATCH 2/2] Fix no-PIE workaround for i386 builds
-
-This workaround did not work for my version of gcc (4.9.2 20150304) as
-no option -nopie exists.
-
-We take another way: Let's check whether or not the macro __PIE__ is defined
-and add -fno-PIE if it is.
-
-Signed-off-by: Christian Hesse <mail@eworm.de>
----
-src/arch/i386/Makefile | 4 ++--
-1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/src/arch/i386/Makefile b/src/arch/i386/Makefile
-index 99f8753..897081b 100644
---- a/src/arch/i386/Makefile
-+++ b/src/arch/i386/Makefile
-@@ -75,8 +75,8 @@ CFLAGS += -Ui386
-# output on stderr instead of checking the exit status.
-#
-ifeq ($(CCTYPE),gcc)
--PIE_TEST = [ -z "`$(CC) -fno-PIE -nopie -x c -c /dev/null -o /dev/null 2>&1`" ]
--PIE_FLAGS := $(shell $(PIE_TEST) && $(ECHO) '-fno-PIE -nopie')
-+PIE_TEST = $(CC) -dM -E - < /dev/null | grep -q '__PIE__'
-+PIE_FLAGS := $(shell $(PIE_TEST) && $(ECHO) '-fno-PIE')
-WORKAROUND_CFLAGS += $(PIE_FLAGS)
-endif
-
---
-2.3.5
-
@@ -1,33 +0,0 @@
-[ ca ]
-default_ca = ca_default
-
-[ ca_default ]
-certificate = crts/ca.crt
-private_key = keys/ca.key
-serial = txt/ca.srl
-database = txt/ca.idx
-#new_certs_dir = signed
-new_certs_dir = crts
-#default_md = default
-default_md = sha512
-policy = policy_anything
-preserve = yes
-default_days = 90
-unique_subject = no
-
-[ policy_anything ]
-countryName = optional
-stateOrProvinceName = optional
-localityName = optional
-organizationName = optional
-organizationalUnitName = optional
-commonName = optional
-emailAddress = optional
-
-[ cross ]
-basicConstraints = critical,CA:true
-keyUsage = critical,cRLSign,keyCertSign
-
-[ codesigning ]
-keyUsage = digitalSignature
-extendedKeyUsage = codeSigning