Compare commits

234 Commits

| Author | SHA1 | Date |
|---|---|---|
| | a0d1c1df5c | |
| | 1d35f4deca | |
| | 1437639338 | |
| | 90b88d82e8 | |
| | e17005b729 | |
| | aa0a7e5837 | |
| | 77b22b8b8a | |
| | 6543b230d4 | |
| | 67338238af | |
| | d8686ca943 | |
| | b740d2bfff | |
| | 11a2db0acb | |
| | 379795ee06 | |
| | 436bc3d083 | |
| | 4663c3cd02 | |
| | f75e4ee896 | |
| | 88828685c2 | |
| | 4fa98eb805 | |
| | 916ea1dc2c | |
| | 9b2eff59d8 | |
| | 13349d6d99 | |
| | b0fba9b441 | |
| | 6954e8dc4e | |
| | 0febad432a | |
| | 473833a58f | |
| | 743edf045b | |
| | 8f3da5ee34 | |
| | c2c051b6a3 | |
| | 6deef053d3 | |
| | c4bb612381 | |
| | 289c2711b8 | |
| | b2848970c3 | |
| | b638e58dc8 | |
| | b8592686e4 | |
| | 95aa8aa3bc | |
| | 31eec2d3f3 | |
| | fcc2cb674f | |
| | add247d622 | |
| | 542166de67 | |
| | 66ece65699 | |
| | 701949b8f7 | |
| | 72298d7a4c | |
| | 62a7d65be5 | |
| | 6c7f0a3a6f | |
| | 86f94f49ef | |
| | 026a296444 | |
| | 7474012ada | |
| | 84b6c80b07 | |
| | 3848a0bf7e | |
| | 31826960c1 | |
| | af732a1d64 | |
| | 0c0f6ee81b | |
| | c149a7b3b7 | |
| | 3976fd631c | |
| | c95f1f535b | |
| | ea3c90d85d | |
| | eb9bbd8b3b | |
| | 76c898588f | |
| | 7dd42eaf4d | |
| | e6652df291 | |
| | 2ab3116d52 | |
| | 5af337fca7 | |
| | 68669a7fd5 | |
| | 118e1355bc | |
| | 419f266f0f | |
| | e4b7bf85e9 | |
| | 6d6d1e20b1 | |
| | 001925af88 | |
| | 6ba2b6287c | |
| | 7eee1c4658 | |
| | a89a6ec94b | |
| | 4ef4a939e8 | |
| | 431b4e3425 | |
| | 130746fa00 | |
| | 36061cccb5 | |
| | 0422038c47 | |
| | fbce0d448e | |
| | b1383ff3d5 | |
| | b8012e8b4b | |
| | 58ee4cff4d | |
| | 55b61fab65 | |
| | 981d92db92 | |
| | b8622c4462 | |
| | d744250c1b | |
| | ae2a7be09d | |
| | 305da25420 | |
| | 623c0e3abd | |
| | 31e8c4acee | |
| | c55b844ad1 | |
| | 5d4e218db5 | |
| | 423c509878 | |
| | 94b326f1e5 | |
| | 6cea3c012e | |
| | 262d10f55d | |
| | 06bfb8f3de | |
| | e091d94f91 | |
| | a79802afa1 | |
| | 122c366490 | |
| | aa8fa6f1c4 | |
| | 46357d8cf8 | |
| | c5821b4e56 | |
| | 86875ff09f | |
| | 676a2be088 | |
| | 6d191405be | |
| | ca4a9e4b08 | |
| | cfc48cac26 | |
| | 1fc59208b6 | |
| | 69d13d5c97 | |
| | b17646ea4f | |
| | b27ee82a9d | |
| | 33043a3499 | |
| | 6f450ab68f | |
| | d84a98520a | |
| | fa2fda8054 | |
| | e1eefebf9d | |
| | f169080f59 | |
| | ef28b55686 | |
| | 24b5899280 | |
| | 81f85b7e48 | |
| | 399819748f | |
| | 6078736f70 | |
| | 120b576a38 | |
| | b566970d57 | |
| | 380301725d | |
| | cd9148bcec | |
| | 4dba35f455 | |
| | 5bd2a87d0c | |
| | 91bff5412c | |
| | 8c9a3cd14b | |
| | e1cd54a7b2 | |
| | e169160159 | |
| | 9f1515b96c | |
| | aac603b4ee | |
| | 5243da4d0a | |
| | a39e6e4bb6 | |
| | f23c20da99 | |
| | ba53a8d162 | |
| | e18ebb24fb | |
| | 38227cf938 | |
| | 07ab9840ca | |
| | 4f775159c8 | |
| | 36c20eae91 | |
| | eb33ecd559 | |
| | b843a473bc | |
| | 2d0e15f811 | |
| | 4640030373 | |
| | 0836b93fee | |
| | 20129f5ac0 | |
| | 67c3eb93fa | |
| | 0ed777e86b | |
| | c546d427fd | |
| | 5b941f171f | |
| | 652d616471 | |
| | 6d04f262db | |
| | ea414ca5b7 | |
| | 3d537e42bc | |
| | 3252303573 | |
| | 166f5a9021 | |
| | 080ee26fff | |
| | 5b2f4d5c0a | |
| | 591db419ad | |
| | 7b55f4e3f6 | |
| | 48364561bf | |
| | a8b9ecc7a0 | |
| | 7f03396325 | |
| | e4552a879f | |
| | caca1d1e84 | |
| | 3ccef84028 | |
| | 48eb809e84 | |
| | 2d42adaaf7 | |
| | 045bcfaa0d | |
| | 9c528c4908 | |
| | b2109646f3 | |
| | 7598e2bf6f | |
| | aa791870be | |
| | 1bac4a9d78 | |
| | fd68ba87b7 | |
| | 731609f689 | |
| | 405bf79d56 | |
| | d264281284 | |
| | 30a9e1fc58 | |
| | d2f92dad86 | |
| | 598fd5fcde | |
| | aaf6b16407 | |
| | 7ffde14257 | |
| | e41b3f5ab3 | |
| | e19ae130e9 | |
| | 44fce6e456 | |
| | b09a07e3aa | |
| | ae118ee9ed | |
| | 5ab91b01f7 | |
| | a8914da258 | |
| | 58736f7f95 | |
| | b651901b91 | |
| | afd24b8070 | |
| | 3a61ea161a | |
| | d4a5bf60db | |
| | 5834e8fafc | |
| | c3e5baf04b | |
| | 836aacd425 | |
| | b4ee009465 | |
| | 33558610a6 | |
| | 3bcdb408a1 | |
| | 6801047d0a | |
| | 11f82ee44e | |
| | f8b5d7e2d8 | |
| | ce317bf3f7 | |
| | b3a3427a9a | |
| | 7bc6ea3408 | |
| | 8add03fadb | |
| | f904052111 | |
| | 704e590891 | |
| | 08d3958b47 | |
| | c49112a28d | |
| | eb6999169e | |
| | b489c69772 | |
| | 300beededb | |
| | 7786f0f49d | |
| | 8441148e36 | |
| | 6423c36f24 | |
| | 3776f89d9c | |
| | e74c554643 | |
| | f47f2f8640 | |
| | 54751f9753 | |
| | a31a528e60 | |
| | ef73b92929 | |
| | 055a373885 | |
| | 1491db7e1e | |
| | a6c557097a | |
| | 8aaf23cdac | |
| | 6c2dfce9a7 | |
| | 20185dea68 | |
| | a7fb958a2c | |
| | e03be139ef | |
16  .githooks/pre-commit/01_asciiref_gen  (Executable file)

@@ -0,0 +1,16 @@

```bash
#!/bin/bash

origdir="${PWD}"
docsdir="${PWD}/ref/ascii/"

if ! command -v asciidoctor &> /dev/null;
then
  exit 0
fi

cd "${docsdir}"

asciidoctor -o ascii.html ascii.adoc

cd ${origdir}
git add "${docsdir}/ascii.html"
```
2  .gitignore  (vendored)

@@ -22,4 +22,6 @@ __pycache__/

```
*.run
*.7z
*.rar
*.sqlite3
*.deb
.idea/
```
674  LICENSE  (Normal file)

@@ -0,0 +1,674 @@

```
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

Preamble

The GNU General Public License is a free, copyleft license for
software and other kinds of works.

The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.

Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

The precise terms and conditions for copying, distribution and
modification follow.

TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based
on the Program.

To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

The Corresponding Source for a work in source code form is that
same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.

b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".

c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.

d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.

A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.

b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.

c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.

d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.

A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or

b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or

c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or

d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or

e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or

f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.

All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
```
39  TODO  (Normal file)

@@ -0,0 +1,39 @@

```
- sshsecure is being re-written in golang

-vault, schema dumper (dump mounts, paths (otional w/switch or toggle), and meta information)
--ability to recreate from xml dump

-git

-net/addr needs DNS/PTR/allocation stuff etc.

-net/mirroring

-storage, see if we can access lvm and cryptsetup functions via https://github.com/storaged-project/libblockdev/issues/41
--http://storaged.org/doc/udisks2-api/latest/gdbus-org.freedesktop.UDisks2.MDRaid.html
--http://storaged.org/doc/udisks2-api/latest/gdbus-org.freedesktop.UDisks2.Encrypted.html
--http://mindbending.org/en/python-and-udisks-part-2
--http://storaged.org/doc/udisks2-api/2.6.5/gdbus-org.freedesktop.UDisks2.Block.html
--https://dbus.freedesktop.org/doc/dbus-python/doc/tutorial.html


sshkeys:
-need to verify keys via GPG signature. we also need to have a more robust way of updating pubkeys - categorization, role
-write API to get pubkeys, hostkeys? really wish DBs supported nesting
-separate by algo, but this is easy to do (split on space, [0])

snippet: create mtree with libarchive, bsdtar -cf /tmp/win.mtree --one-file-system --format=mtree --options='mtree:sha512,mtree:indent' /path/*
probably need to package https://packages.debian.org/source/stretch/freebsd-buildutils to get fmtree for reading

-net, add ipxe - write flask app that determines path based on MAC addr

-net, add shorewall templater

-port in sslchk

-script that uses uconv(?) and pymysql to export database to .ods

-IRC
-- i should use the python IRC module on pypi to join an irc network (freenode, probably, for my personal interests) and
run an iteration over all nicks in a channel with /ctcp <nick> version. handy when i'm trying to find someone running
a certain platform/client i have some questions about.
```
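The "separate by algo ... (split on space, [0])" item in the TODO above refers to the fact that an OpenSSH public key line begins with its algorithm name, so grouping keys by algorithm needs no real parsing. A minimal sketch of that idea (not part of this commit; the sample keys are hypothetical placeholders):

```python
# Sketch of the "split on space, [0]" grouping from the TODO above:
# an OpenSSH pubkey line starts with its algorithm, so grouping by
# algo is just a dict keyed on the first whitespace-separated field.
from collections import defaultdict

pubkeys = [
    'ssh-ed25519 AAAAC3Nz...example1 user@host1',  # hypothetical keys
    'ssh-rsa AAAAB3Nz...example2 user@host2',
    'ssh-ed25519 AAAAC3Nz...example3 user@host3',
]

by_algo = defaultdict(list)
for line in pubkeys:
    algo = line.split()[0]  # [0] is the algorithm field
    by_algo[algo].append(line)

for algo, keys in sorted(by_algo.items()):
    print('{0}: {1} key(s)'.format(algo, len(keys)))
```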
62  aif/cfgs/base.xml  (Normal file)

@@ -0,0 +1,62 @@

```xml
<?xml version="1.0" encoding="UTF-8" ?>
<aif xmlns:aif="https://aif.square-r00t.net"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
     xsi:schemaLocation="https://aif.square-r00t.net aif.xsd">
    <storage>
        <disk device="/dev/sda" diskfmt="gpt">
            <part num="1" start="0%" size="10%" fstype="ef00" />
            <part num="2" start="10%" size="100%" fstype="8300" />
        </disk>
        <mount source="/dev/sda2" target="/mnt/aif" order="1" />
        <mount source="/dev/sda1" target="/mnt/aif/boot" order="2" />
    </storage>
    <network hostname="aiftest.square-r00t.net">
        <iface device="auto" address="auto" netproto="ipv4" />
    </network>
    <system timezone="EST5EDT" locale="en_US.UTF-8" chrootpath="/mnt/aif" reboot="1">
        <users rootpass="!" />
        <service name="sshd" status="1" />
        <service name="cronie" status="1" />
        <service name="haveged" status="1" />
    </system>
    <pacman command="apacman -S">
        <repos>
            <repo name="core" enabled="true" siglevel="default" mirror="file:///etc/pacman.d/mirrorlist" />
            <repo name="extra" enabled="true" siglevel="default" mirror="file:///etc/pacman.d/mirrorlist" />
            <repo name="community" enabled="true" siglevel="default" mirror="file:///etc/pacman.d/mirrorlist" />
            <repo name="multilib" enabled="true" siglevel="default" mirror="file:///etc/pacman.d/mirrorlist" />
            <repo name="testing" enabled="false" siglevel="default" mirror="file:///etc/pacman.d/mirrorlist" />
            <repo name="multilib-testing" enabled="false" siglevel="default" mirror="file:///etc/pacman.d/mirrorlist" />
            <repo name="archlinuxfr" enabled="false" siglevel="Optional TrustedOnly" mirror="http://repo.archlinux.fr/$arch" />
        </repos>
        <mirrorlist>
            <mirror>http://mirror.us.leaseweb.net/archlinux/$repo/os/$arch</mirror>
            <mirror>http://mirrors.advancedhosters.com/archlinux/$repo/os/$arch</mirror>
            <mirror>http://ftp.osuosl.org/pub/archlinux/$repo/os/$arch</mirror>
            <mirror>http://arch.mirrors.ionfish.org/$repo/os/$arch</mirror>
            <mirror>http://mirrors.gigenet.com/archlinux/$repo/os/$arch</mirror>
            <mirror>http://mirror.jmu.edu/pub/archlinux/$repo/os/$arch</mirror>
        </mirrorlist>
        <software>
            <package name="sed" repo="core" />
            <package name="python" />
            <package name="openssh" />
            <package name="vim" />
            <package name="vim-plugins" />
            <package name="haveged" />
            <package name="byobu" />
            <package name="etc-update" />
            <package name="cronie" />
            <package name="mlocate" />
            <package name="mtree-git" />
        </software>
    </pacman>
    <bootloader type="grub" target="/boot" efi="true" />
    <scripts>
        <script uri="https://aif.square-r00t.net/cfgs/scripts/pkg/python.sh" order="1" execution="pkg" />
        <script uri="https://aif.square-r00t.net/cfgs/scripts/pkg/apacman.py" order="2" execution="pkg" />
        <script uri="https://aif.square-r00t.net/cfgs/scripts/post/sshsecure.py" order="1" execution="post" />
        <script uri="https://aif.square-r00t.net/cfgs/scripts/post/sshkeys.py" order="2" execution="post" />
        <script uri="https://aif.square-r00t.net/cfgs/scripts/post/configs.py" order="3" execution="post" />
    </scripts>
</aif>
```
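The config above is consumed by AIF itself, but as a rough illustration of its structure, here is a minimal sketch (not part of this commit) of reading the `<storage>` layout with Python's standard library; the element and attribute names come from the XML above, and the file path assumes a checkout of this repo:

```python
# Minimal sketch of walking base.xml's <storage> section with the
# standard library. The child elements (<disk>, <part>, <mount>) are
# unprefixed, so no namespace handling is needed here.
import xml.etree.ElementTree as ET

tree = ET.parse('aif/cfgs/base.xml')  # assumes a repo checkout
root = tree.getroot()

for disk in root.iter('disk'):
    print('disk: {0} ({1})'.format(disk.get('device'), disk.get('diskfmt')))
    for part in disk.iter('part'):
        print('  part {0}: start={1} size={2} fstype={3}'.format(
            part.get('num'), part.get('start'),
            part.get('size'), part.get('fstype')))

for mount in root.iter('mount'):
    print('mount {0} -> {1} (order {2})'.format(
        mount.get('source'), mount.get('target'), mount.get('order')))
```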
98  aif/scripts/pkg/apacman.py  (Normal file)

@@ -0,0 +1,98 @@

```python
#!/usr/bin/env python3

import datetime
import os
import re
import shutil
import subprocess
from urllib.request import urlopen

pkg_base = 'apacman'
pkgs = ('', '-deps', '-utils')
url_base = 'https://aif.square-r00t.net/cfgs/files'
local_dir = '/tmp'

conf_options = {}
conf_options['apacman'] = {'enabled': ['needed', 'noconfirm', 'noedit', 'progress', 'purgebuild', 'skipcache', 'keepkeys'],
                           'disabled': [],
                           'values': {'tmpdir': '"/var/tmp/apacmantmp-$UID"'}}
conf_options['pacman'] = {'enabled': [],
                          'disabled': [],
                          'values': {'UseSyslog': None, 'Color': None, 'TotalDownload': None, 'CheckSpace': None, 'VerbosePkgLists': None}}

def downloadPkg(pkgfile, dlfile):
    url = os.path.join(url_base, pkgfile)
    # Prep the destination
    os.makedirs(os.path.dirname(dlfile), exist_ok = True)
    # Download the pacman package
    with urlopen(url) as u:
        with open(dlfile, 'wb') as f:
            f.write(u.read())
    return()

def installPkg(pkgfile):
    # Install it
    subprocess.run(['pacman', '-Syyu'])  # Installing from an inconsistent state is bad, mmkay?
    subprocess.run(['pacman', '--noconfirm', '--needed', '-S', 'base-devel'])
    subprocess.run(['pacman', '--noconfirm', '--needed', '-S', 'multilib-devel'])
    subprocess.run(['pacman', '--noconfirm', '--needed', '-U', pkgfile])
    return()

def configurePkg(opts, pkgr):
    cf = '/etc/{0}.conf'.format(pkgr)
    # Configure it
    shutil.copy2(cf, '{0}.bak.{1}'.format(cf, int(datetime.datetime.utcnow().timestamp())))
    with open(cf, 'r') as f:
        conf = f.readlines()
    for idx, line in enumerate(conf):
        l = line.split('=')
        opt = l[0].strip('\n').strip()
        if len(l) > 1:
            val = l[1].strip('\n').strip()
        # enabled options
        for o in opts['enabled']:
            if re.sub('^#?', '', opt).strip() == o:
                if pkgr == 'apacman':
                    conf[idx] = '{0}=1\n'.format(o)
                elif pkgr == 'pacman':
                    conf[idx] = '{0}\n'.format(o)
        # disabled options
        for o in opts['disabled']:
            if re.sub('^#?', '', opt).strip() == o:
                if pkgr == 'apacman':
                    conf[idx] = '{0}=0\n'.format(o)
                elif pkgr == 'pacman':
                    conf[idx] = '#{0}\n'.format(o)
        # values
        for o in opts['values']:
            if opts['values'][o] is not None:
                if re.sub('^#?', '', opt).strip() == o:
                    if pkgr == 'apacman':
                        conf[idx] = '{0}={1}\n'.format(o, opts['values'][o])
                    elif pkgr == 'pacman':
                        conf[idx] = '{0} = {1}\n'.format(o, opts['values'][o])
            else:
                if re.sub('^#?', '', opt).strip() == o:
                    conf[idx] = '{0}\n'.format(o)
    with open(cf, 'w') as f:
        f.write(''.join(conf))

def finishPkg():
    # Finish installing (optional deps)
    for p in ('git', 'customizepkg-scripting', 'pkgfile', 'rsync'):
        subprocess.run(['apacman', '--noconfirm', '--needed', '-S', p])

def main():
    for p in pkgs:
        pkg = pkg_base + p
        fname = '{0}.tar.xz'.format(pkg)
        local_pkg = os.path.join(local_dir, fname)
        downloadPkg(fname, local_pkg)
        installPkg(local_pkg)
    for tool in ('pacman', 'apacman'):
        configurePkg(conf_options[tool], tool)
    finishPkg()
    return()

if __name__ == '__main__':
    main()
```
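The subtlest part of the script above is the in-place config rewrite in `configurePkg()`: `re.sub('^#?', '', opt)` strips an optional leading `#`, so a commented-out option name compares equal to an active one and gets rewritten on its existing line rather than appended to the file. A standalone demonstration (not part of the commit) on hypothetical config lines:

```python
# Demonstration of the option matching used in configurePkg() above:
# stripping an optional leading '#' makes '#Color' and 'Color' compare
# equal, so a commented-out pacman.conf option is matched in place.
import re

sample_lines = ['#Color\n', 'Color\n', '#CheckSpace\n', 'HoldPkg = pacman glibc\n']

for line in sample_lines:
    opt = line.split('=')[0].strip('\n').strip()   # same parsing as configurePkg()
    normalized = re.sub('^#?', '', opt).strip()
    print('{0!r:28} -> matches option name {1!r}'.format(line, normalized))
```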
3  aif/scripts/pkg/python.sh  (Normal file)

@@ -0,0 +1,3 @@

```bash
#!/bin/bash

pacman --needed --noconfirm -S python python-pip python-setuptools
```
136
aif/scripts/post/configs.py
Normal file
@@ -0,0 +1,136 @@
#!/usr/bin/env python3

import os
import pwd
import subprocess


def byobu(user = 'root'):
    homedir = os.path.expanduser('~{0}'.format(user))
    subprocess.run(['byobu-enable'])
    b = '{0}/.byobu'.format(homedir)
    # byobu-enable only sets up the invoking user, so make sure the target config dir exists.
    os.makedirs(b, exist_ok = True)
    # The keybindings, and general enabling
    confs = {'backend': 'BYOBU_BACKEND=tmux\n',
             'color': 'BACKGROUND=k\nFOREGROUND=w\nMONOCHROME=0',  # NOT a typo; the original source I got this from had no end newline.
             'color.tmux': 'BYOBU_DARK="\#333333"\nBYOBU_LIGHT="\#EEEEEE"\nBYOBU_ACCENT="\#75507B"\nBYOBU_HIGHLIGHT="\#DD4814"\n',
             'datetime.tmux': 'BYOBU_DATE="%Y-%m-%d "\nBYOBU_TIME="%H:%M:%S"\n',
             'keybindings': 'source $BYOBU_PREFIX/share/byobu/keybindings/common\n',
             'keybindings.tmux': 'unbind-key -n C-a\nset -g prefix ^A\nset -g prefix2 ^A\nbind a send-prefix\n',
             'profile': 'source $BYOBU_PREFIX/share/byobu/profiles/common\n',
             'profile.tmux': 'source $BYOBU_PREFIX/share/byobu/profiles/tmux\n',
             'prompt': '[ -r /usr/share/byobu/profiles/bashrc ] && . /usr/share/byobu/profiles/bashrc #byobu-prompt#\n',
             '.screenrc': None,
             '.tmux.conf': None,
             '.welcome-displayed': None,
             'windows': None,
             'windows.tmux': None}
    for c in confs.keys():
        with open('{0}/{1}'.format(b, c), 'w') as f:
            if confs[c] is not None:
                f.write(confs[c])
            else:
                f.write('')
    # The status file - add some extras, and remove the session string, which is apparently broken.
    # Holy shit I wish there was a way of storing compressed text in plaintext besides base64.
    statusconf = ["# status - Byobu's default status enabled/disabled settings\n", '#\n', '# Override these in $BYOBU_CONFIG_DIR/status\n',
                  '# where BYOBU_CONFIG_DIR is XDG_CONFIG_HOME if defined,\n', '# and $HOME/.byobu otherwise.\n', '#\n',
                  '# Copyright (C) 2009-2011 Canonical Ltd.\n', '#\n', '# Authors: Dustin Kirkland <kirkland@byobu.org>\n', '#\n',
                  '# This program is free software: you can redistribute it and/or modify\n', '# it under the terms of the GNU ' +
                  'General Public License as published by\n', '# the Free Software Foundation, version 3 of the License.\n', '#\n',
                  '# This program is distributed in the hope that it will be useful,\n', '# but WITHOUT ANY WARRANTY; without even the ' +
                  'implied warranty of\n', '# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n', '# GNU General Public License ' +
                  'for more details.\n', '#\n', '# You should have received a copy of the GNU General Public License\n', '# along with this ' +
                  'program. If not, see <http://www.gnu.org/licenses/>.\n', '\n', "# Status beginning with '#' are disabled.\n", '\n', '# Screen has ' +
                  'two status lines, with 4 quadrants for status\n', 'screen_upper_left="color"\n', 'screen_upper_right="color whoami hostname ' +
                  'ip_address menu"\n', 'screen_lower_left="color logo distro release #arch session"\n', 'screen_lower_right="color network #disk_io ' +
                  'custom #entropy raid reboot_required updates_available #apport #services #mail users uptime #ec2_cost #rcs_cost #fan_speed #cpu_temp ' +
                  'battery wifi_quality #processes load_average cpu_count cpu_freq memory #swap disk #time_utc date time"\n', '\n', '# Tmux has one ' +
                  'status line, with 2 halves for status\n', 'tmux_left=" logo #distro release arch #session"\n', '# You can have as many tmux right ' +
                  'lines below here, and cycle through them using Shift-F5\n', 'tmux_right=" network disk_io #custom #entropy raid reboot_required ' +
                  '#updates_available #apport services #mail #users uptime #ec2_cost #rcs_cost #fan_speed #cpu_temp #battery #wifi_quality processes ' +
                  'load_average cpu_count cpu_freq memory #swap disk whoami hostname ip_address time_utc date time"\n', '#tmux_right="network ' +
                  '#disk_io #custom entropy raid reboot_required updates_available #apport #services #mail users uptime #ec2_cost #rcs_cost fan_speed ' +
                  'cpu_temp battery wifi_quality #processes load_average cpu_count cpu_freq memory #swap #disk whoami hostname ip_address #time_utc ' +
                  'date time"\n', '#tmux_right="network #disk_io custom #entropy raid reboot_required updates_available #apport #services #mail users ' +
                  'uptime #ec2_cost #rcs_cost #fan_speed #cpu_temp battery wifi_quality #processes load_average cpu_count cpu_freq memory #swap #disk ' +
                  '#whoami #hostname ip_address #time_utc date time"\n', '#tmux_right="#network disk_io #custom entropy #raid #reboot_required ' +
                  '#updates_available #apport #services #mail #users #uptime #ec2_cost #rcs_cost fan_speed cpu_temp #battery #wifi_quality #processes ' +
                  '#load_average #cpu_count #cpu_freq #memory #swap whoami hostname ip_address #time_utc disk date time"\n']
    with open('{0}/status'.format(b), 'w') as f:
        f.write(''.join(statusconf))
    # The statusrc file is another lengthy one.
    statusrc = ["# statusrc - Byobu's default status configurations\n", '#\n', '# Override these in $BYOBU_CONFIG_DIR/statusrc\n',
                '# where BYOBU_CONFIG_DIR is XDG_CONFIG_HOME if defined,\n', '# and $HOME/.byobu otherwise.\n', '#\n', '# Copyright (C) ' +
                '2009-2011 Canonical Ltd.\n', '#\n', '# Authors: Dustin Kirkland <kirkland@byobu.org>\n', '#\n', '# This program is free software: ' +
                'you can redistribute it and/or modify\n', '# it under the terms of the GNU General Public License as published by\n',
                '# the Free Software Foundation, version 3 of the License.\n', '#\n', '# This program is distributed in the hope that it will be ' +
                'useful,\n', '# but WITHOUT ANY WARRANTY; without even the implied warranty of\n', '# MERCHANTABILITY or FITNESS FOR A PARTICULAR ' +
                'PURPOSE. See the\n', '# GNU General Public License for more details.\n', '#\n', '# You should have received a copy of the GNU ' +
                'General Public License\n', '# along with this program. If not, see <http://www.gnu.org/licenses/>.\n', '\n', '# Configurations that ' +
                'you can override; if you leave these commented out,\n', '# Byobu will try to auto-detect them.\n', '\n', '# This should be auto-detected ' +
                'for most distro, but setting it here will save\n', '# some call to lsb_release and the like.\n', '#BYOBU_DISTRO=Ubuntu\n', '\n',
                '# Default: depends on the distro (which is either auto-detected, either set\n', '# via $DISTRO)\n', '#LOGO="\\o/"\n', '\n', '# Abbreviate ' +
                'the release to N characters\n', '# By default, this is disabled. But if you set RELEASE_ABBREVIATED=1\n', '# and your lsb_release is ' +
                '"precise", only "p" will be displayed\n', '#RELEASE_ABBREVIATED=1\n', '\n', '# Default: /\n', '#MONITORED_DISK=/\n', '\n', '# Minimum ' +
                'disk throughput that triggers the notification (in kB/s)\n', '# Default: 50\n', '#DISK_IO_THRESHOLD=50\n', '\n', '# Default: eth0\n',
                '#MONITORED_NETWORK=eth0\n', '\n', '# Unit used for network throughput (either bits per second or bytes per second)\n', '# Default: ' +
                'bits\n', '#NETWORK_UNITS=bytes\n', '\n', '# Minimum network throughput that triggers the notification (in kbit/s)\n', '# Default: 20\n',
                '#NETWORK_THRESHOLD=20\n', '\n', '# You can add an additional source of temperature here\n', '#MONITORED_TEMP=/proc/acpi/thermal_zone/' +
                'THM0/temperature\n', '\n', '# Default: C\n', '#TEMP=F\n', '\n', '#SERVICES="eucalyptus-nc|NC eucalyptus-cloud|CLC eucalyptus-walrus ' +
                'eucalyptus-cc|CC eucalyptus-sc|SC"\n', '\n', '#FAN=$(find /sys -type f -name fan1_input | head -n1)\n', '\n', '# You can set this to 1 ' +
                'to report your external/public ip address\n', '# Default: 0\n', '#IP_EXTERNAL=0\n', '\n', '# The users notification normally counts ssh ' +
                "sessions; set this configuration to '1'\n", '# to instead count number of distinct users logged onto the system\n', '# Default: 0\n',
                '#USERS_DISTINCT=0\n', '\n', '# Set this to zero to hide seconds int the time display\n', '# Default 1\n', '#TIME_SECONDS=0\n']
    with open('{0}/statusrc'.format(b), 'w') as f:
        f.write(''.join(statusrc))
    setPerms(user, b)
    return()


def vim():
    vimc = ['\n', 'set nocompatible\n', 'set number\n', 'syntax on\n', 'set paste\n', 'set ruler\n', 'if has("autocmd")\n', ' au BufReadPost * if ' +
            'line("\'\\"") > 1 && line("\'\\"") <= line("$") | exe "normal! g\'\\"" | endif\n', 'endif\n', '\n', '" bind F3 to insert a timestamp.\n', '" In ' +
            'normal mode, insert.\n', 'nmap <F3> i<C-R>=strftime("%c")<CR><Esc>\n', '\n', 'set pastetoggle=<F2>\n', '\n', '" https://stackoverflow.com/' +
            'questions/27771616/turn-off-all-automatic-code-complete-in-jedi-vim\n', 'let g:jedi#completions_enabled = 0\n', 'let g:jedi#show_call_' +
            'signatures = "0"\n']
    with open('/etc/vimrc', 'a') as f:
        f.write(''.join(vimc))
    setPerms('root', '/etc/vimrc')
    return()


def bash():
    bashc = ['\n', 'alias vi=/usr/bin/vim\n', 'export EDITOR=vim\n', '\n', 'if [ -f ~/.bashrc ];\n', 'then\n', ' source ~/.bashrc\n', 'fi\n',
             'if [ -d ~/bin ];\n', 'then\n', ' export PATH="$PATH:~/bin"\n', 'fi\n', '\n', 'alias grep="grep --color"\n',
             'alias egrep="egrep --color"\n', '\n', 'alias ls="ls --color=auto"\n', 'alias vi="/usr/bin/vim"\n', '\n', 'export HISTTIMEFORMAT="%F %T "\n',
             'export PATH="${PATH}:/sbin:/bin:/usr/sbin"\n']
    with open('/etc/bash.bashrc', 'a') as f:
        f.write(''.join(bashc))
    setPerms('root', '/etc/bash.bashrc')
    return()


def mlocate():
    subprocess.run(['updatedb'])
    return()


def setPerms(user, path):
    uid = pwd.getpwnam(user).pw_uid
    gid = pwd.getpwnam(user).pw_gid
    if os.path.isfile(path):
        # os.walk() yields nothing for a plain file (e.g. /etc/vimrc), so handle that case directly;
        # otherwise the vim() and bash() calls above would silently be no-ops.
        os.chown(path, uid, gid)
        os.chmod(path, 0o644)
        return()
    for basedir, dirs, files in os.walk(path):
        os.chown(basedir, uid, gid)
        os.chmod(basedir, 0o755)
        for f in files:
            os.chown(os.path.join(basedir, f), uid, gid)
            os.chmod(os.path.join(basedir, f), 0o644)
    return()


def main():
    byobu()
    vim()
    bash()
    mlocate()


if __name__ == '__main__':
    main()
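On the base64 lament in byobu() above: the stdlib can already store "compressed text in plaintext" by stacking lzma on top of base64, which is the same approach the autopkg setup.py further down takes for its blank_db blob. A minimal sketch (the pack/unpack names are ours, not part of configs.py):

import base64
import lzma


def pack(text):
    # Compress with xz, then armor the binary result as base64 so it can live in a source file.
    return(base64.b64encode(lzma.compress(text.encode('utf-8'))).decode('utf-8'))


def unpack(blob):
    # Reverse of pack(): de-armor, then decompress.
    return(lzma.decompress(base64.b64decode(blob)).decode('utf-8'))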
206
aif/scripts/post/hostscan.py
Executable file
@@ -0,0 +1,206 @@
#!/usr/bin/env python3

# Note: for hashed known-hosts, https://gist.github.com/maxtaco/5080023

import argparse
import grp
import os
import pwd
import re
import subprocess
import sys

# Defaults
#def_supported_keys = subprocess.run(['ssh',
#                                     '-Q',
#                                     'key'], stdout = subprocess.PIPE).stdout.decode('utf-8').splitlines()
def_supported_keys = ['dsa', 'ecdsa', 'ed25519', 'rsa']
def_mode = 'append'
def_syshostkeys = '/etc/ssh/ssh_known_hosts'
def_user = pwd.getpwuid(os.geteuid())[0]
def_grp = grp.getgrgid(os.getegid())[0]


class hostscanner(object):
    def __init__(self, args):
        self.args = args
        if self.args['keytypes'] == ['all']:
            self.args['keytypes'] = def_supported_keys
        if self.args['system']:
            if os.geteuid() != 0:
                exit(('You have specified system-wide modification but ' +
                      'are not running with root privileges! Exiting.'))
            self.args['output'] = def_syshostkeys
        if self.args['output'] != sys.stdout:
            _pardir = os.path.dirname(os.path.abspath(os.path.expanduser(self.args['output'])))
            if _pardir.startswith('/home'):
                _octmode = 0o700
            else:
                _octmode = 0o755
            os.makedirs(_pardir, mode = _octmode, exist_ok = True)
            os.chown(_pardir,
                     pwd.getpwnam(self.args['chown_user'])[2],
                     grp.getgrnam(self.args['chown_grp'])[2])

    def getHosts(self):
        self.keys = {}
        _hosts = os.path.abspath(os.path.expanduser(self.args['infile']))
        with open(_hosts, 'r') as f:
            for l in f.readlines():
                l = l.strip()
                if re.search('^\s*(#.*)?$', l):
                    continue  # Skip commented and blank lines
                # Note: re.sub()'s fourth positional argument is *count*, not flags,
                # so any flags have to be passed via the keyword.
                k = re.sub('^([0-9a-z-\.]+)\s*#.*$',
                           '\g<1>',
                           l.strip().lower(),
                           flags = re.MULTILINE)
                self.keys[k] = []
        return()

    def getKeys(self):
        def parseType(k):
            _newkey = re.sub('^ssh-', '', k).split('-')[0]
            if _newkey == 'dss':
                _newkey = 'dsa'
            return(_newkey)
        for h in list(self.keys.keys()):
            _h = h.split(':')
            if len(_h) == 1:
                _host = _h[0]
                _port = 22
            elif len(_h) == 2:
                _host = _h[0]
                _port = int(_h[1])
            _cmdline = ['ssh-keyscan',
                        '-t', ','.join(self.args['keytypes']),
                        '-p', str(_port),
                        _host]
            if self.args['hash']:
                # https://security.stackexchange.com/a/56283
                # verify via:
                # SAMPLE ENTRY: |1|F1E1KeoE/eEWhi10WpGv4OdiO6Y=|3988QV0VE8wmZL7suNrYQLITLCg= ssh-rsa ...
                # key=$(echo F1E1KeoE/eEWhi10WpGv4OdiO6Y= | base64 -d | xxd -p)
                # echo -n "192.168.1.61" | openssl sha1 -mac HMAC -macopt hexkey:${key} | awk '{print $2}' | xxd -r -p | base64
                _cmdline.insert(1, '-H')
            _cmd = subprocess.run(_cmdline,
                                  stdout = subprocess.PIPE,
                                  stderr = subprocess.PIPE)
            if not re.match('\s*#.*', _cmd.stderr.decode('utf-8')):
                _printerr = []
                for i in _cmd.stderr.decode('utf-8').splitlines():
                    if i.strip() not in _printerr:
                        _printerr.append(i.strip())
                print('{0}: errors detected; skipping ({1})'.format(h, '\n'.join(_printerr)))
                del(self.keys[h])
                continue
            for l in _cmd.stdout.decode('utf-8').splitlines():
                _l = l.split()
                _key = {'type': _l[1],
                        'host': _l[0],
                        'key': _l[2]}
                if parseType(_key['type']) in self.args['keytypes']:
                    self.keys[h].append(_key)
        return()

    def write(self):
        if self.args['writemode'] == 'replace':
            if self.args['output'] != sys.stdout and os.path.isfile(self.args['output']):
                # os.move() doesn't exist, and os.path.join() would have built a path *inside*
                # the output file; what we want is a sibling ".bak" backup.
                os.rename(self.args['output'], '{0}.bak'.format(self.args['output']))
        for h in self.keys.keys():
            for i in self.keys[h]:
                _s = '# Automatically added via hostscan.py\n{0} {1} {2}\n'.format(i['host'],
                                                                                   i['type'],
                                                                                   i['key'])
                if self.args['output'] == sys.stdout:
                    print(_s, end = '')
                else:
                    with open(self.args['output'], 'a') as f:
                        f.write(_s)
        if self.args['output'] != sys.stdout:
            os.chmod(self.args['output'], 0o644)
            os.chown(self.args['output'],
                     pwd.getpwnam(self.args['chown_user'])[2],
                     grp.getgrnam(self.args['chown_grp'])[2])
        return()


def parseArgs():
    def getTypes(t):
        keytypes = t.split(',')
        keytypes = [k.strip() for k in keytypes]
        for k in keytypes:
            if k not in ('all', *def_supported_keys):
                # ArgumentTypeError is the right exception for a type= callable;
                # ArgumentError requires the argument object as its first parameter.
                raise argparse.ArgumentTypeError('Must be one or more of the following: all, {0}'.format(', '.join(def_supported_keys)))
        return(keytypes)
    args = argparse.ArgumentParser(description = ('Scan a list of hosts and present their hostkeys in ' +
                                                  'a format suitable for an SSH known_hosts file.'))
    args.add_argument('-u',
                      '--user',
                      dest = 'chown_user',
                      default = def_user,
                      help = ('The username to chown the file to (if \033[1m{0}\033[0m is specified). ' +
                              'Default: \033[1m{1}\033[0m').format('-o/--output', def_user))
    args.add_argument('-g',
                      '--group',
                      dest = 'chown_grp',
                      default = def_grp,
                      help = ('The group to chown the file to (if \033[1m{0}\033[0m is specified). ' +
                              'Default: \033[1m{1}\033[0m').format('-o/--output', def_grp))
    args.add_argument('-H',
                      '--hash',
                      dest = 'hash',
                      action = 'store_true',
                      help = ('If specified, hash the hostkeys (see ssh-keyscan(1)\'s -H option for more info)'))
    args.add_argument('-m',
                      '--mode',
                      dest = 'writemode',
                      default = def_mode,
                      choices = ['append', 'replace'],
                      help = ('If \033[1m{0}\033[0m is specified, the mode to use for the ' +
                              'destination file. The default is \033[1m{1}\033[0m').format('-o/--output', def_mode))
    args.add_argument('-k',
                      '--keytypes',
                      dest = 'keytypes',
                      type = getTypes,
                      default = 'all',
                      help = ('A comma-separated list of key types to add (if supported by the target host). ' +
                              'The default is to add all keys found. Must be one (or more) of: \033[1m{0}\033[0m').format(', '.join(def_supported_keys)))
    args.add_argument('-o',
                      '--output',
                      default = sys.stdout,
                      metavar = 'OUTFILE',
                      dest = 'output',
                      help = ('If specified, write the hostkeys to \033[1m{0}\033[0m instead of ' +
                              '\033[1m{1}\033[0m (the default). ' +
                              'Overrides \033[1m{2}\033[0m').format('OUTFILE',
                                                                    'stdout',
                                                                    '-S/--system-wide'))
    args.add_argument('-S',
                      '--system-wide',
                      dest = 'system',
                      action = 'store_true',
                      help = ('If specified, apply to the entire system (not just the ' +
                              'specified/running user) via {0}. ' +
                              'Requires \033[1m{1}\033[0m in /etc/ssh/ssh_config (usually ' +
                              'enabled silently by default) and running with root ' +
                              'privileges').format(def_syshostkeys,
                                                   'GlobalKnownHostsFile {0}'.format(def_syshostkeys)))
    args.add_argument(metavar = 'HOSTLIST_FILE',
                      dest = 'infile',
                      help = ('The path to the list of hosts. Can contain blank lines and/or comments. ' +
                              'One host per line. Can be \033[1m{0}\033[0m (as long as it\'s resolvable), ' +
                              '\033[1m{1}\033[0m, or \033[1m{2}\033[0m. To specify an alternate port, ' +
                              'add \033[1m{3}\033[0m to the end (e.g. ' +
                              '"some.host.tld:22")').format('hostname',
                                                            'IP address',
                                                            'FQDN',
                                                            ':<PORTNUM>'))
    return(args)


def main():
    args = vars(parseArgs().parse_args())
    scan = hostscanner(args)
    scan.getHosts()
    scan.getKeys()
    scan.write()


if __name__ == '__main__':
    main()
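The hashed-entry verification recipe quoted in getKeys()'s comments translates to a few lines of stdlib Python. A sketch of the same HMAC-SHA1 check (the function name is ours, not part of hostscan.py):

import base64
import hashlib
import hmac


def matches_hashed_entry(entry_host, candidate):
    # entry_host looks like '|1|<base64 salt>|<base64 digest>'; the digest is
    # HMAC-SHA1 keyed with the salt, taken over the plain hostname/IP.
    _, _, salt, digest = entry_host.split('|')
    calc = hmac.new(base64.b64decode(salt), candidate.encode('utf-8'), hashlib.sha1).digest()
    return(hmac.compare_digest(calc, base64.b64decode(digest)))

# e.g. matches_hashed_entry('|1|F1E1KeoE/eEWhi10WpGv4OdiO6Y=|3988QV0VE8wmZL7suNrYQLITLCg=', '192.168.1.61')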
31
aif/scripts/post/sshkeys.py
Normal file
@@ -0,0 +1,31 @@
#!/usr/bin/env python3

import os
import pwd
from urllib.request import urlopen

keysfile = 'https://square-r00t.net/ssh/all'


def copyKeys(keystring, user = 'root'):
    uid = pwd.getpwnam(user).pw_uid
    gid = pwd.getpwnam(user).pw_gid
    homedir = os.path.expanduser('~{0}'.format(user))
    sshdir = '{0}/.ssh'.format(homedir)
    authfile = '{0}/authorized_keys'.format(sshdir)
    os.makedirs(sshdir, mode = 0o700, exist_ok = True)
    with open(authfile, 'a') as f:
        f.write(keystring)
    for basedir, dirs, files in os.walk(sshdir):
        os.chown(basedir, uid, gid)
        os.chmod(basedir, 0o700)
        for f in files:
            os.chown(os.path.join(basedir, f), uid, gid)
            os.chmod(os.path.join(basedir, f), 0o600)
    return()


def main():
    with urlopen(keysfile) as keys:
        copyKeys(keys.read().decode('utf-8'))


if __name__ == '__main__':
    main()
428
aif/scripts/post/sshsecure.py
Normal file
@@ -0,0 +1,428 @@
#!/usr/bin/env python3

# Pythonized automated way of running https://sysadministrivia.com/news/hardening-ssh-security
# TODO: check for the cryptography module. If it exists, we can do this entirely pythonically
# without ever needing to use subprocess/ssh-keygen, I think!

# Thanks to https://stackoverflow.com/a/39126754.

# Also, I need to re-write this. It's getting uglier.

# stdlib
import datetime
import glob
import os
import pwd
import re
import signal
import shutil
import subprocess  # REMOVE WHEN SWITCHING TO PURE PYTHON
#### PREP FOR PURE PYTHON IMPLEMENTATION ####
# # non-stdlib - testing and automatic install if necessary.
# # TODO #
# - cryptography module won't generate new-format "openssh-key-v1" keys.
#   - See https://github.com/pts/py_ssh_keygen_ed25519 for possible conversion to python 3
#   - https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
#   - https://github.com/pyca/cryptography/issues/3509 and https://github.com/paramiko/paramiko/issues/1136
# has_crypto = False
# pure_py = False
# has_pip = False
# pipver = None
# try:
#     import cryptography
#     has_crypto = True
# except ImportError:
#     # We'll try to install it. We set up the logic below.
#     try:
#         import pip
#         has_pip = True
#         # We'll use these to create a temporary lib path and remove it when done.
#         import sys
#         import tempfile
#     except ImportError:
#         # ABSOLUTE LAST fallback, if we got to THIS case, is to use subprocess.
#         has_pip = False
#         import subprocess
#
# # Try installing it then!
# if not all((has_crypto, )):
#     # venv only included after python 3.3.x. We fall back to subprocess if we can't do dis.
#     if sys.hexversion >= 0x30300f0:
#         has_ensurepip = False
#         import venv
#         if not has_pip and sys.hexversion >= 0x30400f0:
#             import ensurepip
#             has_ensurepip = True
#         temppath = tempfile.mkdtemp('_VENV')
#         v = venv.create(temppath)
#         if has_ensurepip and not has_pip:
#             # This SHOULD be unnecessary, but we want to try really hard.
#             ensurepip.bootstrap(root = temppath)
#             import pip
#             has_pip = True
#         if has_pip:
#             pipver = pip.__version__.split('.')
#             # A thousand people are yelling at me for this.
#             if int(pipver[0]) >= 10:
#                 from pip._internal import main as pipinstall
#             else:
#                 pipinstall = pip.main
#             if int(pipver[0]) >= 8:
#                 pipcmd = ['install',
#                           '--prefix={0}'.format(temppath),
#                           '--ignore-installed']
#             else:
#                 pipcmd = ['install',
#                           '--install-option="--prefix={0}"'.format(temppath),
#                           '--ignore-installed']
#             # Get the lib path.
#             libpath = os.path.join(temppath, 'lib')
#             if os.path.exists('{0}64'.format(libpath)) and not os.path.islink('{0}64'.format(libpath)):
#                 libpath += '64'
#             for i in os.listdir(libpath):  # TODO: make this more sane. We cheat a bit here by making assumptions.
#                 if re.search('python([0-9]+(\.[0-9]+)?)?$', i):
#                     libpath = os.path.join(libpath, i)
#                     break
#             libpath = os.path.join(libpath, 'site-packages')
#             sys.prefix = temppath
#             for m in ('cryptography', 'ed25519'):
#                 pipinstall(['install', 'cryptography'])
#             sys.path.append(libpath)
#             try:
#                 import cryptography
#                 has_crypto = True
#             except ImportError:  # All that trouble for nothin'. Shucks.
#                 pass
#
# if all((has_crypto, )):
#     pure_py = True
#
# if pure_py:
#     from cryptography.hazmat.primitives import serialization as crypto_serialization
#     from cryptography.hazmat.primitives.asymmetric import rsa
#     from cryptography.hazmat.backends import default_backend as crypto_default_backend
#

# We need static backup suffixes.
tstamp = int(datetime.datetime.utcnow().timestamp())

# TODO: associate various config directives with version, too.
# For now, we use this primarily for CentOS 6.x, which doesn't support ED25519 and probably some of the MACs.
# Bastards.
# https://ssh-comparison.quendi.de/comparison/cipher.html at some point in the future...
# TODO: maybe implement some parsing of the ssh -Q stuff? https://superuser.com/a/869005/984616
# If you encounter a version incompatibility, please let me know!
# nmap --script ssh2-enum-algos -PN -sV -p22 <host>
magic_ver = 6.5
ssh_ver = subprocess.run(['ssh', '-V'], stderr = subprocess.PIPE).stderr.decode('utf-8').strip().split()[0]
# FUCK YOU, DEBIAN. FUCK YOU AND ALL OF YOUR DERIVATIVES. YOU'RE FUCKING TRASH.
# YOU BELONG NOWHERE NEAR A DATACENTER.
ssh_ver = float(re.sub('^(?:Open|Sun_)SSH_([0-9\.]+)(?:p[0-9]+)?(?:,.*)?.*$', '\g<1>', ssh_ver))
if ssh_ver >= magic_ver:
    has_ed25519 = True
    supported_keys = ('ed25519', 'rsa')
    new_moduli = False
else:
    has_ed25519 = False
    supported_keys = ('rsa', )
    new_moduli = False
# https://github.com/openssh/openssh-portable/commit/3e60d18fba1b502c21d64fc7e81d80bcd08a2092
if ssh_ver >= 8.1:
    new_moduli = True


conf_options = {}
conf_options['sshd'] = {'KexAlgorithms': 'diffie-hellman-group-exchange-sha256',
                        'Protocol': '2',
                        'HostKey': ['/etc/ssh/ssh_host_rsa_key'],
                        #'PermitRootLogin': 'prohibit-password',  # older daemons don't like "prohibit-..."
                        'PermitRootLogin': 'without-password',
                        'PasswordAuthentication': 'no',
                        'ChallengeResponseAuthentication': 'no',
                        'PubkeyAuthentication': 'yes',
                        'Ciphers': 'aes256-ctr,aes192-ctr,aes128-ctr',
                        'MACs': 'hmac-sha2-512,hmac-sha2-256'}
if has_ed25519:
    conf_options['sshd']['HostKey'].append('/etc/ssh/ssh_host_ed25519_key')
    conf_options['sshd']['KexAlgorithms'] = ','.join(('curve25519-sha256@libssh.org',
                                                      conf_options['sshd']['KexAlgorithms']))
    conf_options['sshd']['Ciphers'] = ','.join((('chacha20-poly1305@openssh.com,'
                                                 'aes256-gcm@openssh.com,'
                                                 'aes128-gcm@openssh.com'),
                                                conf_options['sshd']['Ciphers']))
    conf_options['sshd']['MACs'] = ','.join((('hmac-sha2-512-etm@openssh.com,'
                                              'hmac-sha2-256-etm@openssh.com,'
                                              'umac-128-etm@openssh.com'),
                                             conf_options['sshd']['MACs'],
                                             'umac-128@openssh.com'))
# Uncomment if this is further configured
#conf_options['sshd']['AllowGroups'] = 'ssh-user'

conf_options['ssh'] = {'Host': {'*': {'KexAlgorithms': 'diffie-hellman-group-exchange-sha256',
                                      'PubkeyAuthentication': 'yes',
                                      'HostKeyAlgorithms': 'ssh-rsa'}}}
if has_ed25519:
    conf_options['ssh']['Host']['*']['KexAlgorithms'] = ','.join(('curve25519-sha256@libssh.org',
                                                                  conf_options['ssh']['Host']['*']['KexAlgorithms']))
    conf_options['ssh']['Host']['*']['HostKeyAlgorithms'] = ','.join(
        (('ssh-ed25519-cert-v01@openssh.com,'
          'ssh-rsa-cert-v01@openssh.com,'
          'ssh-ed25519'),
         conf_options['ssh']['Host']['*']['HostKeyAlgorithms']))


def hostKeys(buildmoduli):
    # Starting haveged should help lessen the time load a non-negligible amount, especially on virtual platforms.
    if os.path.lexists('/usr/bin/haveged'):
        # We could use psutil here, but then that's a python dependency we don't need.
        # We could parse the /proc directory, but that's quite unnecessary. pgrep's installed by default on
        # most distros.
        with open(os.devnull, 'wb') as devnull:
            if subprocess.run(['pgrep', 'haveged'], stdout = devnull).returncode != 0:
                subprocess.run(['haveged'], stdout = devnull)
    # Warning: the moduli generation takes a LONG time to run. Hours.
    if buildmoduli:
        if not new_moduli:
            subprocess.run(['ssh-keygen',
                            '-G', '/etc/ssh/moduli.all',
                            '-b', '4096',
                            '-q'])
            subprocess.run(['ssh-keygen',
                            '-T', '/etc/ssh/moduli.safe',
                            '-f', '/etc/ssh/moduli.all',
                            '-q'])
        else:
            subprocess.run(['ssh-keygen',
                            '-q',
                            '-M', 'generate',
                            '-O', 'bits=4096',
                            '/etc/ssh/moduli.all'])
            subprocess.run(['ssh-keygen',
                            '-q',
                            '-M', 'screen',
                            '-f', '/etc/ssh/moduli.all',
                            '/etc/ssh/moduli.safe'])
        if os.path.lexists('/etc/ssh/moduli'):
            os.rename('/etc/ssh/moduli', '/etc/ssh/moduli.old')
        os.rename('/etc/ssh/moduli.safe', '/etc/ssh/moduli')
        os.remove('/etc/ssh/moduli.all')
    for suffix in ('', '.pub'):
        for k in glob.glob('/etc/ssh/ssh_host_*key{0}'.format(suffix)):
            os.rename(k, '{0}.old.{1}'.format(k, tstamp))
    if has_ed25519:
        subprocess.run(['ssh-keygen',
                        '-t', 'ed25519',
                        '-f', '/etc/ssh/ssh_host_ed25519_key',
                        '-q',
                        '-N', ''])
    subprocess.run(['ssh-keygen',
                    '-t', 'rsa',
                    '-b', '4096',
                    '-f', '/etc/ssh/ssh_host_rsa_key',
                    '-q',
                    '-N', ''])
    # We currently don't use this, but for simplicity's sake let's return the host keys.
    hostkeys = {}
    for k in supported_keys:
        with open('/etc/ssh/ssh_host_{0}_key.pub'.format(k), 'r') as f:
            hostkeys[k] = f.read()
    return(hostkeys)


def config(opts, t):
    special = {'sshd': {}, 'ssh': {}}
    # We need to handle these directives a little differently...
    special['sshd']['opts'] = ['Match']
    special['sshd']['filters'] = ['User', 'Group', 'Host', 'LocalAddress', 'LocalPort', 'Address']
    # These are arguments supported by each of the special options. We'll use this to verify entries.
    special['sshd']['args'] = ['AcceptEnv', 'AllowAgentForwarding', 'AllowGroups', 'AllowStreamLocalForwarding',
                               'AllowTcpForwarding', 'AllowUsers', 'AuthenticationMethods', 'AuthorizedKeysCommand',
                               'AuthorizedKeysCommandUser', 'AuthorizedKeysFile', 'AuthorizedPrincipalsCommand',
                               'AuthorizedPrincipalsCommandUser', 'AuthorizedPrincipalsFile', 'Banner',
                               'ChrootDirectory', 'ClientAliveCountMax', 'ClientAliveInterval', 'DenyGroups',
                               'DenyUsers', 'ForceCommand', 'GatewayPorts', 'GSSAPIAuthentication',
                               'HostbasedAcceptedKeyTypes', 'HostbasedAuthentication',
                               'HostbasedUsesNameFromPacketOnly', 'IPQoS', 'KbdInteractiveAuthentication',
                               'KerberosAuthentication', 'MaxAuthTries', 'MaxSessions', 'PasswordAuthentication',
                               'PermitEmptyPasswords', 'PermitOpen', 'PermitRootLogin', 'PermitTTY', 'PermitTunnel',
                               'PermitUserRC', 'PubkeyAcceptedKeyTypes', 'PubkeyAuthentication', 'RekeyLimit',
                               'RevokedKeys', 'StreamLocalBindMask', 'StreamLocalBindUnlink', 'TrustedUserCAKeys',
                               'X11DisplayOffset', 'X11Forwarding', 'X11UseLocalHost']
    special['ssh']['opts'] = ['Host', 'Match']
    special['ssh']['args'] = ['canonical', 'exec', 'host', 'originalhost', 'user', 'localuser']
    cf = '/etc/ssh/{0}_config'.format(t)
    shutil.copy2(cf, '{0}.bak.{1}'.format(cf, tstamp))
    with open(cf, 'r') as f:
        conf = f.readlines()
    conf.append('\n\n# Added per https://sysadministrivia.com/news/hardening-ssh-security\n\n')
    confopts = []
    # Get an index of directives pre-existing in the config file.
    for line in conf[:]:
        opt = line.split()
        if opt:
            if not re.match('^(#.*|\s+.*)$', opt[0]):
                confopts.append(opt[0])
    # We also need to modify the config file - comment out starting with the first occurrence of the
    # specopts, if it exists. This is why we make a backup.
    commentidx = None
    for idx, i in enumerate(conf):
        if re.match('^({0})\s+.*$'.format('|'.join(special[t]['opts'])), i):
            commentidx = idx
            break
    if commentidx is not None:
        idx = commentidx
        while idx <= (len(conf) - 1):
            conf[idx] = '#{0}'.format(conf[idx])
            idx += 1
    # Now we actually start replacing/adding some major configuration.
    for o in opts.keys():
        if o in special[t]['opts'] or isinstance(opts[o], dict):
            # We need to put these at the bottom of the file due to how they're handled by sshd's config parsing.
            continue
        # We handle these a little specially too - they're for multiple lines sharing the same directive.
        # Since the config should be explicit, we remove any existing entries specified that we find.
        else:
            if o in confopts:
                # If I was more worried about recursion, or if I was appending here, I should use conf[:].
                # But I'm not. So I won't.
                for idx, opt in enumerate(conf):
                    if re.match('^{0}(\s.*)?\n$'.format(o), opt):
                        conf[idx] = '#{0}'.format(opt)
            # Here we handle the "multiple-specifying" options - notably, HostKey.
            if isinstance(opts[o], list):
                for l in opts[o]:
                    if l is not None:
                        conf.append('{0} {1}\n'.format(o, l))
                    else:
                        conf.append('{0}\n'.format(o))
            else:
                # So it isn't something we explicitly save until the end (such as a Match or Host),
                # and it isn't something that's specified multiple times.
                if opts[o] is not None:
                    conf.append('{0} {1}\n'.format(o, opts[o]))
                else:
                    conf.append('{0}\n'.format(o))
    # NOW we can add the Host/Match/etc. directives.
    for o in opts.keys():
        if isinstance(opts[o], dict):
            for k in opts[o].keys():
                conf.append('{0} {1}\n'.format(o, k))
                for l in opts[o][k].keys():
                    if opts[o][k][l] is not None:
                        conf.append('\t{0} {1}\n'.format(l, opts[o][k][l]))
                    else:
                        conf.append('\t{0}\n'.format(l))
    with open(cf, 'w') as f:
        f.write(''.join(conf))
    return()


def clientKeys(user = 'root'):
    uid = pwd.getpwnam(user).pw_uid
    gid = pwd.getpwnam(user).pw_gid
    homedir = os.path.expanduser('~{0}'.format(user))
    sshdir = '{0}/.ssh'.format(homedir)
    os.makedirs(sshdir, mode = 0o700, exist_ok = True)
    if has_ed25519:
        if not os.path.lexists('{0}/id_ed25519'.format(sshdir)) \
                and not os.path.lexists('{0}/id_ed25519.pub'.format(sshdir)):
            subprocess.run(['ssh-keygen',
                            '-t', 'ed25519',
                            '-o',
                            '-a', '100',
                            '-f', '{0}/id_ed25519'.format(sshdir),
                            '-q',
                            '-N', ''])
    if not os.path.lexists('{0}/id_rsa'.format(sshdir)) and not os.path.lexists('{0}/id_rsa.pub'.format(sshdir)):
        if has_ed25519:
            subprocess.run(['ssh-keygen',
                            '-t', 'rsa',
                            '-b', '4096',
                            '-o',
                            '-a', '100',
                            '-f', '{0}/id_rsa'.format(sshdir),
                            '-q',
                            '-N', ''])
        else:
            subprocess.run(['ssh-keygen',
                            '-t', 'rsa',
                            '-b', '4096',
                            '-a', '100',
                            '-f', '{0}/id_rsa'.format(sshdir),
                            '-q',
                            '-N', ''])
    for basedir, dirs, files in os.walk(sshdir):
        os.chown(basedir, uid, gid)
        os.chmod(basedir, 0o700)
        for f in files:
            os.chown(os.path.join(basedir, f), uid, gid)
            os.chmod(os.path.join(basedir, f), 0o600)
    pubkeys = {}
    pubkeys[user] = {}
    for k in supported_keys:
        with open('{0}/id_{1}.pub'.format(sshdir, k), 'r') as f:
            pubkeys[user][k] = f.read()
    return(pubkeys)


def daemonMgr():
    # In case the script is running without sshd running.
    pidfile = '/var/run/sshd.pid'
    if not os.path.isfile(pidfile):
        return()
    # We're about to do somethin' stupid. Let's make it a teeny bit less stupid.
    with open(os.devnull, 'w') as devnull:
        confchk = subprocess.run(['sshd', '-T'], stdout = devnull)
    if confchk.returncode != 0:
        for suffix in ('', '.pub'):
            for k in glob.glob('/etc/ssh/ssh_host_*key{0}'.format(suffix)):
                os.rename('{0}.old.{1}'.format(k, tstamp), k)
        for conf in ('', 'd'):
            cf = '/etc/ssh/ssh{0}_config'.format(conf)
            # config() creates the backups with a ".bak.<timestamp>" suffix.
            os.rename('{0}.bak.{1}'.format(cf, tstamp), cf)
        exit('OOPS. We goofed. Backup restored and bailing out.')
    # We need to restart sshd once we're done. I feel dirty doing this, but this is the most cross-platform way I can
    # do it. First, we need the path to the PID file.
    # TODO: do some kind of better way of doing this.
    with open('/etc/ssh/sshd_config', 'r') as f:
        for line in f.readlines():
            if re.search('^\s*PidFile\s+.*', line):
                # Strip the line first, or the trailing newline ends up in the path.
                pidfile = re.sub('^\s*PidFile\s+(.*?)\s*(#.*)?$', '\g<1>', line.strip())
                break
    with open(pidfile, 'r') as f:
        pid = int(f.read().strip())
    os.kill(pid, signal.SIGHUP)
    return()


def main():
    self_pidfile = '/tmp/sshsecure.pid'
    is_running = False
    # First, check to see if we're already running.
    # This is where I'd put a psutil call... IF I HAD ONE.
    if os.path.isfile(self_pidfile):
        is_running = subprocess.run(['pgrep', '-F', self_pidfile], stdout = subprocess.PIPE)
        if is_running.stdout.decode('utf-8').strip() != '':
            # We're still running. Exit gracefully.
            print('We seem to still be running from a past execution; exiting')
            exit(0)
        else:
            # It's a stale PID file.
            os.remove(self_pidfile)
    with open(self_pidfile, 'w') as f:
        f.write(str(os.getpid()) + '\n')
    _chkfile = '/etc/ssh/.aif-generated'
    if not os.path.isfile(_chkfile):
        # Warning: the moduli generation can take a LONG time to run. Hours.
        buildmoduli = True
        hostKeys(buildmoduli)
        for t in ('sshd', 'ssh'):
            config(conf_options[t], t)
        clientKeys()
        with open(_chkfile, 'w') as f:
            f.write(('ssh, sshd, and hostkey configurations/keys have been modified by sshsecure.py from OpTools.\n'
                     'https://git.square-r00t.net/OpTools/\n'))
        daemonMgr()
    os.remove(self_pidfile)
    return()


if __name__ == '__main__':
    main()
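For the pure-Python path sketched in the commented block at the top of sshsecure.py, the cryptography primitives it imports are already enough for RSA (the ed25519/openssh-key-v1 caveat in those comments still stands). A rough sketch, assuming the cryptography package is installed; this is what the cited stackoverflow answer boils down to, not part of the script itself:

from cryptography.hazmat.backends import default_backend as crypto_default_backend
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(backend = crypto_default_backend(),
                               public_exponent = 65537,
                               key_size = 4096)
# PEM private key; note this is the old PEM container, not the new openssh-key-v1 format.
privkey = key.private_bytes(crypto_serialization.Encoding.PEM,
                            crypto_serialization.PrivateFormat.TraditionalOpenSSL,
                            crypto_serialization.NoEncryption())
# An "ssh-rsa AAAA..." line, suitable for a .pub file.
pubkey = key.public_key().public_bytes(crypto_serialization.Encoding.OpenSSH,
                                       crypto_serialization.PublicFormat.OpenSSH)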
147
arch/arch_mirror_ranking.py
Executable file
@@ -0,0 +1,147 @@
#!/usr/bin/env python3

import argparse
import datetime
# import dns  # TODO: replace server['ipv4'] with IPv4 address(es)? etc.
import json
import re
import sys
from urllib.request import urlopen
##
import iso3166


servers_json_url = 'https://www.archlinux.org/mirrors/status/json/'
protos = ('http', 'https', 'rsync')


class MirrorIdx(object):
    def __init__(self, country = None, proto = None, is_active = None, json_url = servers_json_url,
                 name_re = None, ipv4 = None, ipv6 = None, isos = None, statuses = False, *args, **kwargs):
        _tmpargs = locals()
        del(_tmpargs['self'])
        for k, v in _tmpargs.items():
            setattr(self, k, v)
        self.validateParams()
        self.servers_json = {}
        self.servers = []
        self.servers_with_scores = []
        self.ranked_servers = []
        self.fetchJSON()
        self.buildServers()
        self.rankServers()

    def fetchJSON(self):
        if self.statuses:
            sys.stderr.write('Fetching servers from {0}...\n'.format(self.json_url))
        with urlopen(self.json_url) as u:
            self.servers_json = json.load(u)
        return()

    def buildServers(self):
        _limiters = (self.ipv4, self.ipv6, self.isos)
        _filters = list(_limiters)
        _filters.extend([self.proto, self.name_re, self.country])
        _filters = tuple(_filters)
        if self.statuses:
            sys.stderr.write('Applying filters (if any)...\n')
        for s in self.servers_json['urls']:
            # We handle this as "tri-value" (None, True, False)
            if self.is_active is not None:
                if s['active'] != self.is_active:
                    continue
            if not any(_filters):
                self.servers.append(s.copy())
                if s['score']:
                    self.servers_with_scores.append(s)
                continue
            # These are based on string values.
            if self.name_re:
                if not self.name_re.search(s['url']):
                    continue
            if self.country:
                if self.country != s['country_code']:
                    continue
            if self.proto:
                # The status JSON reports the protocol as a lowercase string, so this has to be an
                # equality check; a plain truthiness test would match every server.
                if s['protocol'] != self.proto:
                    continue
            # These are regular True/False switches.
            # We want to be *very* explicit about the ordering and inclusion/exclusion of these.
            # They MUST match the order of _limiters.
            values = []
            for k in ('ipv4', 'ipv6', 'isos'):
                values.append(s[k])
            valid = all([v for k, v in zip(_limiters, values) if k])
            if valid:
                self.servers.append(s)
                if s['score']:
                    self.servers_with_scores.append(s)
        return()

    def rankServers(self):
        if self.statuses:
            sys.stderr.write('Ranking mirrors...\n')
        self.ranked_servers = sorted(self.servers_with_scores, key = lambda i: i['score'])
        return()

    def validateParams(self):
        if self.proto and self.proto.lower() not in protos:
            err = '{0} must be one of: {1}'.format(self.proto, ', '.join(protos))
            raise ValueError(err)
        elif self.proto:
            # Normalize to lowercase to match the status JSON.
            self.proto = self.proto.lower()
        if self.country and self.country.upper() not in iso3166.countries:
            err = ('{0} must be a valid ISO-3166-1 ALPHA-2 country code. '
                   'See https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes'
                   '#Current_ISO_3166_country_codes').format(self.country)
            raise ValueError(err)
        elif self.country:
            self.country = self.country.upper()
        if self.name_re:
            self.name_re = re.compile(self.name_re)
        return()


def parseArgs():
    args = argparse.ArgumentParser(description = 'Fetch and rank Arch Linux mirrors')
    args.add_argument('-c', '--country',
                      dest = 'country',
                      help = ('If specified, limit results to this country (in ISO-3166-1 ALPHA-2 format)'))
    args.add_argument('-p', '--protocol',
                      choices = protos,
                      dest = 'proto',
                      help = ('If specified, limit results to this protocol'))
    args.add_argument('-r', '--name-regex',
                      dest = 'name_re',
                      help = ('If specified, limit results to URLs that match this regex pattern (Python re syntax)'))
    args.add_argument('-4', '--ipv4',
                      dest = 'ipv4',
                      action = 'store_true',
                      help = ('If specified, limit results to servers that support IPv4'))
    args.add_argument('-6', '--ipv6',
                      dest = 'ipv6',
                      action = 'store_true',
                      help = ('If specified, limit results to servers that support IPv6'))
    args.add_argument('-i', '--iso',
                      dest = 'isos',
                      action = 'store_true',
                      help = ('If specified, limit results to servers that have ISO images'))
    is_active = args.add_mutually_exclusive_group()
    is_active.add_argument('-a', '--active-only',
                           default = None,
                           const = True,
                           action = 'store_const',
                           dest = 'is_active',
                           help = ('If specified, only include active servers (default is active + inactive)'))
    is_active.add_argument('-n', '--inactive-only',
                           default = None,
                           const = False,
                           action = 'store_const',
                           dest = 'is_active',
                           help = ('If specified, only include inactive servers (default is active + inactive)'))
    return(args)


if __name__ == '__main__':
    args = vars(parseArgs().parse_args())
    m = MirrorIdx(**args, statuses = True)
    for s in m.ranked_servers:
        print('Server = {0}$repo/os/$arch'.format(s['url']))
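A hypothetical usage sketch (not part of the script): since MirrorIdx filters and ranks at construction time, building a mirrorlist programmatically is just:

m = MirrorIdx(country = 'US', proto = 'https', ipv4 = True, is_active = True)
with open('/etc/pacman.d/mirrorlist.new', 'w') as f:
    for s in m.ranked_servers[:10]:  # the ten best-scored matches; lower score ranks first
        f.write('Server = {0}$repo/os/$arch\n'.format(s['url']))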
165
arch/autopkg/maintain.py
Executable file
@@ -0,0 +1,165 @@
#!/usr/bin/env python

import argparse
import json
import os
import sqlite3
import run
from urllib.request import urlopen


def parseArgs():
    args = argparse.ArgumentParser(description = ('Modify (add/remove) packages for use with Autopkg'),
                                   epilog = ('Operation-specific help; try e.g. "add --help"'))
    commonargs = argparse.ArgumentParser(add_help = False)
    commonargs.add_argument('-n', '--name',
                            dest = 'pkgnm',
                            required = True,
                            help = ('The name of the PACKAGE to operate on.'))
    commonargs.add_argument('-d', '--db',
                            dest = 'dbfile',
                            default = '~/.optools/autopkg.sqlite3',
                            help = ('The location of the package database. THIS SHOULD NOT BE ANY FILE USED BY '
                                    'ANYTHING ELSE! A default one will be created if it doesn\'t exist'))
    subparsers = args.add_subparsers(help = ('Operation to perform'),
                                     metavar = 'OPERATION',
                                     dest = 'oper')
    addargs = subparsers.add_parser('add',
                                    parents = [commonargs],
                                    help = ('Add a package. If a matching package NAME exists (-n/--name), '
                                            'we\'ll replace it'))
    addargs.add_argument('-b', '--base',
                         dest = 'pkgbase',
                         default = None,
                         help = ('The pkgbase; only really needed for split-packages, and we will automatically '
                                 'fetch it if it\'s left blank anyways'))
    addargs.add_argument('-v', '--version',
                         dest = 'pkgver',
                         default = None,
                         help = ('The current version; we will automatically fetch it if it\'s left blank'))
    addargs.add_argument('-l', '--lock',
                         dest = 'active',
                         action = 'store_false',
                         help = ('If specified, the package will still exist in the DB but it will be marked inactive'))
    rmargs = subparsers.add_parser('rm',
                                   parents = [commonargs],
                                   help = ('Remove a package from the DB'))
    buildargs = subparsers.add_parser('build',
                                      help = ('Build all packages; same effect as running run.py'))
    buildargs.add_argument('-d', '--db',
                           dest = 'dbfile',
                           default = '~/.optools/autopkg.sqlite3',
                           help = ('The location of the package database. THIS SHOULD NOT BE ANY FILE USED BY '
                                   'ANYTHING ELSE! A default one will be created if it doesn\'t exist'))
    listargs = subparsers.add_parser('ls',
                                     help = ('List packages (and information about them) only'))
    listargs.add_argument('-d', '--db',
                          dest = 'dbfile',
                          default = '~/.optools/autopkg.sqlite3',
                          help = ('The location of the package database. THIS SHOULD NOT BE ANY FILE USED BY '
                                  'ANYTHING ELSE! A default one will be created if it doesn\'t exist'))
    return(args)


def add(args):
    db = sqlite3.connect(args['dbfile'])
    db.row_factory = sqlite3.Row
    cur = db.cursor()
    if not all((args['pkgbase'], args['pkgver'])):
        # We need some additional info from the AUR API...
        aur_url = 'https://aur.archlinux.org/rpc/?v=5&type=info&by=name&arg%5B%5D={0}'.format(args['pkgnm'])
        with urlopen(aur_url) as url:
            aur = json.loads(url.read().decode('utf-8'))['results']
        if not aur:
            raise ValueError(('Either something is screwy with our network access '
                              'or the package {0} doesn\'t exist').format(args['pkgnm']))
        # The API returns a *list* under 'results', so index into it.
        if ((aur[0]['PackageBase'] != aur[0]['Name']) and (not args['pkgbase'])):
            args['pkgbase'] = aur[0]['PackageBase']
        if not args['pkgver']:
            args['pkgver'] = aur[0]['Version']
    cur.execute("SELECT id, pkgname, pkgbase, pkgver, active FROM packages WHERE pkgname = ?",
                (args['pkgnm'], ))
    row = cur.fetchone()
    # Note: UPDATE's SET list is comma-separated ("AND" would be parsed as a boolean
    # expression), and the -l/--lock flag stores into args['active'].
    if row:
        if args['pkgbase']:
            q = ("UPDATE packages SET pkgbase = ?, pkgver = ?, active = ? WHERE id = ?",
                 (args['pkgbase'], args['pkgver'], ('1' if args['active'] else '0'), row['id']))
        else:
            q = ("UPDATE packages SET pkgver = ?, active = ? WHERE id = ?",
                 (args['pkgver'], ('1' if args['active'] else '0'), row['id']))
    else:
        if args['pkgbase']:
            q = (("INSERT INTO "
                  "packages (pkgname, pkgbase, pkgver, active) "
                  "VALUES (?, ?, ?, ?)"),
                 (args['pkgnm'], args['pkgbase'], args['pkgver'], ('1' if args['active'] else '0')))
        else:
            q = (("INSERT INTO "
                  "packages (pkgname, pkgver, active) "
                  "VALUES (?, ?, ?)"),
                 (args['pkgnm'], args['pkgver'], ('1' if args['active'] else '0')))
    cur.execute(*q)
    db.commit()
    cur.close()
    db.close()
    return()


def rm(args):
    db = sqlite3.connect(args['dbfile'])
    cur = db.cursor()
    cur.execute("DELETE FROM packages WHERE pkgname = ?",
                (args['pkgnm'], ))
    db.commit()
    cur.close()
    db.close()
    return()


def build(args):
    pm = run.PkgMake(db = args['dbfile'])
    pm.main()
    return()


def ls(args):
    db = sqlite3.connect(args['dbfile'])
    db.row_factory = sqlite3.Row
    cur = db.cursor()
    rows = []
    cur.execute("SELECT * FROM packages ORDER BY pkgname")
    for r in cur.fetchall():
        rows.append({'name': r['pkgname'],
                     'row_id': r['id'],
                     'pkgbase': ('' if not r['pkgbase'] else r['pkgbase']),
                     'ver': r['pkgver'],
                     'enabled': ('Yes' if r['active'] else 'No')})
    # Column widths match the fmt string below.
    header = '|      NAME      |  PACKAGE BASE  | VERSION | ENABLED | ROW ID |'
    sep = '=' * len(header)
    fmt = '|{name:<16}|{pkgbase:<16}|{ver:^9}|{enabled:^9}|{row_id:<8}|'
    out = []
    for row in rows:
        out.append(fmt.format(**row))
    header = '\n'.join((sep, header, sep))
    out.insert(0, header)
    out.append(sep)
    print('\n'.join(out))
    cur.close()
    db.close()
    return()


def main():
    rawargs = parseArgs()
    args = vars(rawargs.parse_args())
    if not args['oper']:
        rawargs.print_help()
        exit()
    args['dbfile'] = os.path.abspath(os.path.expanduser(args['dbfile']))
    if args['oper'] == 'add':
        add(args)
    elif args['oper'] == 'rm':
        rm(args)
    elif args['oper'] == 'build':
        build(args)
    elif args['oper'] == 'ls':
        ls(args)
    return()


if __name__ == '__main__':
    main()
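For reference, the info RPC that add() calls returns its payload as a list under 'results', which is why the indexing above matters. A minimal standalone sketch (the package name is arbitrary):

import json
from urllib.request import urlopen

with urlopen('https://aur.archlinux.org/rpc/?v=5&type=info&arg%5B%5D=yay') as u:
    results = json.loads(u.read().decode('utf-8'))['results']
if results:
    # Each element is one package's metadata dict.
    print(results[0]['Name'], results[0]['PackageBase'], results[0]['Version'])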
278
arch/autopkg/run.py
Executable file
278
arch/autopkg/run.py
Executable file
@@ -0,0 +1,278 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import grp
|
||||
import json
|
||||
import os
|
||||
import pwd
|
||||
import re
|
||||
import shutil
|
||||
import sqlite3
|
||||
import subprocess
|
||||
import tarfile
|
||||
import urllib.request as reqs
|
||||
import urllib.parse as urlparse
|
||||
import setup
|
||||
# I *HATE* relying on non-stlib, and I hate even MORE that this is JUST TO COMPARE VERSION STRINGS.
|
||||
# WHY IS THIS FUNCTIONALITY NOT STDLIB YET.
|
||||
try:
|
||||
from distutils.version import LooseVersion
|
||||
has_lv = True
|
||||
except ImportError:
|
||||
has_lv = False
|
||||
|
||||
# The base API URL (https://wiki.archlinux.org/index.php/Aurweb_RPC_interface)
|
||||
aur_base = 'https://aur.archlinux.org/rpc/?v=5&type=info&by=name'
|
||||
# The length of the above. Important because of uri_limit.
|
||||
base_len = len(aur_base)
|
||||
# Maximum length of the URI.
|
||||
uri_limit = 4443
|
||||
|
||||
class PkgMake(object):
|
||||
def __init__(self, db = '~/.optools/autopkg.sqlite3'):
|
||||
db = os.path.abspath(os.path.expanduser(db))
|
||||
if not os.path.isfile(db):
|
||||
setup.firstrun(db)
|
||||
self.conn = sqlite3.connect(db)
|
||||
self.conn.row_factory = sqlite3.Row
|
||||
self.cur = self.conn.cursor()
|
||||
self.cfg = setup.main(self.conn, self.cur)
|
||||
if self.cfg['sign']:
|
||||
_cmt_mode = self.conn.isolation_level # autocommit
|
||||
self.conn.isolation_level = None
|
||||
self.fpr, self.gpg = setup.GPG(self.cur, homedir = self.cfg['gpg_homedir'], keyid = self.cfg['gpg_keyid'])
|
||||
self.conn.isolation_level = _cmt_mode
|
||||
# don't need this anymore; it should be duplicated or populated into self.fpr.
|
||||
del(self.cfg['gpg_keyid'])
|
||||
self.my_key = self.gpg.get_key(self.fpr, secret = True)
|
||||
self.gpg.signers = [self.my_key]
|
||||
else:
|
||||
self.fpr = self.gpg = self.my_key = None
|
||||
del(self.cfg['gpg_keyid'])
|
||||
self.pkgs = {}
|
||||
self._populatePkgs()
|
||||
|
||||
def main(self):
|
||||
self.getPkg()
|
||||
self.buildPkg()
|
||||
return()
|
||||
|
||||
def _chkver(self, pkgbase):
|
||||
new_ver = self.pkgs[pkgbase]['meta']['new_ver']
|
||||
old_ver = self.pkgs[pkgbase]['meta']['pkgver']
|
||||
is_diff = (new_ver != old_ver) # A super-stupid fallback
|
||||
if is_diff:
|
||||
if has_lv:
|
||||
is_diff = LooseVersion(new_ver) > LooseVersion(old_ver)
|
||||
else:
|
||||
# like, 90% of the time, this would work.
|
||||
new_tuple = tuple(map(int, (re.split('\.|-', new_ver))))
|
||||
old_tuple = tuple(map(int, (re.split('\.|-', old_ver))))
|
||||
# But people at https://stackoverflow.com/a/11887825/733214 are very angry about it, hence the above.
|
||||
is_diff = new_tuple > old_tuple
|
||||
return(is_diff)
|
||||
|
||||
def _populatePkgs(self):
|
||||
# These columns/keys are inferred by structure or unneeded. Applies to both DB and AUR API.
|
||||
_notrack = ('pkgbase', 'pkgname', 'active', 'id', 'packagebaseid', 'numvotes', 'popularity', 'outofdate',
|
||||
'maintainer', 'firstsubmitted', 'lastmodified', 'depends', 'optdepends', 'conflicts', 'license',
|
||||
'keywords')
|
||||
_attr_map = {'version': 'new_ver'}
|
||||
# These are tracked per-package; all others are pkgbase and applied to all split pkgs underneath.
|
||||
_pkg_specific = ('pkgdesc', 'arch', 'url', 'license', 'groups', 'depends', 'optdepends', 'provides',
|
||||
'conflicts', 'replaces', 'backup', 'options', 'install', 'changelog')
|
||||
_aur_results = []
|
||||
_urls = []
|
||||
_params = {'arg[]': []}
|
||||
_tmp_params = {'arg[]': []}
|
||||
self.cur.execute("SELECT * FROM packages WHERE active = '1'")
|
||||
for row in self.cur.fetchall():
|
||||
pkgbase = (row['pkgbase'] if row['pkgbase'] else row['pkgname'])
|
||||
pkgnm = row['pkgname']
|
||||
if pkgbase not in self.pkgs:
|
||||
self.pkgs[pkgbase] = {'packages': {pkgnm: {}},
|
||||
'meta': {}}
|
||||
for k in dict(row):
|
||||
if not k:
|
||||
continue
|
||||
if k in _notrack:
|
||||
continue
|
||||
if k in _pkg_specific:
|
||||
self.pkgs[pkgbase]['packages'][pkgnm][k] = row[k]
|
||||
else:
|
||||
if k not in self.pkgs[pkgbase]['meta']:
|
||||
self.pkgs[pkgbase]['meta'][k] = row[k]
|
||||
# TODO: change this?
|
||||
pkgstr = urlparse.quote(pkgnm) # We perform against a non-pkgbased name for the AUR search.
|
||||
_tmp_params['arg[]'].append(pkgstr)
|
||||
l = base_len + (len(urlparse.urlencode(_tmp_params, doseq = True)) + 1)
|
||||
if l >= uri_limit:
|
||||
# We need to split into multiple URIs based on URI size because of:
|
||||
# https://wiki.archlinux.org/index.php/Aurweb_RPC_interface#Limitations
|
||||
_urls.append('&'.join((aur_base, urlparse.urlencode(_params, doseq = True))))
|
||||
_params = {'arg[]': []}
|
||||
_tmp_params = {'arg[]': []}
|
||||
_params['arg[]'].append(pkgstr)
|
||||
_urls.append('&'.join((aur_base, urlparse.urlencode(_params, doseq = True))))
|
||||
for url in _urls:
|
||||
with reqs.urlopen(url) as u:
|
||||
_aur_results.extend(json.loads(u.read().decode('utf-8'))['results'])
|
||||
for pkg in _aur_results:
|
||||
pkg = {k.lower(): v for (k, v) in pkg.items()}
|
||||
pkgnm = pkg['name']
|
||||
pkgbase = pkg['packagebase']
|
||||
for (k, v) in pkg.items():
|
||||
if k in _notrack:
|
||||
continue
|
||||
if k in _attr_map:
|
||||
k = _attr_map[k]
|
||||
if k in _pkg_specific:
|
||||
self.pkgs[pkgbase]['packages'][pkgnm][k] = v
|
||||
else:
|
||||
self.pkgs[pkgbase]['meta'][k] = v
|
||||
self.pkgs[pkgbase]['meta']['snapshot'] = 'https://aur.archlinux.org{0}'.format(pkg['urlpath'])
|
||||
self.pkgs[pkgbase]['meta']['filename'] = os.path.basename(pkg['urlpath'])
|
||||
self.pkgs[pkgbase]['meta']['build'] = self._chkver(pkgbase)
|
||||
return()
|
||||
|
||||
def _drop_privs(self):
|
||||
# First get the list of groups to assign.
|
||||
# This *should* generate a list *exactly* like as if that user ran os.getgroups(),
|
||||
# with the addition of self.cfg['build_user']['gid'] (if it isn't included already).
|
||||
newgroups = list(sorted([g.gr_gid
|
||||
for g in grp.getgrall()
|
||||
if pwd.getpwuid(self.cfg['build_user']['uid'])
|
||||
in g.gr_mem]))
|
||||
if self.cfg['build_user']['gid'] not in newgroups:
|
||||
newgroups.append(self.cfg['build_user']['gid'])
|
||||
newgroups.sort()
|
||||
# This is the user's "primary group"
|
||||
user_gid = pwd.getpwuid(self.cfg['build_user']['uid']).pw_gid
|
||||
if user_gid not in newgroups:
|
||||
newgroups.append(user_gid)
|
||||
os.setgroups(newgroups)
|
||||
# If we used os.setgid and os.setuid, we would PERMANENTLY/IRREVOCABLY drop privs.
|
||||
# Being that that doesn't suit the meta of the rest of the script (chmodding, etc.) - probably not a good idea.
|
||||
os.setresgid(self.cfg['build_user']['gid'], self.cfg['build_user']['gid'], -1)
|
||||
os.setresuid(self.cfg['build_user']['uid'], self.cfg['build_user']['uid'], -1)
|
||||
# Default on most linux systems. reasonable enough for building? (equal to chmod 755/644)
|
||||
os.umask(0o0022)
|
||||
# TODO: we need a full env construction here, I think, as well. PATH, HOME, GNUPGHOME at the very least?
|
||||
return()
|
||||
|
||||
def _restore_privs(self):
|
||||
os.setresuid(self.cfg['orig_user']['uid'], self.cfg['orig_user']['uid'], self.cfg['orig_user']['uid'])
|
||||
os.setresgid(self.cfg['orig_user']['gid'], self.cfg['orig_user']['gid'], self.cfg['orig_user']['gid'])
|
||||
os.setgroups(self.cfg['orig_user']['groups'])
|
||||
os.umask(self.cfg['orig_user']['umask'])
|
||||
# TODO: if we change the env, we need to change it back here. I capture it in self.cfg['orig_user']['env'].
|
||||
return()
|
||||
|
||||
def getPkg(self):
|
||||
self._drop_privs()
|
||||
for pkgbase in self.pkgs:
|
||||
if not self.pkgs[pkgbase]['meta']['build']:
|
||||
continue
|
||||
_pkgre = re.compile('^(/?.*/)*({0})/?'.format(pkgbase))
|
||||
builddir = os.path.join(self.cfg['cache'], pkgbase)
|
||||
try:
|
||||
shutil.rmtree(builddir)
|
||||
except FileNotFoundError:
|
||||
# We *could* use ignore_errors or onerrors params, but we only want FileNotFoundError.
|
||||
pass
|
||||
os.makedirs(builddir, mode = self.cfg['chmod']['dirs'], exist_ok = True)
|
||||
tarball = os.path.join(builddir, self.pkgs[pkgbase]['meta']['filename'])
|
||||
with reqs.urlopen(self.pkgs[pkgbase]['meta']['snapshot']) as url:
|
||||
# We have to write out to disk first because the tarfile module HATES trying to perform seeks on
|
||||
# a tarfile stream. It HATES it.
|
||||
with open(tarball, 'wb') as f:
|
||||
f.write(url.read())
|
||||
tarnames = {}
|
||||
with tarfile.open(tarball, mode = 'r:*') as tar:
|
||||
for i in tar.getmembers():
|
||||
if any((i.isdir(), i.ischr(), i.isblk(), i.isfifo(), i.isdev())):
|
||||
continue
|
||||
if i.name.endswith('.gitignore'):
|
||||
continue
|
||||
# We want to strip leading dirs out.
|
||||
tarnames[i.name] = _pkgre.sub('', i.name)
|
||||
# Small bugfix.
|
||||
if tarnames[i.name] == '':
|
||||
tarnames[i.name] = os.path.basename(i.name)
|
||||
tarnames[i.name] = os.path.join(builddir, tarnames[i.name])
|
||||
for i in tar.getmembers():
|
||||
if i.name in tarnames:
|
||||
# GOLLY I WISH TARFILE WOULD LET US JUST CHANGE THE ARCNAME DURING EXTRACTION ON THE FLY.
|
||||
with open(tarnames[i.name], 'wb') as f:
|
||||
f.write(tar.extractfile(i.name).read())
|
||||
# No longer needed, so clean it up behind us.
|
||||
os.remove(tarball)
|
||||
self._restore_privs()
|
||||
return()
|
||||
|
||||
def buildPkg(self):
|
||||
self._drop_privs()
|
||||
for pkgbase in self.pkgs:
|
||||
if not self.pkgs[pkgbase]['meta']['build']:
|
||||
continue
|
||||
builddir = os.path.join(self.cfg['cache'], pkgbase)
|
||||
os.chdir(builddir)
|
||||
# subprocess.run(['makepkg']) # TODO: figure out gpg sig checking?
|
||||
subprocess.run(['makepkg', '--clean', '--force', '--skippgpcheck'])
|
||||
self._restore_privs()
|
||||
for pkgbase in self.pkgs:
|
||||
if not self.pkgs[pkgbase]['meta']['build']:
|
||||
continue
|
||||
builddir = os.path.join(self.cfg['cache'], pkgbase)
|
||||
# i686 isn't even supported upstream anymore, but let's keep this friendly for the Archlinux32 folks.
|
||||
_pkgre = re.compile(('^({0})-{1}-'
                     '(x86_64|i686|any)'
                     r'\.pkg\.tar\.xz$').format('|'.join(self.pkgs[pkgbase]['packages'].keys()),
                                                self.pkgs[pkgbase]['meta']['new_ver']))
|
||||
fname = None
|
||||
# PROBABLY in the first root dir, and could be done with fnmatch, but...
|
||||
for root, dirs, files in os.walk(builddir):
    for f in files:
        if _pkgre.search(f):
            fname = os.path.join(root, f)
            break
    if fname:
        # Stop walking once found; the inner break alone wouldn't end the walk.
        break
|
||||
if not fname:
|
||||
raise RuntimeError('Could not find proper package build filename for {0}'.format(pkgbase))
|
||||
destfile = os.path.join(self.cfg['dest'], os.path.basename(fname))
|
||||
os.rename(fname, destfile)
|
||||
# TODO: HERE IS WHERE WE SIGN THE PACKAGE?
|
||||
# We also need to update the package info in the DB.
|
||||
for p in self.pkgs[pkgbase]['packages']:
|
||||
self.cur.execute("UPDATE packages SET pkgver = ? WHERE pkgname = ?",
|
||||
(self.pkgs[pkgbase]['meta']['new_ver'], p))
|
||||
self.cfg['pkgpaths'].append(destfile)
|
||||
# No longer needed, so we can clear out the build directory.
|
||||
shutil.rmtree(builddir)
|
||||
os.chdir(self.cfg['dest'])
|
||||
dbfile = os.path.join(self.cfg['dest'], 'autopkg.db.tar.gz') # TODO: Custom repo name?
|
||||
cmd = ['repo-add', '--nocolor', '--delta', dbfile] # -s/--sign?
|
||||
cmd.extend(self.cfg['pkgpaths'])
|
||||
subprocess.run(cmd)
|
||||
for root, dirs, files in os.walk(self.cfg['dest']):
|
||||
for f in files:
|
||||
fpath = os.path.join(root, f)
|
||||
os.chmod(fpath, self.cfg['chmod']['files'])
|
||||
os.chown(fpath, self.cfg['chown']['uid'], self.cfg['chown']['gid'])
|
||||
for d in dirs:
|
||||
dpath = os.path.join(root, d)
|
||||
os.chmod(dpath, self.cfg['chmod']['dirs'])
|
||||
os.chown(dpath, self.cfg['chown']['uid'], self.cfg['chown']['gid'])
|
||||
return()
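# If the '-s/--sign?' note above is acted on, repo-add can sign the DB itself
# (it honors GNUPGHOME, and --key selects the signing key). Wiring in the
# configured gpg_keyid here is an assumption:
#
# cmd = ['repo-add', '--nocolor', '--delta', '--sign', '--key', self.cfg['gpg_keyid'], dbfile]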
|
||||
|
||||
def close(self):
|
||||
if self.cur:
|
||||
self.cur.close()
|
||||
if self.conn:
|
||||
self.conn.close()
|
||||
return()
|
||||
|
||||
def main():
|
||||
pm = PkgMake()
|
||||
pm.main()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
arch/autopkg/setup.py (executable file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import base64
|
||||
import copy
|
||||
import gpg
|
||||
import grp
|
||||
import json
|
||||
import lzma
|
||||
import os
|
||||
import pwd
|
||||
import re
|
||||
from socket import gethostname
|
||||
import sqlite3
|
||||
|
||||
# NOTE: The gpg homedir should be owned by the user *running autopkg*.
# Priv-dropping will likely only work when running as root.
|
||||
|
||||
dirs = ('cache', 'dest', 'gpg_homedir')
|
||||
u_g_pairs = ('chown', 'build_user')
|
||||
json_vals = ('chmod', )
|
||||
|
||||
blank_db = """
|
||||
/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4H//AxNdACmURZ1gyBn4JmSIjib+MZX9x4eABpe77H+o
|
||||
CX2bysoKzO/OaDh2QGbNjiU75tmhPrWMvTFue4XOq+6NPls33xRRL8eZoITBdAaLqbwYY2XW/V/X
|
||||
Gx8vpjcBnpACjVno40FoJ1qWxJlBZ0PI/8gMoBr3Sgdqnf+Bqi+E6dOl66ktJMRr3bdZ5C9vOXAf
|
||||
42BtRfwJlwN8NItaWtfRYVfXl+40D05dugcxDLY/3uUe9MSgt46Z9+Q9tGjjrUA8kb5K2fqWSlQ2
|
||||
6KyF3KV1zsJSDLuaRkP42JNsBTgg6mU5rEk/3egdJiLn+7AupvWQ3YlKkeALZvgEKy75wdObf6QI
|
||||
jY4qjXjxOTwOG4oou7lNZ3fPI5qLCQL48M8ZbOQoTAQCuArdYqJmBwT2rF86SdQRP4EY6TlExa4o
|
||||
+E+v26hKhYXO7o188jlmGFbuzqtoyMB1y3UG+Hi2SjPDilD5o6f9fEjiHZm2FY6rkPb9Km4UFlH1
|
||||
d2A4Wt4iGlciZBs0lFRPKkgHR4s7KHTMKuZyC08qE1B7FwvyBTBBYveA2UoZlKY7d22IbiiSQ3tP
|
||||
JKhj8nf8EWcgHPt46Juo80l7vqqn6AviY7b1JZXICdiJMbuWJEyzTLWuk4qlUBfimP7k9IjhDFpJ
|
||||
gEXdNgrnx/wr5CIbr1T5lI9vZz35EacgNA2bGxLA8VI0W9eYDts3BSfhiJOHWwLQPiNzJwd4aeM1
|
||||
IhqgTEpk+BD0nIgSB3AAB+NfJJavoQjpv0QBA6dH52utA5Nw5L//Ufw/YKaA7ui8YQyDJ7y2n9L3
|
||||
ugn6VJFFrYSgIe1oRkJBGRGuBgGNTS3aJmdFqEz1vjZBMkFdF+rryXzub4dst2Qh01E6/elowIUh
|
||||
2whMRVDO28QjyS9tLtLLzfTmBk2NSxs4+znE0ePKKw3n/p6YlbPRAw24QR8MTCOpQ2lH1UZNWBM2
|
||||
epxfmWtgO5b/wGYopRDEvDDdbPAq6+4zxTOT5RmdWZyc46gdizf9+dQW3wZ9iBDjh4MtuYPvLlqr
|
||||
0GRmsyrxgFxkwvVoXASNndS0NPcAADkAhYCxn+W2AAGvBoCAAgB/TQWascRn+wIAAAAABFla
|
||||
"""
|
||||
|
||||
def firstrun(dbfile):
|
||||
dbdata = lzma.decompress(base64.b64decode(blank_db))
|
||||
with open(dbfile, 'wb') as f:
|
||||
f.write(dbdata)
|
||||
return()
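# For reference, a blob like blank_db above can be regenerated from a seed SQLite
# file with the inverse operations ('autopkg.db' is a placeholder filename):
def _make_blank_db_blob(dbfile = 'autopkg.db'):
    with open(dbfile, 'rb') as f:
        return(base64.b64encode(lzma.compress(f.read())).decode('utf-8'))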
|
||||
|
||||
def main(connection, cursor):
|
||||
cfg = {'orig_cwd': os.getcwd(),
|
||||
'pkgpaths': []}
|
||||
cursor.execute("SELECT directive, value FROM config")
|
||||
for r in cursor.fetchall():
|
||||
cfg[r['directive']] = r['value'].strip()
|
||||
for k in cfg:
|
||||
for x in (True, False, None):
|
||||
if cfg[k] == str(x):
|
||||
cfg[k] = x
|
||||
break
|
||||
if k in json_vals:
|
||||
cfg[k] = json.loads(cfg[k])
|
||||
if k == 'path':
|
||||
paths = []
|
||||
for i in cfg[k].split(':'):
|
||||
p = os.path.abspath(os.path.expanduser(i))
|
||||
paths.append(p)
|
||||
cfg[k] = paths
|
||||
if k in dirs:
|
||||
if cfg[k]:
|
||||
cfg[k] = os.path.abspath(os.path.expanduser(cfg[k]))
|
||||
os.makedirs(cfg[k], exist_ok = True)
|
||||
if k in u_g_pairs:
|
||||
dflt = [pwd.getpwuid(os.geteuid()).pw_name, grp.getgrgid(os.getegid()).gr_name]
|
||||
l = re.split(r':|\.', cfg[k])
|
||||
if len(l) == 1:
|
||||
l.append(None)
|
||||
for idx, i in enumerate(l[:]):
|
||||
if i in ('', None):
|
||||
l[idx] = dflt[idx]
|
||||
cfg[k] = {}
|
||||
cfg[k]['uid'] = (int(l[0]) if l[0].isnumeric() else pwd.getpwnam(l[0]).pw_uid)
|
||||
cfg[k]['gid'] = (int(l[1]) if l[1].isnumeric() else grp.getgrnam(l[1]).gr_gid)
|
||||
cfg['orig_user'] = {'uid': os.geteuid(),
|
||||
'gid': os.getegid()}
|
||||
# Ugh. https://orkus.wordpress.com/2011/04/17/python-getting-umask-without-change/
|
||||
cfg['orig_user']['umask'] = os.umask(0)
|
||||
os.umask(cfg['orig_user']['umask'])
|
||||
cfg['orig_user']['groups'] = os.getgroups()
|
||||
for i in cfg['chmod']:
|
||||
cfg['chmod'][i] = int(cfg['chmod'][i], 8)
|
||||
cfg['orig_user']['env'] = copy.deepcopy(dict(os.environ))
|
||||
os.chown(cfg['cache'], uid = cfg['build_user']['uid'], gid = cfg['build_user']['gid'])
|
||||
os.chown(cfg['dest'], uid = cfg['chown']['uid'], gid = cfg['chown']['gid'])
|
||||
return(cfg)
|
||||
|
||||
def GPG(cur, homedir = None, keyid = None):
|
||||
g = gpg.Context(home_dir = homedir)
|
||||
if not keyid:
|
||||
# We don't have a key specified, so we need to generate one and update the config.
|
||||
s = ('This signature and signing key were automatically generated using Autopkg from OpTools: '
|
||||
'https://git.square-r00t.net/OpTools/')
|
||||
g.sig_notation_add('automatically-generated@git.square-r00t.net', s, gpg.constants.sig.notation.HUMAN_READABLE)
|
||||
userid = 'Autopkg Signing Key ({0}@{1})'.format(os.getenv('SUDO_USER', os.environ['USER']), gethostname())
|
||||
params = {
|
||||
#'algorithm': 'ed25519',
|
||||
'algorithm': 'rsa4096',
|
||||
'expires': False,
|
||||
'expires_in': 0,
|
||||
'sign': True,
|
||||
'passphrase': None
|
||||
}
|
||||
keyid = g.create_key(userid, **params).fpr
|
||||
# https://stackoverflow.com/a/50718957
|
||||
q = {}
|
||||
for col in ('keyid', 'homedir'):
|
||||
if sqlite3.sqlite_version_info >= (3, 24, 0):  # upsert support landed in 3.24.0 itself
|
||||
q[col] = ("INSERT INTO config (directive, value) "
|
||||
"VALUES ('gpg_{0}', ?) "
|
||||
"ON CONFLICT (directive) "
|
||||
"DO UPDATE SET value = excluded.value").format(col)
|
||||
else:
|
||||
cur.execute("SELECT id FROM config WHERE directive = 'gpg_{0}'".format(col))
|
||||
row = cur.fetchone()
|
||||
if row:
|
||||
q[col] = ("UPDATE config SET value = ? WHERE id = '{0}'").format(row['id'])
|
||||
else:
|
||||
q[col] = ("INSERT INTO config (directive, value) VALUES ('gpg_{0}', ?)").format(col)
|
||||
cur.execute(q[col], (locals()[col], ))
|
||||
return(keyid, g)
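# A sketch of putting the returned context to use for the detached .sig files
# pacman expects; the gpg_homedir/gpg_keyid directives exist per the code above,
# and 'foo.pkg.tar.xz' is a placeholder path:
def _sign_pkg(cur, cfg, pkgpath = 'foo.pkg.tar.xz'):
    keyid, g = GPG(cur, homedir = cfg.get('gpg_homedir'), keyid = cfg.get('gpg_keyid'))
    with open(pkgpath, 'rb') as f:
        sig, _ = g.sign(f.read(), mode = gpg.constants.sig.mode.DETACH)
    with open('{0}.sig'.format(pkgpath), 'wb') as f:
        f.write(sig)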
|
||||
arch/buildup/pkgchk.py (executable file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import configparser
|
||||
import hashlib
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess
|
||||
import tarfile # for verifying built PKGBUILDs. We just need to grab <tar>/.PKGINFO, and check: pkgver = <version>
|
||||
import tempfile
|
||||
from collections import OrderedDict
|
||||
from urllib.error import HTTPError
from urllib.request import urlopen
|
||||
|
||||
class color(object):
|
||||
PURPLE = '\033[95m'
|
||||
CYAN = '\033[96m'
|
||||
DARKCYAN = '\033[36m'
|
||||
BLUE = '\033[94m'
|
||||
GREEN = '\033[92m'
|
||||
YELLOW = '\033[93m'
|
||||
RED = '\033[91m'
|
||||
BOLD = '\033[1m'
|
||||
UNDERLINE = '\033[4m'
|
||||
END = '\033[0m'
|
||||
|
||||
|
||||
vcstypes = ('bzr', 'git', 'hg', 'svn')
|
||||
|
||||
class pkgChk(object):
|
||||
def __init__(self, pkg):
|
||||
# pkg should be a string of a PKGBUILD,
|
||||
# not the path to a file.
|
||||
self.pkg = pkg
|
||||
# The below holds parsed data from the PKGBUILD.
|
||||
self.pkgdata = {'pkgver': self.getLex('pkgver', 'var'),
|
||||
'_pkgver': self.getLex('_pkgver', 'var'),
|
||||
'pkgname': self.getLex('pkgname', 'var'),
|
||||
'sources': self.getLex('source', 'array')}
|
||||
|
||||
def getLex(self, attrib, attrtype):
|
||||
# Parse the PKGBUILD and return actual values from it.
|
||||
# attrtype should be "var" or "array".
|
||||
# var returns a string and array returns a list.
|
||||
# If the given attrib isn't in the pkgbuild, None is returned.
|
||||
# The sources array is special, though - it returns a tuple of:
|
||||
# (hashtype, dict) where dict is a mapping of:
|
||||
# filename: hash
|
||||
# filename2: hash2
|
||||
# etc.
|
||||
if attrtype not in ('var', 'array'):
|
||||
raise ValueError('{0} is not a valid attribute type.'.format(attrtype))
|
||||
_sums = ('sha512', 'sha384', 'sha256', 'sha1', 'md5') # in order of preference
|
||||
_attrmap = {'var': 'echo ${{{0}}}'.format(attrib),
            'array': 'echo ${{{0}[@]}}'.format(attrib)}
|
||||
_tempfile = tempfile.mkstemp(text = True)
|
||||
with open(_tempfile[1], 'w') as f:
|
||||
f.write(self.pkg)
|
||||
_cmd = ['/bin/bash',
|
||||
'--restricted', '--noprofile',
|
||||
'--init-file', _tempfile[1],
|
||||
'-i', '-c', _attrmap[attrtype]]
|
||||
with open(os.devnull, 'wb') as devnull:
|
||||
_out = subprocess.run(_cmd, env = {'PATH': ''},
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = devnull).stdout.decode('utf-8').strip()
|
||||
if _out == '':
|
||||
os.remove(_tempfile[1])
|
||||
return(None)
|
||||
if attrtype == 'var':
|
||||
os.remove(_tempfile[1])
|
||||
return(_out)
|
||||
else: # it's an array
|
||||
if attrib == 'source':
|
||||
_sources = {}
|
||||
_source = shlex.split(_out)
|
||||
_sumarr = [None] * len(_source)
|
||||
for h in _sums:
|
||||
_cmd[-1] = 'echo ${{{0}[@]}}'.format(h + 'sums')
|
||||
with open(os.devnull, 'wb') as devnull:
|
||||
_out = subprocess.run(_cmd, env = {'PATH': ''},
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = devnull).stdout.decode('utf-8').strip()
|
||||
if _out != '':
|
||||
os.remove(_tempfile[1])
|
||||
return(h, OrderedDict(zip(_source, shlex.split(_out))))
|
||||
else:
|
||||
continue
|
||||
# No match for checksums.
|
||||
os.remove(_tempfile[1])
|
||||
return(None, OrderedDict(zip(_source, shlex.split(_out))))
|
||||
else:
|
||||
os.remove(_tempfile[1])
|
||||
return(shlex.split(_out))
|
||||
return()
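# Usage sketch (getLex() is normally driven from __init__, but can be called
# directly; 'PKGBUILD' here is a placeholder path):
#
# with open('PKGBUILD', 'r') as f:
#     p = pkgChk(f.read())
# hashtype, sources = p.pkgdata['sources']  # e.g. ('sha256', OrderedDict([...]))
# depends = p.getLex('depends', 'array')    # list of strings, or None if unset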
|
||||
|
||||
def getURL(self, url):
    # urlopen() raises on HTTP error statuses, so catch those and
    # return the status code either way.
    try:
        with urlopen(url) as http:
            code = http.getcode()
    except HTTPError as e:
        code = e.code
    return(code)
|
||||
|
||||
def chkVer(self):
|
||||
_separators = []
|
||||
# TODO: this is to explicitly prevent parsing
|
||||
# VCS packages, so might need some re-tooling in the future.
|
||||
if self.pkgdata['pkgname'].split('-')[-1] in vcstypes:
|
||||
return(None)
|
||||
# transform the current version into a list of various components.
|
||||
if not self.pkgdata['pkgver']:
|
||||
return(None)
|
||||
if self.pkgdata['_pkgver']:
|
||||
_cur_ver = self.pkgdata['_pkgver']
|
||||
else:
|
||||
_cur_ver = self.pkgdata['pkgver']
|
||||
# This will catch like 90% of the software versions out there.
|
||||
# Unfortunately, it won't catch all of them. I dunno how to
|
||||
# handle that quite yet. TODO.
|
||||
_split_ver = _cur_ver.split('.')
|
||||
_idx = len(_split_ver) - 1
|
||||
while _idx >= 0:
|
||||
# sources is a (hashtype, OrderedDict) tuple, so index into the dict;
# strip any "filename::" prefix from the first source URL.
_url = re.sub('^[A-Za-z0-9]+::',
              '',
              list(self.pkgdata['sources'][1].keys())[0])
|
||||
_code = self.getURL(_url)
|
||||
_idx -= 1
|
||||
|
||||
def parseArgs():
|
||||
_ini = '~/.config/optools/buildup.ini'
|
||||
_defini = os.path.abspath(os.path.expanduser(_ini))
|
||||
args = argparse.ArgumentParser()
|
||||
args.add_argument('-c', '--config',
|
||||
default = _defini,
|
||||
dest = 'config',
|
||||
help = ('The path to the config file. ' +
|
||||
'Default: {0}{1}{2}').format(color.BOLD,
|
||||
_defini,
|
||||
color.END))
|
||||
args.add_argument('-R', '--no-recurse',
|
||||
action = 'store_false',
|
||||
dest = 'recurse',
|
||||
help = ('If specified, and the path provided is a directory, ' +
|
||||
'do NOT recurse into subdirectories.'))
|
||||
args.add_argument('-p', '--path',
|
||||
metavar = 'path/to/dir/or/PKGBUILD',
|
||||
default = None,
|
||||
dest = 'pkgpath',
|
||||
help = ('The path to either a directory containing PKGBUILDs (recursion ' +
|
||||
'enabled - see {0}-R/--no-recurse{1}) ' +
|
||||
'or a single PKGBUILD. Use to override ' +
|
||||
'the config\'s PKG:paths.').format(color.BOLD, color.END))
|
||||
return(args)
|
||||
|
||||
def parsePkg(pkgbuildstr):
|
||||
p = pkgChk(pkgbuildstr)
|
||||
p.chkVer()
|
||||
return()
|
||||
|
||||
def iterDir(pkgpath, recursion = True):
|
||||
filepaths = []
|
||||
if os.path.isfile(pkgpath):
|
||||
return([pkgpath])
|
||||
if recursion:
|
||||
for root, subdirs, files in os.walk(pkgpath):
|
||||
for vcs in vcstypes:
|
||||
if '.{0}'.format(vcs) in subdirs:
|
||||
subdirs.remove('.{0}'.format(vcs))
|
||||
for f in files:
|
||||
if 'PKGBUILD' in f:
|
||||
filepaths.append(os.path.join(root, f))
|
||||
else:
|
||||
for f in os.listdir(pkgpath):
|
||||
if 'PKGBUILD' in f:
|
||||
filepaths.append(f)
|
||||
filepaths.sort()
|
||||
return(filepaths)
|
||||
|
||||
def parseCfg(cfgfile):
|
||||
def getPath(p):
|
||||
return(os.path.abspath(os.path.expanduser(p)))
|
||||
_defcfg = '[PKG]\npaths = \ntestbuild = no\n[VCS]\n'
|
||||
for vcs in vcstypes:
|
||||
_defcfg += '{0} = no\n'.format(vcs)
|
||||
_cfg = configparser.ConfigParser(interpolation = configparser.ExtendedInterpolation())
# read() expects filenames; the defaults above are a string, so feed them via read_string().
_cfg.read_string(_defcfg)
_cfg.read(cfgfile)
|
||||
# We convert to a dict so we can do things like list comprehension.
|
||||
cfg = {s:dict(_cfg.items(s)) for s in _cfg.sections()}
|
||||
if 'paths' not in cfg['PKG'].keys():
|
||||
raise ValueError('You must provide a valid configuration ' +
|
||||
'file with the PKG:paths setting specified and valid.')
|
||||
cfg['PKG']['paths'] = sorted([getPath(p.strip()) for p in cfg['PKG']['paths'].split(',')],
|
||||
reverse = True)
|
||||
for p in cfg['PKG']['paths'][:]:
|
||||
if not os.path.exists(p):
|
||||
print('WARNING: {0} does not exist; skipping...'.format(p))
|
||||
cfg['PKG']['paths'].remove(p)
|
||||
# We also want to convert these to pythonic True/False
|
||||
cfg['PKG']['testbuild'] = _cfg['PKG'].getboolean('testbuild')
|
||||
for k in vcstypes:
|
||||
cfg['VCS'][k] = _cfg['VCS'].getboolean(k)
|
||||
return(cfg)
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = vars(parseArgs().parse_args())
|
||||
if not os.path.isfile(args['config']):
    raise FileNotFoundError('{0} does not exist.'.format(args['config']))
|
||||
cfg = parseCfg(args['config'])
|
||||
if args['pkgpath']:
|
||||
args['pkgpath'] = os.path.abspath(os.path.expanduser(args['pkgpath']))
|
||||
if os.path.isdir(args['pkgpath']):
    # iterDir() only returns paths; each PKGBUILD still has to be read and parsed.
    for p in iterDir(args['pkgpath'], recursion = args['recurse']):
        with open(p, 'r') as f:
            parsePkg(f.read())
elif os.path.isfile(args['pkgpath']):
    # parsePkg() expects the PKGBUILD's contents, not its path.
    with open(args['pkgpath'], 'r') as f:
        parsePkg(f.read())
else:
    raise FileNotFoundError('{0} does not exist.'.format(args['pkgpath']))
|
||||
else:
|
||||
files = []
|
||||
for p in cfg['PKG']['paths']:
|
||||
files.extend(iterDir(p))
|
||||
files.sort()
|
||||
for p in files:
|
||||
with open(p, 'r') as f:
|
||||
parsePkg(f.read())
|
||||
arch/buildup/sample.buildup.ini (normal file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
## This configuration file will allow you to perform more
|
||||
## fine-grained control of BuildUp.
|
||||
## It supports the syntax shortcuts found here:
|
||||
## https://docs.python.org/3/library/configparser.html#configparser.ExtendedInterpolation
|
||||
|
||||
[PKG]
|
||||
# The path(s) to your PKGBUILD(s), or a directory/directories containing them.
|
||||
# If you have more than one, separate with a comma.
|
||||
paths = path/to/pkgbuilds,another/path/to/pkgbuilds
|
||||
|
||||
# If 'yes', try building the package with the new version.
|
||||
# If 'no' (the default), don't try to build with the new version.
|
||||
# This can be a good way to test that you don't need to modify the PKGBUILD,
|
||||
# but can be error-prone (missing makedeps, etc.).
|
||||
testbuild = no
|
||||
|
||||
[VCS]
|
||||
# Here you can enable or disable which VCS platforms you want to support.
|
||||
# Note that it will increase the time of your check, as it will
|
||||
# actually perform a checkout/clone/etc. of the source and check against
|
||||
# the version function inside the PKGBUILD.
|
||||
# It's also generally meaningless, as VCS PKGBUILDs are intended
|
||||
# to be dynamic. Nonetheless, the options are there.
|
||||
# Use 'yes' to enable, or 'no' to disable (the default).
|
||||
# Currently only the given types are supported (i.e. no CVS).
|
||||
|
||||
# THESE ARE CURRENTLY NOT SUPPORTED.
|
||||
|
||||
# Check revisions for -git PKGBUILDs
|
||||
git = no
|
||||
|
||||
# Check revisions for -svn PKGBUILDs
|
||||
svn = no
|
||||
|
||||
# Check revisions for -hg PKGBUILDs
|
||||
hg = no
|
||||
|
||||
# Check revisions for -bzr PKGBUILDs
|
||||
bzr = no
|
||||
arch/mirrorchk.py (normal file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import tempfile
|
||||
from urllib.request import urlopen
|
||||
|
||||
# The local list of mirrors
|
||||
mfile = '/etc/pacman.d/mirrorlist'
|
||||
# The URL for the list of mirrors
|
||||
# TODO: customize with country in a config
|
||||
rlist = 'https://www.archlinux.org/mirrorlist/?country=US&protocol=http&protocol=https&ip_version=4&use_mirror_status=on'
|
||||
# If local_mirror is set to None, don't do any modifications.
|
||||
# If it's a dict in the format of:
|
||||
# local_mirror = {'profile': 'PROFILE_NAME',
|
||||
# 'url': 'http://host/arch/%os/$arch',
|
||||
# 'state_file': '/var/lib/netctl/netctl.state'}
|
||||
# Then we will check 'state_file'. If its contents match 'profile',
|
||||
# then we will add 'url' to the *top* of mfile.
|
||||
# TODO: I need to move this to a config.
|
||||
local_mirror = {'profile': '<PROFILENAME>',
|
||||
'url': 'http://<REPOBOX>/arch/$repo/os/$arch',
|
||||
'state_file': '/var/lib/netctl/netctl.state'}
|
||||
|
||||
def getList(url):
|
||||
with urlopen(url) as http:
|
||||
l = http.read().decode('utf-8')
|
||||
return(l)
|
||||
|
||||
def uncomment(url_list):
|
||||
urls = []
|
||||
if isinstance(url_list, str):
|
||||
url_list = [u.strip() for u in url_list.splitlines()]
|
||||
for u in url_list:
|
||||
u = u.strip()
|
||||
if u == '':
|
||||
continue
|
||||
urls.append(re.sub(r'^\s*#', '', u))
|
||||
return(urls)
|
||||
|
||||
def rankList(mfile):
|
||||
c = ['rankmirrors',
|
||||
'-n', '6',
|
||||
mfile]
|
||||
ranked_urls = subprocess.run(c, stdout = subprocess.PIPE)
|
||||
url_list = ranked_urls.stdout.decode('utf-8').splitlines()
|
||||
for u in url_list[:]:
|
||||
if u.strip() == '':
|
||||
url_list.remove(u)
|
||||
continue
|
||||
if re.match(r'^\s*(#.*)$', u, re.MULTILINE | re.DOTALL):
|
||||
url_list.remove(u)
|
||||
return(url_list)
|
||||
|
||||
def localMirror(url_list):
|
||||
# If checking the state_file doesn't work out, use netctl
|
||||
# directly.
|
||||
if not isinstance(local_mirror, dict):
|
||||
return(url_list)
|
||||
with open(local_mirror['state_file'], 'r') as f:
    # splitlines(), not character iteration, so each entry is a whole profile name.
    state = [s.strip() for s in f.read().splitlines()]
if local_mirror['profile'] in state:
|
||||
url_list.insert(0, 'Server = {0}'.format(local_mirror['url']))
|
||||
return(url_list)
|
||||
|
||||
def writeList(mirrorfile, url_list):
|
||||
with open(mirrorfile, 'w') as f:
|
||||
f.write('{0}\n'.format('\n'.join(url_list)))
|
||||
return()
|
||||
|
||||
if __name__ == '__main__':
|
||||
if os.geteuid() != 0:
|
||||
exit('Must be run as root.')
|
||||
urls = getList(rlist)
|
||||
t = tempfile.mkstemp(text = True)
|
||||
writeList(t[1], uncomment(urls))
|
||||
ranked_mirrors = localMirror(rankList(t[1]))
|
||||
writeList(mfile, ranked_mirrors)
|
||||
os.remove(t[1])
|
||||
arch/repo-maint.py (executable file, 288 lines)
@@ -0,0 +1,288 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import io
|
||||
import os
|
||||
import pprint
|
||||
import re
|
||||
import sys
|
||||
import tarfile
|
||||
|
||||
|
||||
# PREREQS:
|
||||
# Mostly stdlib.
|
||||
#
|
||||
# IF:
|
||||
# 1.) You want to sign or verify packages (-s/--sign and -v/--verify, respectively),
|
||||
# 2.) You want to work with delta updates,
|
||||
# THEN:
|
||||
# 1.) You need to install the python GnuPG GPGME bindings (the "gpg" module; NOT the "gpgme" module). They're
|
||||
# distributed with the GPG source. They're also in PyPI (https://pypi.org/project/gpg/).
|
||||
# 2.) You need to install the xdelta3 module (https://pypi.org/project/xdelta3/).
|
||||
|
||||
# Strips "-<oldver>-<oldrel>_to..." from a delta filename, leaving the pkgname.
_delta_re = re.compile(r'(.*)-[^-]+-[^-]+_to.*')
|
||||
|
||||
|
||||
class RepoMaint(object):
|
||||
def __init__(self, **kwargs):
|
||||
# https://stackoverflow.com/a/2912884/733214
|
||||
user_params = kwargs
|
||||
# Define a set of defaults to update with kwargs since we
|
||||
# aren't explicitly defining params.
|
||||
self.args = {'color': True,
|
||||
'db': './repo.db.tar.xz',
|
||||
'key': None,
|
||||
'pkgs': [],
|
||||
'quiet': False,
|
||||
'sign': False,
|
||||
'verify': False}
|
||||
self.args.update(user_params)
|
||||
self.db_exts = {'db.tar': False, # No compression
|
||||
'db.tar.xz': 'xz',
|
||||
'db.tar.gz': 'gz',
|
||||
'db.tar.bz2': 'bz2',
|
||||
# We explicitly check False vs. None.
|
||||
# For None, we do a custom check and wrap it.
|
||||
# In .Z's case, we use the lzw module. It's the only non-stdlib compression
|
||||
# that Arch Linux repo DB files support.
|
||||
'db.tar.Z': None}
|
||||
self.args['db'] = os.path.abspath(os.path.expanduser(self.args['db']))
|
||||
self.db = None
|
||||
# Validate the DB filename against the known extension map.
_is_valid_repo_db = any(self.args['db'].lower().endswith(ext) for ext in self.db_exts)
if not _is_valid_repo_db:
|
||||
raise ValueError(('Repo DB {0} is not a valid DB type. '
|
||||
'Must be one of {1}.').format(self.args['db'],
|
||||
', '.join(['*.{0}'.format(i) for i in self.db_exts])))
|
||||
self.repo_dir = os.path.dirname(self.args['db'])
|
||||
self.lockfile = '{0}.lck'.format(self.args['db'])
|
||||
os.makedirs(self.repo_dir, exist_ok = True)
|
||||
self.gpg = None
|
||||
self.sigkey = None
|
||||
if self.args['sign'] or self.args['verify']:
|
||||
# Set up GPG handler.
|
||||
self._initGPG()
|
||||
self._importDB()
|
||||
|
||||
def _initGPG(self):
|
||||
import gpg
|
||||
self.gpg = gpg.Context()
|
||||
if self.args['sign']:
|
||||
_seckeys = [k for k in self.gpg.keylist(secret = True) if k.can_sign]
|
||||
if self.args['key']:
|
||||
for k in _seckeys:
|
||||
if self.sigkey:
|
||||
break
|
||||
for s in k.subkeys:
|
||||
if self.sigkey:
|
||||
break
|
||||
if s.can_sign:
|
||||
if self.args['key'].lower() in (s.keyid.lower(),
|
||||
s.fpr.lower()):
|
||||
self.sigkey = k
|
||||
self.gpg.signers = [k]
|
||||
else:
|
||||
# Grab the first key that can sign.
|
||||
if _seckeys:
|
||||
self.sigkey = _seckeys[0]
|
||||
self.gpg.signers = [_seckeys[0]]
|
||||
if not self.args['quiet']:
|
||||
print('Key ID not specified; using {0} as the default'.format(self.sigkey.fpr))
|
||||
if not self.sigkey:
|
||||
raise RuntimeError('Private key ID not found, cannot sign, or no secret keys exist.')
|
||||
# TODO: confirm verifying works without a key
|
||||
return()
|
||||
|
||||
def _LZWcompress(self, data):
|
||||
# Based largely on:
|
||||
# https://github.com/HugoPouliquen/lzw-tools/blob/master/utils/compression.py
|
||||
data_arr = []
|
||||
rawdata = io.BytesIO(data)
|
||||
for i in range(int(len(data) / 2)):
|
||||
data_arr.insert(i, rawdata.read(2))
|
||||
w = bytes()
|
||||
b_size = 256
|
||||
b = []
|
||||
compressed = io.BytesIO()
|
||||
for c in data_arr:
    # c is already a 2-byte bytes object from the read() above; no conversion needed.
    wc = w + c
|
||||
if wc in b:
|
||||
w = wc
|
||||
else:
|
||||
b.insert(b_size, wc)
|
||||
compressed.write(b.index(wc).to_bytes(2, 'big'))
|
||||
b_size += 1
|
||||
w = c
|
||||
return(compressed.getvalue())
|
||||
|
||||
def _LZWdecompress(self, data):
|
||||
# Based largely on:
|
||||
# https://github.com/HugoPouliquen/lzw-tools/blob/master/utils/decompression.py
|
||||
b_size = 256
|
||||
b = []
|
||||
out = io.BytesIO()
|
||||
for i in range(b_size):
|
||||
b.insert(i, i.to_bytes(2, 'big'))
|
||||
w = data.pop(0)
|
||||
out.write(w)
|
||||
i = 0
|
||||
for byte in data:
|
||||
x = int.from_bytes(byte, byteorder = 'big')
|
||||
if x < b_size:
|
||||
entry = b[x]
|
||||
elif x == b_size:
    # The classic LZW special case: w plus its own first (2-byte) symbol.
    entry = w + w[:2]
|
||||
else:
|
||||
raise ValueError('Bad uncompressed value for "{0}"'.format(byte))
|
||||
for y in entry:
|
||||
if i % 2 == 1:
|
||||
out.write(y.to_bytes(1, byteorder = 'big'))
|
||||
i += 1
|
||||
b.insert(b_size, w + entry[:2])  # w plus the first symbol of entry; x is an int, not bytes
|
||||
b_size += 1
|
||||
w = entry
|
||||
return(out.getvalue())
|
||||
|
||||
def _importDB(self):
|
||||
# Get the compression type.
|
||||
for ct in self.db_exts:
    if self.args['db'].lower().endswith(ct):
        # Per the map above: None flags the custom handler, False a plain
        # uncompressed tar, and a string one of the stdlib codecs.
        if self.db_exts[ct] is None:
            if ct.endswith('.Z'):  # Currently the only custom one.
                pass  # TODO: route the payload through self._LZWdecompress().
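        # (Sketch: the method stops short above; presumably the stdlib-supported
        # types continue along these lines. Keeping members in self.db as a
        # name -> bytes dict is an assumption.)
        else:
            mode = 'r:{0}'.format(self.db_exts[ct] or '')
            with tarfile.open(self.args['db'], mode = mode) as db:
                self.db = {m.name: db.extractfile(m).read()
                           for m in db.getmembers() if m.isfile()}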
|
||||
|
||||
|
||||
def add(self):
|
||||
# Fresh pkg set (in case the instance was re-used).
|
||||
self.pkgs = {}
|
||||
# First handle any wildcard
|
||||
for p in self.args['pkgs'][:]:
|
||||
if p.strip() == '*':
|
||||
for root, dirs, files in os.walk(self.repo_dir):
|
||||
for f in files:
|
||||
abspath = os.path.join(root, f)
|
||||
if f.endswith('.pkg.tar.xz'): # Recommended not to be changed per makepkg.conf
|
||||
if abspath not in self.args['pkgs']:
|
||||
self.args['pkgs'].append(abspath)
|
||||
if self.args['delta']:
|
||||
if f.endswith('.delta'):
|
||||
if abspath not in self.args['pkgs']:
|
||||
self.args['pkgs'].append(abspath)
|
||||
self.args['pkgs'].remove(p)
|
||||
# Then de-dupe and convert to full path.
|
||||
self.args['pkgs'] = sorted(list(set([os.path.abspath(os.path.expanduser(d)) for d in self.args['pkgs']])))
|
||||
for p in self.args['pkgs']:
|
||||
pkgfnm = os.path.basename(p)
|
||||
if p.endswith('.delta'):
|
||||
pkgnm = _delta_re.sub(r'\g<1>', os.path.basename(pkgfnm))
|
||||
|
||||
return()
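# A sketch of generating the .delta payloads mentioned in the prereqs, assuming
# the PyPI xdelta3 module's encode(source, target)/decode(source, delta) API;
# old_path/new_path/delta_path are placeholder filenames:
def _make_delta(old_path, new_path, delta_path):
    import xdelta3
    with open(old_path, 'rb') as f:
        old = f.read()
    with open(new_path, 'rb') as f:
        new = f.read()
    with open(delta_path, 'wb') as f:
        f.write(xdelta3.encode(old, new))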
|
||||
|
||||
def remove(self):
|
||||
for p in self.args['pkgs']:
|
||||
pass
|
||||
return()
|
||||
|
||||
|
||||
def hatch():
|
||||
import base64
|
||||
import lzma
|
||||
import random
|
||||
h = ((
|
||||
'/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4AB6AEtdABBok+MQCtEh'
|
||||
'BisubEtc2ebacaLGrSRAMmHrcwUr39J24q4iODdNz7wfQl9e6I3C'
|
||||
'ooyuOkptNISdo50CRdknGAU4JBBh+IQTkHwiAAAABW1d7drLmkUA'
|
||||
'AWd7/+DtzR+2830BAAAAAARZWg=='
|
||||
).encode('utf-8'),
|
||||
(
|
||||
'/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4AHEALtdABBpE/AVEKFC'
|
||||
'fdT16ly2cCwT/MnXTY2D4r8nWgH6mLetLPn17nza3ZK+tSFU7d5j'
|
||||
'my91M8fvPGu9Tf0NYkWlRU7vJM8r2V3kK/Gs6/GS7tq2qIum/C/X'
|
||||
'sOnYUewVB2yMvlACqwp3gWJlmXSfwcpGiU662EmATS8kUgF+OdP+'
|
||||
'EATXhM/1bAn07wJbVWPoAL2SBmJBo2zL1tXQklbQu1J20eWfd1bD'
|
||||
'cgSBGqcU1/CdHnW6lcb6BmWKTg0p9IAAAEoEyN1gLkAMAAHXAcUD'
|
||||
'AACXcduyscRn+wIAAAAABFla'
|
||||
).encode('utf-8'))
|
||||
h = lzma.decompress(base64.b64decode(h[random.randint(0, 1)]))
|
||||
return(h.decode('utf-8'))
|
||||
|
||||
|
||||
def parseArgs():
|
||||
args = argparse.ArgumentParser(description = ('Python implementation of repo-add/repo-remove.'),
|
||||
epilog = ('See https://wiki.archlinux.org/index.php/Pacman/'
|
||||
'Tips_and_tricks#Custom_local_repository for more information.\n'
|
||||
'Each operation has sub-help (e.g. "... add -h")'),
|
||||
formatter_class = argparse.RawDescriptionHelpFormatter)
|
||||
operargs = args.add_subparsers(dest = 'oper',
|
||||
help = ('Operation to perform'))
|
||||
commonargs = argparse.ArgumentParser(add_help = False)
|
||||
commonargs.add_argument('db',
|
||||
metavar = '</path/to/repository/repo.db.tar.xz>',
|
||||
help = ('The path to the repository DB (required)'))
|
||||
commonargs.add_argument('pkgs',
|
||||
nargs = '+',
|
||||
metavar = '<package|delta>',
|
||||
help = ('Package filepath (for adding)/name (for removing) or delta; '
|
||||
'can be specified multiple times (at least 1 required)'))
|
||||
commonargs.add_argument('--nocolor',
|
||||
dest = 'color',
|
||||
action = 'store_false',
|
||||
help = ('If specified, turn off color in output (currently does nothing; '
|
||||
'output is currently not colorized)'))
|
||||
commonargs.add_argument('-q', '--quiet',
|
||||
dest = 'quiet',
|
||||
action = 'store_true',
|
||||
help = ('Minimize output'))
|
||||
commonargs.add_argument('-s', '--sign',
|
||||
dest = 'sign',
|
||||
action = 'store_true',
|
||||
help = ('If specified, sign database with GnuPG after update'))
|
||||
commonargs.add_argument('-k', '--key',
|
||||
metavar = 'KEY_ID',
|
||||
nargs = 1,
|
||||
help = ('Use the specified GPG key to sign the database '
|
||||
'(only used if -s/--sign is active)'))
|
||||
commonargs.add_argument('-v', '--verify',
|
||||
dest = 'verify',
|
||||
action = 'store_true',
|
||||
help = ('If specified, verify the database\'s signature before update'))
|
||||
addargs = operargs.add_parser('add',
|
||||
parents = [commonargs],
|
||||
help = ('Add package(s) to a repository'))
|
||||
remargs = operargs.add_parser('remove',
|
||||
parents = [commonargs],
|
||||
help = ('Remove package(s) from a repository'))
|
||||
addargs.add_argument('-d', '--delta',
|
||||
dest = 'delta',
|
||||
action = 'store_true',
|
||||
help = ('If specified, generate and add package deltas for the update'))
|
||||
addargs.add_argument('-n', '--new',
|
||||
dest = 'new_only',
|
||||
action = 'store_true',
|
||||
help = ('If specified, only add packages that are not already in the database'))
|
||||
addargs.add_argument('-R', '--remove',
|
||||
dest = 'remove_old',
|
||||
action = 'store_true',
|
||||
help = ('If specified, remove old packages from disk after updating the database'))
|
||||
# Removal args have no add'l arguments, just the common ones.
|
||||
return(args)
|
||||
|
||||
def main():
|
||||
if (len(sys.argv) == 2) and (sys.argv[1] == 'elephant'):
|
||||
print(hatch())
|
||||
return()
|
||||
else:
|
||||
rawargs = parseArgs()
|
||||
args = rawargs.parse_args()
|
||||
if not args.oper:
|
||||
rawargs.print_help()
|
||||
exit()
|
||||
rm = RepoMaint(**vars(args))
|
||||
if args.oper == 'add':
|
||||
rm.add()
|
||||
elif args.oper == 'remove':
|
||||
rm.remove()
|
||||
return()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,151 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import configparser
|
||||
import datetime
|
||||
import os
|
||||
import pprint
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
cfgfile = os.path.join(os.environ['HOME'], '.arch.repoclone.ini')
|
||||
|
||||
# Rsync options
|
||||
opts = [
|
||||
'--recursive', # recurse into directories
|
||||
'--times', # preserve modification times
|
||||
'--links', # copy symlinks as symlinks
|
||||
'--hard-links', # preserve hard links
|
||||
'--quiet', # suppress non-error messages
|
||||
'--delete-after', # receiver deletes after transfer, not during
|
||||
'--delay-updates', # put all updated files into place at end
|
||||
'--copy-links', # transform symlink into referent file/dir
|
||||
'--safe-links', # ignore symlinks that point outside the tree
|
||||
#'--max-delete', # don't delete more than NUM files
|
||||
'--delete-excluded', # also delete excluded files from dest dirs
|
||||
'--exclude=.*' # exclude files matching PATTERN
|
||||
]
|
||||
|
||||
def sync(args):
|
||||
with open(os.devnull, 'w') as devnull:
|
||||
mntchk = subprocess.run(['findmnt', args['mount']], stdout = devnull, stderr = devnull)
|
||||
if mntchk.returncode != 0:
|
||||
exit('!! BAILING OUT; {0} isn\'t mounted !!'.format(args['mount']))
|
||||
if args['bwlimit'] >= 1:
|
||||
opts.insert(10, '--bwlimit=' + str(args['bwlimit'])) # limit socket I/O bandwidth
|
||||
for k in ('destination', 'logfile', 'lockfile'):
|
||||
os.makedirs(os.path.dirname(args[k]), exist_ok = True)
|
||||
paths = os.environ['PATH'].split(':')
|
||||
rsync = '/usr/bin/rsync' # set the default
|
||||
for p in paths:
|
||||
testpath = os.path.join(p, 'rsync')
|
||||
if os.path.isfile(testpath):
|
||||
rsync = testpath # in case rsync isn't in /usr/bin/rsync
|
||||
break
|
||||
cmd = [rsync] # the path to the binary
|
||||
cmd.extend(opts) # the arguments
|
||||
# TODO: implement repos here?
|
||||
cmd.append(os.path.join(args['mirror'], '.')) # the path on the remote mirror
|
||||
cmd.append(os.path.join(args['destination'], '.')) # the local destination
|
||||
if os.path.isfile(args['lockfile']):
|
||||
with open(args['lockfile'], 'r') as f:
|
||||
existingpid = f.read().strip()
|
||||
if os.isatty(sys.stdin.fileno()):
|
||||
# Running from shell
|
||||
exit('!! A repo synchronization seems to already be running (PID: {0}). Quitting. !!'.format(existingpid))
|
||||
else:
|
||||
exit() # we're running in cron, shut the hell up.
|
||||
else:
|
||||
with open(args['lockfile'], 'w') as f:
|
||||
f.write(str(os.getpid()))
|
||||
with open(args['logfile'], 'a') as log:
|
||||
c = subprocess.run(cmd, stdout = log, stderr = subprocess.PIPE)
|
||||
now = int(datetime.datetime.timestamp(datetime.datetime.utcnow()))
|
||||
with open(os.path.join(args['destination'], 'lastsync'), 'w') as f:
|
||||
f.write(str(now) + '\n')
|
||||
os.remove(args['lockfile'])
|
||||
# Only report errors at the end of the run if we aren't running in cron. Otherwise, log them.
|
||||
errors = c.stderr.decode('utf-8').splitlines()
|
||||
if os.isatty(sys.stdin.fileno()):
|
||||
print('We encountered some errors:')
|
||||
for e in errors:
|
||||
if e.startswith('symlink has no referent: '):
|
||||
print('Broken upstream symlink: {0}'.format(e.split()[1].replace('"', '')))
|
||||
else:
|
||||
print(e)
|
||||
else:
|
||||
with open(args['logfile'], 'a') as f:
|
||||
for e in errors:
|
||||
f.write('{0}\n'.format(e))
|
||||
return()
|
||||
|
||||
def getDefaults():
|
||||
# Hardcoded defaults
|
||||
dflt = {'mirror': 'rsync://mirror.square-r00t.net/arch/',
|
||||
'repos': 'core,extra,community,multilib,iso/latest',
|
||||
'destination': '/srv/repos/arch',
|
||||
'mount': '/',
|
||||
'bwlimit': 0,
|
||||
'lockfile': '/var/run/repo-sync.lck',
|
||||
'logfile': '/var/log/repo/arch.log'}
|
||||
realcfg = configparser.ConfigParser(defaults = dflt)
|
||||
if not os.path.isfile(cfgfile):
|
||||
with open(cfgfile, 'w') as f:
|
||||
realcfg.write(f)
|
||||
realcfg.read(cfgfile)
|
||||
return(realcfg)
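# For reference, the file getDefaults() writes on first run looks like this
# (configparser serializes the hardcoded defaults under [DEFAULT]):
#
# [DEFAULT]
# mirror = rsync://mirror.square-r00t.net/arch/
# repos = core,extra,community,multilib,iso/latest
# destination = /srv/repos/arch
# mount = /
# bwlimit = 0
# lockfile = /var/run/repo-sync.lck
# logfile = /var/log/repo/arch.log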
|
||||
|
||||
def parseArgs():
|
||||
cfg = getDefaults()
|
||||
liveopts = cfg['DEFAULT']
|
||||
args = argparse.ArgumentParser(description = 'Synchronization for a remote Arch repository to a local one.',
|
||||
epilog = ('This program will write a default configuration file to {0} '
          'if one is not found.').format(cfgfile))
|
||||
args.add_argument('-m',
|
||||
'--mirror',
|
||||
dest = 'mirror',
|
||||
default = liveopts['mirror'],
|
||||
help = ('The upstream mirror to sync from; must be an rsync URI '
        '(Default: {0})').format(liveopts['mirror']))
|
||||
# TODO: can we do this?
|
||||
# args.add_argument('-r',
|
||||
# '--repos',
|
||||
# dest = 'repos',
|
||||
# default = liveopts['repos'],
|
||||
# help = ('The repositories to sync; must be a comma-separated list. ' +
|
||||
# '(Currently not used.) Default: {0}').format(','.join(liveopts['repos'])))
|
||||
args.add_argument('-d',
|
||||
'--destination',
|
||||
dest = 'destination',
|
||||
default = liveopts['destination'],
|
||||
help = 'The destination directory to sync to. Default: {0}'.format(liveopts['destination']))
|
||||
args.add_argument('-b',
|
||||
'--bwlimit',
|
||||
dest = 'bwlimit',
|
||||
default = liveopts['bwlimit'],
|
||||
type = int,
|
||||
help = 'The amount, in Kilobytes per second, to throttle the sync to. Default is to not throttle (0).')
|
||||
args.add_argument('-l',
|
||||
'--log',
|
||||
dest = 'logfile',
|
||||
default = liveopts['logfile'],
|
||||
help = 'The path to the logfile. Default: {0}'.format(liveopts['logfile']))
|
||||
args.add_argument('-L',
|
||||
'--lock',
|
||||
dest = 'lockfile',
|
||||
default = liveopts['lockfile'],
|
||||
help = 'The path to the lockfile. Default: {0}'.format(liveopts['lockfile']))
|
||||
args.add_argument('-M',
|
||||
'--mount',
|
||||
dest = 'mount',
|
||||
default = liveopts['mount'],
|
||||
help = 'The mountpoint for your --destination. The script will exit if this point is not mounted. ' +
|
||||
'If you don\'t need mount checking, just use /. Default: {0}'.format(liveopts['mount']))
|
||||
return(args)
|
||||
|
||||
def main():
|
||||
args = vars(parseArgs().parse_args())
|
||||
sync(args)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
centos/extract_files_package.py (executable file, 207 lines)
@@ -0,0 +1,207 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Supports CentOS 6.9 and up, untested on lower versions.
|
||||
# Lets you extract files for a given package name(s) without installing
|
||||
# any extra packages (such as yum-utils for repoquery).
|
||||
|
||||
# NOTE: If you're on CentOS 6.x, which ships an ancient Python, you need to either install
# python-argparse OR resign yourself to running against all packages with none of the filtering features.
|
||||
try:
|
||||
import argparse
|
||||
has_argparse = True
|
||||
except ImportError:
|
||||
has_argparse = False
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import tempfile
|
||||
# For when CentOS/RHEL switch to python 3 by default (if EVER).
|
||||
import sys
|
||||
pyver = sys.version_info
|
||||
try:
|
||||
import yum
|
||||
# Needed for verbosity
|
||||
from yum.logginglevels import __NO_LOGGING as yum_nolog
|
||||
has_yum = True
|
||||
except ImportError:
|
||||
has_yum = False
|
||||
exit('This script only runs on the system-provided Python on RHEL/CentOS/other RPM-based distros.')
|
||||
try:
|
||||
# pip install libarchive
|
||||
# https://github.com/dsoprea/PyEasyArchive
|
||||
import libarchive.public as lap
|
||||
is_ctype = False
|
||||
except ImportError:
|
||||
try:
|
||||
# pip install libarchive
|
||||
# https://github.com/Changaco/python-libarchive-c
|
||||
import libarchive
|
||||
if 'file_reader' in dir(libarchive):
|
||||
is_legacy = False
|
||||
else:
|
||||
# https://code.google.com/archive/p/python-libarchive
|
||||
is_legacy = True
|
||||
is_ctype = True
|
||||
except ImportError:
|
||||
raise ImportError('Try yum -y install python-libarchive')
|
||||
|
||||
|
||||
class FileExtractor(object):
|
||||
def __init__(self, dest_dir, paths, verbose = False, *args, **kwargs):
|
||||
self.dest_dir = os.path.abspath(os.path.expanduser(dest_dir))
|
||||
self.verbose = verbose # TODO: print file name as extracting? Verbose as argument?
|
||||
self.rpms = {}
|
||||
if 'pkgs' in kwargs and kwargs['pkgs']:
|
||||
self.pkgs = kwargs['pkgs']
|
||||
self.yum_getFiles()
|
||||
if 'rpm_files' in kwargs and kwargs['rpm_files']:
|
||||
self.rpm_files = kwargs['rpm_files']
|
||||
self.getFiles()
|
||||
if '*' in paths:
|
||||
self.paths = None
|
||||
else:
|
||||
self.paths = [re.sub('^', '.', os.path.abspath(i)) for i in paths]
|
||||
|
||||
def yum_getFiles(self):
|
||||
import logging
|
||||
yumloggers = ['yum.filelogging.RPMInstallCallback', 'yum.verbose.Repos', 'yum.verbose.plugin', 'yum.Depsolve',
|
||||
'yum.verbose', 'yum.plugin', 'yum.Repos', 'yum', 'yum.verbose.YumBase', 'yum.filelogging',
|
||||
'yum.verbose.YumPlugins', 'yum.RepoStorage', 'yum.YumBase', 'yum.filelogging.YumBase',
|
||||
'yum.verbose.Depsolve']
|
||||
# This actually silences everything. Nice.
|
||||
# https://stackoverflow.com/a/46716482/733214
|
||||
if not self.verbose:
|
||||
for loggerName in yumloggers:
|
||||
logger = logging.getLogger(loggerName)
|
||||
logger.setLevel(yum_nolog)
|
||||
# http://yum.baseurl.org/api/yum/yum/__init__.html#yumbase
|
||||
yb = yum.YumBase()
|
||||
yb.conf.downloadonly = True
|
||||
yb.conf.downloaddir = os.path.join(self.dest_dir, '.CACHE')
|
||||
yb.conf.quiet = True
|
||||
yb.conf.assumeyes = True
|
||||
for pkg in self.pkgs:
|
||||
try:
|
||||
p = yb.reinstall(name = pkg)
|
||||
except yum.Errors.ReinstallRemoveError:
|
||||
p = yb.install(name = pkg)
|
||||
p = p[0]
|
||||
# I am... not 100% certain on this. Might be a better way?
|
||||
fname = '{0}-{3}-{4}.{1}.rpm'.format(*p.pkgtup)
|
||||
self.rpms[pkg] = os.path.join(yb.conf.downloaddir, fname)
|
||||
yb.buildTransaction()
|
||||
try:
|
||||
yb.processTransaction()
|
||||
except SystemExit:
|
||||
pass # It keeps passing an exit because it's downloading only. Get it together, RH.
|
||||
yb.closeRpmDB()
|
||||
yb.close()
|
||||
return()
|
||||
|
||||
def getFiles(self):
|
||||
for rf in self.rpm_files:
|
||||
# TODO: check if we have the rpm module and if so, rip pkg name from it? use that as key instead of rf?
|
||||
self.rpms[os.path.basename(rf)] = os.path.abspath(os.path.expanduser(rf))
|
||||
return()
|
||||
|
||||
def extractFiles(self):
|
||||
# TODO: globbing or regex on self.paths?
|
||||
# If we have yum, we can, TECHNICALLY, do this with:
|
||||
# http://yum.baseurl.org/api/yum/rpmUtils/miscutils.html#rpmUtils.miscutils.rpm2cpio
|
||||
# But nope. We can't selectively decompress members based on path with rpm2cpio-like funcs.
|
||||
# We keep getting extraction artefacts, at least with legacy libarchive_c, so we use a hammer.
|
||||
_curdir = os.getcwd()
|
||||
_tempdir = tempfile.mkdtemp()
|
||||
os.chdir(_tempdir)
|
||||
for rpm_file in self.rpms:
|
||||
rf = self.rpms[rpm_file]
|
||||
if is_ctype:
|
||||
if not is_legacy:
|
||||
# ctype - extracts to pwd
|
||||
with libarchive.file_reader(rf) as reader:
|
||||
for entry in reader:
|
||||
if self.paths and entry.path not in self.paths:
|
||||
continue
|
||||
if entry.isdir():
|
||||
continue
|
||||
fpath = os.path.join(self.dest_dir, rpm_file, entry.path)
|
||||
if not os.path.isdir(os.path.dirname(fpath)):
|
||||
os.makedirs(os.path.dirname(fpath))
|
||||
with open(fpath, 'wb') as f:
|
||||
for b in entry.get_blocks():
|
||||
f.write(b)
|
||||
else:
|
||||
with libarchive.Archive(rf) as reader:
|
||||
for entry in reader:
|
||||
if (self.paths and entry.pathname not in self.paths) or (entry.isdir()):
|
||||
continue
|
||||
fpath = os.path.join(self.dest_dir, rpm_file, entry.pathname)
|
||||
if not os.path.isdir(os.path.dirname(fpath)):
|
||||
os.makedirs(os.path.dirname(fpath))
|
||||
reader.readpath(fpath)
|
||||
else:
|
||||
# pyEasyArchive/"pypi/libarchive"
|
||||
with lap.file_reader(rf) as reader:
|
||||
for entry in reader:
|
||||
if (self.paths and entry.pathname not in self.paths) or (entry.filetype.IFDIR):
|
||||
continue
|
||||
fpath = os.path.join(self.dest_dir, rpm_file, entry.pathname)
|
||||
if not os.path.isdir(os.path.dirname(fpath)):
|
||||
os.makedirs(os.path.dirname(fpath))
|
||||
with open(fpath, 'wb') as f:
|
||||
for b in entry.get_blocks():
|
||||
f.write(b)
|
||||
os.chdir(_curdir)
|
||||
shutil.rmtree(_tempdir)
|
||||
return()
|
||||
|
||||
def parseArgs():
|
||||
args = argparse.ArgumentParser(description = ('This script allows you to extract files for a given package '
|
||||
'{0}without installing any extra packages (such as yum-utils '
|
||||
'for repoquery). '
|
||||
'You must use at least one -r/--rpm{1}.').format(
|
||||
('name(s) ' if has_yum else ''),
|
||||
(', -p/--package, or both' if has_yum else '')))
|
||||
args.add_argument('-d', '--dest-dir',
|
||||
dest = 'dest_dir',
|
||||
default = '/var/tmp/rpm_extract',
|
||||
help = ('The destination for the extracted package file tree (in the format of '
|
||||
'<dest_dir>/<pkg_nm>/<tree>). '
|
||||
'Default: /var/tmp/rpm_extract'))
|
||||
args.add_argument('-r', '--rpm',
|
||||
dest = 'rpm_files',
|
||||
metavar = 'PATH/TO/RPM',
|
||||
action = 'append',
|
||||
default = [],
|
||||
help = ('If specified, use this RPM file instead of the system\'s RPM database. Can be '
|
||||
'specified multiple times'))
|
||||
if has_yum:
|
||||
args.add_argument('-p', '--package',
|
||||
dest = 'pkgs',
|
||||
#nargs = 1,
|
||||
metavar = 'PKGNAME',
|
||||
action = 'append',
|
||||
default = [],
|
||||
help = ('If specified, restrict the list of packages to check against to only this package. '
|
||||
'Can be specified multiple times. HIGHLY RECOMMENDED'))
|
||||
args.add_argument('paths',
|
||||
nargs = '+',
|
||||
metavar = 'path/file/name.ext',
|
||||
help = ('The path(s) of files to extract. If \'*\' is used, extract all files'))
|
||||
return(args)
|
||||
|
||||
def main():
|
||||
if has_argparse:
|
||||
args = vars(parseArgs().parse_args())
|
||||
args['rpm_files'] = [os.path.abspath(os.path.expanduser(i)) for i in args['rpm_files']]
|
||||
if not any((args['rpm_files'], args['pkgs'])):
|
||||
exit(('You have not specified any package files{0}.\n'
|
||||
'This is so dumb we are bailing out.\n').format((' or package names') if has_yum else ''))
|
||||
else:
|
||||
raise RuntimeError('Please yum -y install python-argparse')
|
||||
fe = FileExtractor(**args)
|
||||
fe.extractFiles()
|
||||
return()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
centos/find_changed_confs.py (executable file, 171 lines)
@@ -0,0 +1,171 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Supports CentOS 6.9 and up, untested on lower versions.
|
||||
# Almost certainly won't work on 5.x, which uses MD5(?); 6.5(?) and up use SHA256.
|
||||
|
||||
# TODO: add support for .rpm files (like list_files_package.py)
|
||||
|
||||
import argparse
|
||||
import copy
|
||||
import datetime
|
||||
import hashlib
|
||||
import os
|
||||
import re
|
||||
from sys import version_info as py_ver
|
||||
try:
|
||||
import rpm
|
||||
except ImportError:
|
||||
exit('This script only runs on RHEL/CentOS/other RPM-based distros.')
|
||||
|
||||
# Thanks, dude!
|
||||
# https://blog.fpmurphy.com/2011/08/programmatically-retrieve-rpm-package-details.html
|
||||
|
||||
class PkgChk(object):
|
||||
def __init__(self, dirpath, symlinks = True, pkgs = None):
|
||||
self.path = dirpath
|
||||
self.pkgs = pkgs
|
||||
self.symlinks = symlinks
|
||||
self.orig_pkgs = copy.deepcopy(pkgs)
|
||||
self.pkgfilemap = {}
|
||||
self.flatfiles = []
|
||||
self.flst = {}
|
||||
self.trns = rpm.TransactionSet()
|
||||
self.getFiles()
|
||||
self.getActualFiles()
|
||||
|
||||
def getFiles(self):
|
||||
if not self.pkgs:
    # pkgs may be None; start from an empty list before appending every installed package.
    self.pkgs = []
    for p in self.trns.dbMatch():
        self.pkgs.append(p['name'])
|
||||
for p in self.pkgs:
|
||||
for pkg in self.trns.dbMatch('name', p):
|
||||
# Get the canonical package name
|
||||
_pkgnm = pkg.sprintf('%{NAME}')
|
||||
self.pkgfilemap[_pkgnm] = {}
|
||||
# Get the list of file(s) and their MD5 hash(es)
|
||||
for f in pkg.fiFromHeader():
|
||||
if not f[0].startswith(self.path):
|
||||
continue
|
||||
if f[12] == '0' * 64:
|
||||
_hash = None
|
||||
else:
|
||||
_hash = f[12]
|
||||
self.pkgfilemap[_pkgnm][f[0]] = {'hash': _hash,
|
||||
'date': f[3],
|
||||
'size': f[1]}
|
||||
self.flatfiles.append(f[0])
|
||||
return()
|
||||
|
||||
def getActualFiles(self):
|
||||
print('Getting a list of local files and their hashes.')
|
||||
print('Please wait...\n')
|
||||
for root, dirs, files in os.walk(self.path):
|
||||
for f in files:
|
||||
_fpath = os.path.join(root, f)
|
||||
_stat = os.stat(_fpath)
|
||||
if _fpath in self.flatfiles:
|
||||
_hash = hashlib.sha256()
|
||||
with open(_fpath, 'rb') as r:
|
||||
for chunk in iter(lambda: r.read(4096), b''):
|
||||
_hash.update(chunk)
|
||||
self.flst[_fpath] = {'hash': str(_hash.hexdigest()),
|
||||
'date': int(_stat.st_mtime),
|
||||
'size': _stat.st_size}
|
||||
else:
|
||||
# It's not even in the package, so don't waste time
|
||||
# with generating hashes or anything else.
|
||||
self.flst[_fpath] = {'hash': None}
|
||||
return()
|
||||
|
||||
def compareFiles(self):
|
||||
for f in self.flst.keys():
|
||||
if f not in self.flatfiles:
|
||||
if not self.orig_pkgs:
|
||||
print(('{0} is not installed by any package.').format(f))
|
||||
else:
|
||||
print(('{0} is not installed by package(s) ' +
|
||||
'specified.').format(f))
|
||||
else:
|
||||
for p in self.pkgs:
|
||||
if f not in self.pkgfilemap[p].keys():
|
||||
continue
|
||||
if (f in self.flst.keys() and
|
||||
(self.flst[f]['hash'] !=
|
||||
self.pkgfilemap[p][f]['hash'])):
|
||||
if not self.symlinks:
|
||||
if ((not self.pkgfilemap[p][f]['hash'])
|
||||
or re.search('^0+$',
|
||||
self.pkgfilemap[p][f]['hash'])):
|
||||
continue
|
||||
r_time = datetime.datetime.fromtimestamp(
|
||||
self.pkgfilemap[p][f]['date'])
|
||||
r_hash = self.pkgfilemap[p][f]['hash']
|
||||
r_size = self.pkgfilemap[p][f]['size']
|
||||
l_time = datetime.datetime.fromtimestamp(
|
||||
self.flst[f]['date'])
|
||||
l_hash = self.flst[f]['hash']
|
||||
l_size = self.flst[f]['size']
|
||||
r_str = ('\n{0} differs per {1}:\n' +
|
||||
'\tRPM:\n' +
|
||||
'\t\tSHA256: {2}\n' +
|
||||
'\t\tBYTES: {3}\n' +
|
||||
'\t\tDATE: {4}').format(f, p,
|
||||
r_hash,
|
||||
r_size,
|
||||
r_time)
|
||||
l_str = ('\tLOCAL:\n' +
|
||||
'\t\tSHA256: {0}\n' +
|
||||
'\t\tBYTES: {1}\n' +
|
||||
'\t\tDATE: {2}').format(l_hash,
|
||||
l_size,
|
||||
l_time)
|
||||
print(r_str)
|
||||
print(l_str)
|
||||
# Now we print missing files
|
||||
for f in sorted(list(set(self.flatfiles))):
|
||||
if not os.path.exists(f):
|
||||
print('{0} was deleted from the filesystem.'.format(f))
|
||||
return()
|
||||
|
||||
def parseArgs():
|
||||
def dirchk(path):
|
||||
p = os.path.abspath(path)
|
||||
if not os.path.isdir(p):
|
||||
raise argparse.ArgumentTypeError(('{0} is not a valid ' +
|
||||
'directory').format(path))
|
||||
return(p)
|
||||
args = argparse.ArgumentParser(description = ('Get a list of config ' +
|
||||
'files that have changed ' +
|
||||
'from the package\'s ' +
|
||||
'defaults'))
|
||||
args.add_argument('-l', '--ignore-symlinks',
|
||||
dest = 'symlinks',
|
||||
action = 'store_false',
|
||||
help = ('If specified, don\'t track files that are ' +
|
||||
'symlinks in the RPM'))
|
||||
args.add_argument('-p', '--package',
|
||||
dest = 'pkgs',
|
||||
#nargs = 1,
|
||||
metavar = 'PKGNAME',
|
||||
action = 'append',
|
||||
default = [],
|
||||
help = ('If specified, restrict the list of ' +
|
||||
'packages to check against to only this ' +
|
||||
'package. Can be specified multiple times. ' +
|
||||
'HIGHLY RECOMMENDED'))
|
||||
args.add_argument('dirpath',
|
||||
type = dirchk,
|
||||
metavar = 'path/to/directory',
|
||||
help = ('The path to the directory containing the ' +
|
||||
'configuration files to check against (e.g. ' +
|
||||
'"/etc/ssh")'))
|
||||
return(args)
|
||||
|
||||
def main():
|
||||
args = vars(parseArgs().parse_args())
|
||||
p = PkgChk(**args)
|
||||
p.compareFiles()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
centos/isomirror_sort.py (executable file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# requires python lxml module as well
|
||||
import os
|
||||
import socket
|
||||
import time
|
||||
from urllib.request import urlopen
|
||||
from urllib.parse import urlparse
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
# The page that contains the list of (authoritative ISO) mirrors
|
||||
URL = 'http://isoredirect.centos.org/centos/7/isos/x86_64/'
|
||||
# The formatting on the page is pretty simple - no divs, etc. - so we need to
|
||||
# blacklist some links we pull in.
|
||||
blacklisted_link_URLs = ('http://bittorrent.com/',
|
||||
'http://wiki.centos.org/AdditionalResources/Repositories')
|
||||
|
||||
mirrors = {}
|
||||
|
||||
dflt_ports = {'https': 443, # unlikely. "HTTPS is currently not used for mirrors." per https://wiki.centos.org/HowTos/CreatePublicMirrors
|
||||
'http': 80, # most likely.
|
||||
'ftp': 21,
|
||||
'rsync': 873}
|
||||
|
||||
def getMirrors():
|
||||
mirrors = []
|
||||
with urlopen(URL) as u:
|
||||
pg_src = u.read().decode('utf-8')
|
||||
soup = BeautifulSoup(pg_src, 'lxml')
|
||||
for tag in soup.find_all('br')[4].next_siblings:
|
||||
if tag.name == 'a' and tag['href'] not in blacklisted_link_URLs:
|
||||
mirrors.append(tag['href'].strip())
|
||||
return(mirrors)
|
||||
|
||||
def getHosts(mirror):
|
||||
port = None
|
||||
fqdn = None
|
||||
login = ''
|
||||
# "mirror" should be a base URI of the CentOS mirror path.
|
||||
# mirrors.centos.org is pointless to use for this!
|
||||
#url = os.path.join(mirror, 'sha256sum.txt.asc')
|
||||
uri = urlparse(mirror)
|
||||
spl_dom = uri.netloc.split(':')
|
||||
if len(spl_dom) >= 2: # more complex URI
|
||||
if len(spl_dom) == 2: # probably domain:port?
|
||||
try:
|
||||
port = int(spl_dom[-1])  # [-1:] is a list; int() needs the string itself
|
||||
except ValueError: # ooookay, so it's not domain:port, it's a user:pass@
|
||||
if '@' in uri.netloc:
|
||||
auth = uri.netloc.split('@')
|
||||
fqdn = auth[1]
|
||||
login = auth[0] + '@'
|
||||
elif len(spl_dom) > 2: # even more complex URI, which ironically makes parsing easier
|
||||
auth = uri.netloc.split('@')
|
||||
fqdn = spl_dom[1].split('@')[1]
|
||||
port = int(spl_dom[-1])
|
||||
login = auth[0] + '@'
|
||||
# Matches missing values and simple URIs, i.e. 99%+ of the mirror URIs being passed.
|
||||
if not fqdn:
|
||||
fqdn = uri.netloc
|
||||
if not port:
|
||||
port = dflt_ports[uri.scheme]
|
||||
mirrors[fqdn] = {'proto': uri.scheme,
|
||||
'port': port,
|
||||
'path': uri.path,
|
||||
'auth': login}
|
||||
return()
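# Note: most of the manual netloc splitting above can be done by urlparse's own
# accessors, which understand user:pass@host:port directly:
#
# uri = urlparse('ftp://user:pw@mirror.example.com:2121/centos/')
# uri.hostname  -> 'mirror.example.com'
# uri.port      -> 2121 (None if absent)
# uri.username  -> 'user'
# uri.password  -> 'pw'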
|
||||
|
||||
def getSpeeds():
|
||||
for fqdn in mirrors.keys():
|
||||
start = time.time()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Don't let a single dead mirror hang the entire ranking.
sock.settimeout(10)
sock.connect((fqdn, mirrors[fqdn]['port']))
|
||||
mirrors[fqdn]['time'] = time.time() - start
|
||||
sock.close()
|
||||
return()
|
||||
|
||||
def main():
|
||||
for m in getMirrors():
|
||||
getHosts(m)
|
||||
getSpeeds()
|
||||
ranking = sorted(mirrors.keys(), key = lambda k: (mirrors[k]['time']))
|
||||
for i in ranking:
|
||||
str_port = ':' + str(mirrors[i]['port'])
|
||||
if mirrors[i]['port'] in dflt_ports.values():
|
||||
str_port = ''
|
||||
print('{proto}://{auth}{0}{p}{path}'.format(i,
|
||||
**mirrors[i],
|
||||
p = str_port))
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
centos/list_files_package.py (executable file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
#!/usr/bin/env python

# Supports CentOS 6.9 and up, untested on lower versions.
# Lets you get a list of files for a given package name(s) without installing
# any extra packages (such as yum-utils for repoquery).

# NOTE: If you're on CentOS 6.x, since it uses such an ancient version of python you need to either install
# python-argparse OR just resign to using it for all packages with none of the features.
try:
    import argparse
    has_argparse = True
except ImportError:
    has_argparse = False
import json
import os
import re
# For when CentOS/RHEL switch to python 3 by default (if EVER).
import sys
pyver = sys.version_info
try:
    import rpm
except ImportError:
    exit('This script only runs on the system-provided Python on RHEL/CentOS/other RPM-based distros.')


def all_pkgs():
    # Gets a list of all packages.
    pkgs = []
    trns = rpm.TransactionSet()
    for p in trns.dbMatch():
        pkgs.append(p['name'])
    pkgs = list(sorted(set(pkgs)))
    return(pkgs)


class FileGetter(object):
    def __init__(self, symlinks = True, verbose = False, *args, **kwargs):
        self.symlinks = symlinks
        self.verbose = verbose
        self.trns = rpm.TransactionSet()
        self.files = {}
        for p in kwargs['pkgs']:
            if p not in self.files.keys():
                self.getFiles(p)
        if kwargs['rpm_files']:
            self.getLocalFiles(kwargs['rpm_files'])

    def getLocalFiles(self, rpm_files):
        # Needed because the rpm module can't handle arbitrary rpm files??? If it can, someone let me know.
        # According to http://rpm5.org/docs/api/classRpmhdr.html#_details it can.
        import yum
        for r in rpm_files:
            pkg = yum.YumLocalPackage(ts = self.trns,
                                      filename = r)
            _pkgnm = pkg.hdr.sprintf('%{NAME}')
            if _pkgnm in self.files:
                continue
            if self.verbose:
                self.files[_pkgnm] = {}
            else:
                self.files[_pkgnm] = []
            for f in pkg.hdr.fiFromHeader():
                _symlink = (True if re.search('^0+$', f[12]) else False)
                if self.verbose:
                    if _symlink:
                        if self.symlinks:
                            self.files[_pkgnm][f[0]] = '(symbolic link or directory)'
                        continue
                    self.files[_pkgnm][f[0]] = f[12]
                else:
                    # Skip if it is a symlink but they aren't enabled
                    if _symlink and not self.symlinks:
                        continue
                    else:
                        self.files[_pkgnm].append(f[0])
            # Only the plain list form can be sorted; the verbose form is a
            # dict (which has no .sort()).
            if not self.verbose:
                self.files[_pkgnm].sort()
        return()

    def getFiles(self, pkgnm):
        for pkg in self.trns.dbMatch('name', pkgnm):
            # The canonical package name
            _pkgnm = pkg.sprintf('%{NAME}')
            # Return just a list of files, or a dict of filepath:hash if verbose is enabled.
            if self.verbose:
                self.files[_pkgnm] = {}
            else:
                self.files[_pkgnm] = []
            for f in pkg.fiFromHeader():
                _symlink = (True if re.search('^0+$', f[12]) else False)
                if self.verbose:
                    if _symlink:
                        if self.symlinks:
                            self.files[_pkgnm][f[0]] = '(symbolic link)'
                        continue
                    self.files[_pkgnm][f[0]] = f[12]
                else:
                    # Skip if it is a symlink but they aren't enabled
                    if _symlink and not self.symlinks:
                        continue
                    else:
                        self.files[_pkgnm].append(f[0])
            # Only the plain list form can be sorted; the verbose form is a
            # dict (which has no .sort()).
            if not self.verbose:
                self.files[_pkgnm].sort()
        return()


def parseArgs():
    args = argparse.ArgumentParser(description = ('This script allows you to get a list of files for a given package '
                                                  'name(s) without installing any extra packages (such as yum-utils '
                                                  'for repoquery). It is highly recommended to use at least one '
                                                  '-r/--rpm, -p/--package, or both.'))
    args.add_argument('-l', '--ignore-symlinks',
                      dest = 'symlinks',
                      action = 'store_false',
                      help = ('If specified, don\'t report files that are symlinks in the RPM'))
    args.add_argument('-v', '--verbose',
                      dest = 'verbose',
                      action = 'store_true',
                      help = ('If specified, include the hashes of the files'))
    args.add_argument('-r', '--rpm',
                      dest = 'rpm_files',
                      metavar = 'PATH/TO/RPM',
                      action = 'append',
                      default = [],
                      help = ('If specified, use this RPM file instead of the system\'s RPM database. Can be '
                              'specified multiple times'))
    args.add_argument('-p', '--package',
                      dest = 'pkgs',
                      #nargs = 1,
                      metavar = 'PKGNAME',
                      action = 'append',
                      default = [],
                      help = ('If specified, restrict the list of packages to check against to only this package. Can '
                              'be specified multiple times. HIGHLY RECOMMENDED'))
    return(args)


def main():
    if has_argparse:
        args = vars(parseArgs().parse_args())
        args['rpm_files'] = [os.path.abspath(os.path.expanduser(i)) for i in args['rpm_files']]
        if not any((args['rpm_files'], args['pkgs'])):
            prompt_str = ('You have not specified any package names.\nThis means we will get file lists for EVERY SINGLE '
                          'installed package.\nThis is a LOT of output and can take a few moments.\nIf this was a mistake, '
                          'you can hit ctrl-c now.\nOtherwise, hit the enter key to continue.\n')
            sys.stderr.write(prompt_str)
            if pyver.major >= 3:
                input()
            elif pyver.major == 2:
                raw_input()
            args['pkgs'] = all_pkgs()
    else:
        args = {'pkgs': all_pkgs(),
                'rpm_files': []}
    gf = FileGetter(**args)
    print(json.dumps(gf.files, indent = 4))
    return()


if __name__ == '__main__':
    main()
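For reference, the rpm calls the FileGetter class wraps can be exercised interactively; a minimal sketch that mirrors the same calls the script makes (assuming the distro rpm bindings and an installed 'bash' package) is:

import rpm

ts = rpm.TransactionSet()
for hdr in ts.dbMatch('name', 'bash'):
    # fiFromHeader() yields per-file tuples; index 0 is the path and
    # index 12 is the file digest (all zeroes for non-regular files).
    for f in hdr.fiFromHeader():
        print(f[0])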
192
centos/list_pkgs.py
Executable file
@@ -0,0 +1,192 @@
#!/usr/bin/env python

# Supports CentOS 6.9 and up, untested on lower versions.
# Lets you dump a list of installed packages for backup purposes.
# Reference: https://blog.fpmurphy.com/2011/08/programmatically-retrieve-rpm-package-details.html

import argparse
import copy
import datetime
import io
import re
import sys
try:
    import yum
except ImportError:
    exit('This script only runs on RHEL/CentOS/other yum-based distros.')
# Detect RH version.
ver_re = re.compile('^(centos( linux)? release) ([0-9\.]+) .*$', re.IGNORECASE)
# distro module isn't stdlib, and platform.linux_distribution() (AND platform.dist()) are both deprecated in 3.7.
# So we get hacky.
with open('/etc/redhat-release', 'r') as f:
    ver = [int(i) for i in ver_re.sub('\g<3>', f.read().strip()).split('.')]
import pprint


repo_re = re.compile('^@')


class PkgIndexer(object):
    def __init__(self, **args):
        self.pkgs = []
        self.args = args
        self.yb = yum.YumBase()
        # Make the Yum API shut the heck up.
        self.yb.preconf.debuglevel = 0
        self.yb.preconf.errorlevel = 0
        self._pkgs = self._pkglst()
        self._build_pkginfo()
        if self.args['report'] == 'csv':
            self._gen_csv()
        elif self.args['report'] == 'json':
            self._gen_json()
        elif self.args['report'] == 'xml':
            self._gen_xml()

    def _pkglst(self):
        pkgs = []
        # Get the list of packages
        if self.args['reason'] != 'all':
            for p in sorted(self.yb.rpmdb.returnPackages()):
                if 'reason' not in p.yumdb_info:
                    continue
                reason = getattr(p.yumdb_info, 'reason')
                if reason == self.args['reason']:
                    pkgs.append(p)
        else:
            pkgs = sorted(self.yb.rpmdb.returnPackages())
        return(pkgs)

    def _build_pkginfo(self):
        for p in self._pkgs:
            _pkg = {'name': p.name,
                    'desc': p.summary,
                    'version': p.ver,
                    'release': p.release,
                    'arch': p.arch,
                    'built': datetime.datetime.fromtimestamp(p.buildtime),
                    'installed': datetime.datetime.fromtimestamp(p.installtime),
                    'repo': repo_re.sub('', p.ui_from_repo),
                    'sizerpm': p.packagesize,
                    'sizedisk': p.installedsize}
            self.pkgs.append(_pkg)

    def _gen_csv(self):
        if self.args['plain']:
            _fields = ['name']
        else:
            _fields = ['name', 'version', 'release', 'arch', 'desc', 'built',
                       'installed', 'repo', 'sizerpm', 'sizedisk']
        import csv
        if sys.hexversion >= 0x30000f0:
            _buf = io.StringIO()
        else:
            _buf = io.BytesIO()
        _csv = csv.writer(_buf, delimiter = self.args['sep_char'])
        if self.args['header']:
            if self.args['plain']:
                _csv.writerow(['Name'])
            else:
                _csv.writerow(['Name', 'Version', 'Release', 'Architecture', 'Description', 'Build Time',
                               'Install Time', 'Repository', 'Size (RPM)', 'Size (On-Disk)'])
        _csv = csv.DictWriter(_buf, fieldnames = _fields, extrasaction = 'ignore', delimiter = self.args['sep_char'])
        for p in self.pkgs:
            _csv.writerow(p)
        _buf.seek(0, 0)
        self.report = _buf.read().replace('\r\n', '\n')
        return()

    def _gen_json(self):
        import json
        if self.args['plain']:
            self.report = json.dumps([p['name'] for p in self.pkgs], indent = 4)
        else:
            self.report = json.dumps(self.pkgs, default = str, indent = 4)
        return()

    def _gen_xml(self):
        from lxml import etree
        _xml = etree.Element('packages')
        for p in self.pkgs:
            _attrib = copy.deepcopy(p)
            for i in ('built', 'installed', 'sizerpm', 'sizedisk'):
                _attrib[i] = str(_attrib[i])
            if self.args['plain']:
                _pkg = etree.Element('package', attrib = {'name': p['name']})
            else:
                _pkg = etree.Element('package', attrib = _attrib)
            _xml.append(_pkg)
            #del(_attrib['name'])  # I started to make it a more complex, nested structure... is that necessary?
        if self.args['header']:
            self.report = etree.tostring(_xml, pretty_print = True, xml_declaration = True, encoding = 'UTF-8')
        else:
            self.report = etree.tostring(_xml, pretty_print = True)
        return()


def parseArgs():
    args = argparse.ArgumentParser(description = ('This script lets you dump the list of installed packages'))
    args.add_argument('-p', '--plain',
                      dest = 'plain',
                      action = 'store_true',
                      help = ('If specified, only create a list of plain package names (i.e. don\'t include extra '
                              'information)'))
    args.add_argument('-n', '--no-header',
                      dest = 'header',
                      action = 'store_false',
                      help = 'If specified, do not print column headers/XML headers')
    args.add_argument('-s', '--separator',
                      dest = 'sep_char',
                      default = ',',
                      help = ('The separator used to split fields in the output (default: ,) (only used for CSV '
                              'reports)'))
    rprt = args.add_mutually_exclusive_group()
    rprt.add_argument('-c', '--csv',
                      dest = 'report',
                      default = 'csv',
                      action = 'store_const',
                      const = 'csv',
                      help = 'Generate CSV output (this is the default). See -n/--no-header, -s/--separator')
    rprt.add_argument('-x', '--xml',
                      dest = 'report',
                      default = 'csv',
                      action = 'store_const',
                      const = 'xml',
                      help = 'Generate XML output (requires the LXML module: yum install python-lxml)')
    rprt.add_argument('-j', '--json',
                      dest = 'report',
                      default = 'csv',
                      action = 'store_const',
                      const = 'json',
                      help = 'Generate JSON output')
    rsn = args.add_mutually_exclusive_group()
    rsn.add_argument('-a', '--all',
                     dest = 'reason',
                     default = 'all',
                     action = 'store_const',
                     const = 'all',
                     help = ('Parse/report all packages that are currently installed. '
                             'Conflicts with -u/--user and -d/--dep. '
                             'This is the default'))
    rsn.add_argument('-u', '--user',
                     dest = 'reason',
                     default = 'all',
                     action = 'store_const',
                     const = 'user',
                     help = ('Parse/report only packages which were explicitly installed. '
                             'Conflicts with -a/--all and -d/--dep'))
    rsn.add_argument('-d', '--dep',
                     dest = 'reason',
                     default = 'all',
                     action = 'store_const',
                     const = 'dep',
                     help = ('Parse/report only packages which were installed to satisfy a dependency. '
                             'Conflicts with -a/--all and -u/--user'))
    return(args)


def main():
    args = vars(parseArgs().parse_args())
    p = PkgIndexer(**args)
    print(p.report)
    return()


if __name__ == '__main__':
    main()
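The install-reason filtering in _pkglst() leans on yumdb sidecar data; a minimal standalone sketch of the same query (assuming the yum bindings are present; 'reason' is only recorded for packages installed through yum itself) is:

import yum

yb = yum.YumBase()
yb.preconf.debuglevel = 0
for p in sorted(yb.rpmdb.returnPackages()):
    # Print only explicitly-installed packages, like the -u/--user report.
    if getattr(p.yumdb_info, 'reason', None) == 'user':
        print(p.name)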
119
git/remotehooks.py
Executable file
@@ -0,0 +1,119 @@
#!/usr/bin/env python3

import ast  # Needed for localhost cmd strings
import json
import os
import re
import subprocess  # Always needed: local cmds and the non-paramiko ssh fallback both use it.
import sys
modules = {}
try:
    import git
    modules['git'] = True
except ImportError:
    modules['git'] = False
try:
    import paramiko
    import socket
    modules['ssh'] = True
except ImportError:
    modules['ssh'] = False


repos = {}
repos['bdisk'] = {'remotecmds': {'g.rainwreck.com': {'gitbot': {'cmds': ['git -C /var/lib/gitbot/clonerepos/BDisk pull',
                                                                         'git -C /var/lib/gitbot/clonerepos/BDisk pull --tags',
                                                                         'asciidoctor /var/lib/gitbot/clonerepos/BDisk/docs/manual/HEAD.adoc -o /srv/http/bdisk/index.html']}}}}
repos['test'] = {'remotecmds': {'g.rainwreck.com': {'gitbot': {'cmds': ['echo $USER']}}}}
repos['games-site'] = {'remotecmds': {'games.square-r00t.net':
                                      {'gitbot':
                                       {'cmds': ['cd /srv/http/games-site && git pull']}}}}
repos['aif-ng'] = {'cmds': [['asciidoctor', '/opt/git/repo.checkouts/aif-ng/docs/README.adoc', '-o', '/srv/http/aif/index.html']]}


def execHook(gitinfo = False):
    if not gitinfo:
        gitinfo = getGitInfo()
    repo = gitinfo['repo'].lower()
    print('Executing hooks for {0}:{1}...'.format(repo, gitinfo['branch']))
    print('This commit: {0}\nLast commit: {1}'.format(gitinfo['currev'], gitinfo['oldrev']))
    # Execute local commands first
    if 'cmds' in repos[repo].keys():
        for cmd in repos[repo]['cmds']:
            print('\tExecuting {0}...'.format(' '.join(cmd)))
            subprocess.call(cmd)
    if 'remotecmds' in repos[repo].keys():
        for host in repos[repo]['remotecmds'].keys():
            if 'port' in repos[repo]['remotecmds'][host].keys():
                port = int(repos[repo]['remotecmds'][host]['port'])
            else:
                port = 22
            for user in repos[repo]['remotecmds'][host].keys():
                print('{0}@{1}:'.format(user, host))
                if modules['ssh']:  # was "paramikomodule", which is never defined
                    ssh = paramiko.SSHClient()
                    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                    ssh.connect(host, username = user, port = port)
                    try:
                        for cmd in repos[repo]['remotecmds'][host][user]['cmds']:
                            print('\tExecuting \'{0}\'...'.format(cmd))
                            stdin, stdout, stderr = ssh.exec_command(cmd)
                            stdout = stdout.read().decode('utf-8')
                            stderr = stderr.read().decode('utf-8')
                            print(stdout)
                            if stderr != '':
                                print(stderr)
                    except paramiko.AuthenticationException:
                        print('({0}@{1}) AUTHENTICATION FAILED!'.format(user, host))
                    except paramiko.BadHostKeyException:
                        print('({0}@{1}) INCORRECT HOSTKEY!'.format(user, host))
                    except paramiko.SSHException:
                        print('({0}@{1}) FAILED TO ESTABLISH SSH!'.format(user, host))
                    except socket.error:
                        print('({0}@{1}) SOCKET CONNECTION FAILURE! (DNS, timeout/firewall, etc.)'.format(user, host))
                else:
                    for cmd in repos[repo]['remotecmds'][host][user]['cmds']:
                        try:
                            print('\tExecuting \'{0}\'...'.format(cmd))
                            subprocess.call(['ssh', '{0}@{1}'.format(user, host), cmd])
                        except:
                            print('({0}@{1}) An error occurred!'.format(user, host))


def getGitInfo():
    refs = sys.argv[1].split('/')
    gitinfo = {}
    if refs[1] == 'tags':
        gitinfo['branch'] = False
        gitinfo['tag'] = refs[2]
    elif refs[1] == 'heads':
        gitinfo['branch'] = refs[2]
        gitinfo['tag'] = False
    gitinfo['repo'] = os.environ['GL_REPO']
    gitinfo['user'] = os.environ['GL_USER']
    clientinfo = os.environ['SSH_CONNECTION'].split()
    gitinfo['ssh'] = {'client': {'ip': clientinfo[0], 'port': clientinfo[1]},
                      'server': {'ip': clientinfo[2], 'port': clientinfo[3]},
                      'user': os.environ['USER']
                      }
    if os.environ['GIT_DIR'] == '.':
        gitinfo['dir'] = os.environ['PWD']
    else:
        #gitinfo['dir'] = os.path.join(os.environ['GL_REPO_BASE'], gitinfo['repo'], '.git')
        gitinfo['dir'] = os.path.abspath(os.path.expanduser(os.environ['GIT_DIR']))
    if modules['git']:  # was "gitmodule", which is never defined
        # This is preferred, because it's a lot faster and a lot more flexible.
        #https://gitpython.readthedocs.io/en/stable
        gitobj = git.Repo(gitinfo['dir'])
        commits = list(gitobj.iter_commits(gitobj.head.ref.name, max_count = 2))
    else:
        commits = subprocess.check_output(['git', 'rev-parse', 'HEAD..HEAD^1']).decode('utf-8').splitlines()
    # str() so GitPython Commit objects and raw rev-parse lines are handled
    # the same way.
    gitinfo['oldrev'] = re.sub('^\^', '', str(commits[1]))
    gitinfo['currev'] = re.sub('^\^', '', str(commits[0]))
    return(gitinfo)
    #sys.exit(0)


def main():
    execHook()


if __name__ == '__main__':
    main()
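The paramiko path above reduces to connect-and-exec; a minimal sketch with a hypothetical host, user, and command is:

import paramiko

ssh = paramiko.SSHClient()
# Auto-accepting unknown host keys trades security for convenience, exactly
# as the hook script does.
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('example.com', username = 'gitbot', port = 22)
stdin, stdout, stderr = ssh.exec_command('uptime')
print(stdout.read().decode('utf-8'))
ssh.close()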
69
git/remotehooks2.py
Executable file
@@ -0,0 +1,69 @@
#!/usr/bin/env python3

import json
import os
import re
import sys
# Can we use paramiko for remotecmds?
try:
    import paramiko
    import socket
    has_ssh = True
except ImportError:
    has_ssh = False
# Can we use the python git module?
try:
    import git  # "python-gitpython" in Arch; https://github.com/gitpython-developers/gitpython
    has_git = True
except ImportError:
    has_git = False


class repoHooks(object):
    def __init__(self):
        with open(os.path.join(os.environ['HOME'],
                               '.gitolite',
                               'local',
                               'hooks',
                               'repo-specific',
                               'githooks.json'), 'r') as f:
            self.cfg = json.loads(f.read())
        self.repos = list(self.cfg.keys())
        self.env = os.environ.copy()
        if 'GIT_DIR' in self.env.keys():
            del(self.env['GIT_DIR'])
        self.repo = self.env['GL_REPO']

    def remoteExec(self):
        # self.repos is just the list of repo names; the per-repo config
        # lives in self.cfg.
        for _host in self.cfg[self.repo]['remotecmds'].keys():
            if len(_host.split(':')) == 2:
                _server, _port = [i.strip() for i in _host.split(':')]
            else:
                _port = 22
                _server = _host.split(':')[0]
            _h = self.cfg[self.repo]['remotecmds'][_host]
            for _user in _h.keys():
                _u = _h[_user]
                if has_ssh:
                    _ssh = paramiko.SSHClient()
                    _ssh.load_system_host_keys()
                    _ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                    _ssh.connect(_server,
                                 int(_port),
                                 _user)
                    for _cmd in _u['cmds']:
                        pass  # DO STUFF HERE
                else:
                    return()  # no-op; no paramiko

    def localExec(self):
        pass


def main():
    h = repoHooks()
    if h.repo not in h.repos:
        return()


if __name__ == '__main__':
    main()
27
git/sample.githooks.json
Normal file
@@ -0,0 +1,27 @@
# remotehooks.py should go in your <gitolite repo>/local/hooks/repo-specific directory,
# along with the (uncommented) format of this file configured for your particular hooks.
# "cmds" is a list of commands performed locally on the gitolite server;
# "remotecmds" contains a recursive dictionary of commands to run remotely.

{
    "<REPO_NAME>": {
        "remotecmds": {
            "<HOST_OR_IP_ADDRESS>": {
                "<USER>": {
                    "cmds": [
                        "<COMMAND_1>",
                        "<COMMAND_2>"
                    ]
                }
            }
        }
    },
    "<REPO2_NAME>": {
        "cmds": [
            [
                "<LOCAL_COMMAND_1>",
                "<LOCAL_COMMAND_2>"
            ]
        ]
    }
}
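Note the sample above is only valid JSON once the leading # lines are removed; a loader sketch (hypothetical filename) that strips them before parsing is:

import json
import re

with open('githooks.json', 'r') as f:
    # Strict JSON has no comment syntax, so drop the sample's # lines first.
    raw = '\n'.join(l for l in f.read().splitlines()
                    if not re.match(r'\s*#', l))
cfg = json.loads(raw)
print(list(cfg.keys()))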
285
gpg/keystats.py
Executable file
@@ -0,0 +1,285 @@
#!/usr/bin/env python3

# Get various information about an SKS keyserver from its status page
# without opening a browser.
# Requires BeautifulSoup4 and (optional but recommended) the lxml module.

# stdlib
import argparse
import datetime
import os
import re
import socket
from urllib.request import urlopen, urlparse
# pypi/pip
from bs4 import BeautifulSoup
try:
    import lxml
    bs_parser = 'lxml'
except ImportError:
    bs_parser = 'html.parser'


socket_orig = socket.getaddrinfo


def ForceProtov4(host, port, family = 0, socktype = 0, proto = 0,
                 flags = 0):
    return(socket_orig(host, port, socket.AF_INET, socktype, proto, flags))


def ForceProtov6(host, port, family = 0, socktype = 0, proto = 0,
                 flags = 0):
    return(socket_orig(host, port, socket.AF_INET6, socktype, proto, flags))


class KeyStats(object):
    def __init__(self, server, port = None, tls = True, netproto = None,
                 proto = 'http', output = 'py', verbose = True):
        self.stats = {'server': {},
                      'keys': 0}
        if verbose:
            self.stats['peers'] = {}
            self.stats['histograms'] = {}
        # Currently I only support scraping the stats page of the keyserver.
        # TODO: Can I do this directly via HKP/HKPS? Is there a python module
        # for it?
        self.port_dflts = {'http': {True: 443,
                                    False: 80,
                                    None: 80}}
        self.server = server
        self.tls = tls
        self.netproto = netproto
        # We need to do some... ugly, hacky stuff to *force* a particular
        # network stack (IPv4 vs. IPv6).
        # https://stackoverflow.com/a/6319043/733214
        if self.netproto:
            if self.netproto == 'ipv6':
                socket.getaddrinfo = ForceProtov6
            elif self.netproto == 'ipv4':
                socket.getaddrinfo = ForceProtov4
        self.verbose = verbose
        self.output = output
        self.proto = proto.lower()
        # TODO: would need to add add'l protocol support here.
        if self.proto in ('http', 'https'):
            self.proto = 'http'
        if not port:
            self.port = self.port_dflts[self.proto][self.tls]
        else:
            self.port = int(port)
        if self.proto == 'http':
            self.getStatsPage()

    def getStatsPage(self):
        if self.proto != 'http':
            # Something went wrong; this function shouldn't be used for
            # non-http. ("!=" rather than "is not": the latter compares
            # identity, not string equality.)
            return()
        _str_map = {'Hostname': 'name',
                    'Nodename': 'hostname',
                    'Version': 'version',
                    'Server contact': 'contact',
                    'HTTP port': 'hkp_port',
                    'Recon port': 'recon_port',
                    'Debug level': 'debug'}
        _uri = 'pks/lookup?op=stats'
        _url = '{0}://{1}:{2}/{3}'.format(('https' if self.tls else 'http'),
                                          self.server,
                                          self.port,
                                          _uri)
        with urlopen(_url) as u:
            _webdata = u.read()
        _soup = BeautifulSoup(_webdata, bs_parser)
        for e in _soup.find_all('h2'):
            # General server info
            if e.text == 'Settings':
                t = e.find_next('table',
                                attrs = {'summary': 'Keyserver Settings'})
                for r in t.find_all('tr'):
                    h = None
                    row = [re.sub(':$', '',
                                  i.text.strip()) for i in r.find_all('td')]
                    h = row[0]
                    if h in _str_map.keys():
                        if _str_map[h] in ('debug', 'hkp_port', 'recon_port'):
                            self.stats['server'][_str_map[h]] = int(row[1])
                        elif _str_map[h] == 'version':
                            self.stats['server'][_str_map[h]] = tuple(
                                row[1].split('.'))
                        else:
                            self.stats['server'][_str_map[h]] = row[1]
            # "Gossip" (recon) peers list
            elif e.text == 'Gossip Peers' and self.verbose:
                self.stats['peers']['recon'] = []
                t = e.find_next('table',
                                attrs = {'summary': 'Gossip Peers'})
                for r in t.find_all('tr'):
                    _peer = list(r.children)[0].text.split()
                    # A tuple consisting of host/name, port.
                    self.stats['peers']['recon'].append((_peer[0],
                                                         int(_peer[1])))
            # Mailsync peers list
            elif e.text == 'Outgoing Mailsync Peers' and self.verbose:
                self.stats['peers']['mailsync'] = []
                t = e.find_next('table', attrs = {'summary': 'Mailsync Peers'})
                for r in t.find_all('tr'):
                    _address = list(r.children)[0].text.strip()
                    self.stats['peers']['mailsync'].append(_address)
            # Number of keys
            elif e.text == 'Statistics':
                self.stats['keys'] = int(e.find_next('p').text.split()[-1])
        # Histograms
        for e in _soup.find_all('h3'):
            # Dailies
            if e.text == 'Daily Histogram' and self.verbose:
                _dfmt = '%Y-%m-%d'
                t = e.find_next('table', attrs = {'summary': 'Statistics'})
                for r in t.find_all('tr'):
                    row = [i.text.strip() for i in r.find_all('td')]
                    if row[0] == 'Time':
                        continue
                    _date = datetime.datetime.strptime(row[0], _dfmt)
                    _new = int(row[1])
                    _updated = int(row[2])
                    # JSON can't convert datetime objects to strings
                    # automatically like PyYAML can.
                    if self.output == 'json':
                        k = str(_date)
                    else:
                        k = _date
                    self.stats['histograms'][k] = {'total': {'new': _new,
                                                             'updated': _updated},
                                                   'hourly': {}}
            # Hourlies
            elif e.text == 'Hourly Histogram' and self.verbose:
                _dfmt = '%Y-%m-%d %H'
                t = e.find_next('table', attrs = {'summary': 'Statistics'})
                for r in t.find_all('tr'):
                    row = [i.text.strip() for i in r.find_all('td')]
                    if row[0] == 'Time':
                        continue
                    _date = datetime.datetime.strptime(row[0], _dfmt)
                    _new = int(row[1])
                    _updated = int(row[2])
                    _day = datetime.datetime(year = _date.year,
                                             month = _date.month,
                                             day = _date.day)
                    if self.output == 'json':
                        k1 = str(_day)
                        k2 = str(_date)
                    else:
                        k1 = _day
                        k2 = _date
                    self.stats['histograms'][k1]['hourly'][k2] = {'new': _new,
                                                                  'updated': _updated}
        return()

    def print(self):
        if self.output == 'json':
            import json
            print(json.dumps(self.stats,
                             #indent = 4,
                             default = str))
        elif self.output == 'yaml':
            has_yaml = False
            if 'YAML_MOD' in os.environ.keys():
                _mod = os.environ['YAML_MOD']
                try:
                    import importlib
                    yaml = importlib.import_module(_mod)
                    has_yaml = True
                except (ImportError, ModuleNotFoundError):
                    raise RuntimeError(('Module "{0}" is not ' +
                                        'installed').format(_mod))
            else:
                try:
                    import yaml
                    has_yaml = True
                except ImportError:
                    pass
                if not has_yaml:
                    # Only fall back to pyaml if PyYAML is absent, so the
                    # documented PyYAML > pyaml preference holds.
                    try:
                        import pyaml as yaml
                        has_yaml = True
                    except ImportError:
                        pass
            if not has_yaml:
                raise RuntimeError(('You must have the PyYAML or pyaml ' +
                                    'module installed to use YAML ' +
                                    'formatting'))
            print(yaml.dump(self.stats))
        elif self.output == 'py':
            import pprint
            pprint.pprint(self.stats)
        return()


def parseArgs():
    args = argparse.ArgumentParser()
    args.add_argument('-i', '--insecure',
                      dest = 'tls',
                      action = 'store_false',
                      help = ('If specified, do not use TLS encryption ' +
                              'querying the server (default is to use TLS)'))
    args.add_argument('-P', '--port',
                      dest = 'port',
                      type = int,
                      default = None,
                      help = ('The port number to use. If not specified, ' +
                              'use the default port per the normal protocol ' +
                              '(i.e. for HTTPS, use 443)'))
    fmt = args.add_mutually_exclusive_group()
    fmt.add_argument('-j', '--json',
                     default = 'py',
                     dest = 'output',
                     action = 'store_const',
                     const = 'json',
                     help = ('Output the data in JSON format'))
    fmt.add_argument('-y', '--yaml',
                     default = 'py',
                     dest = 'output',
                     action = 'store_const',
                     const = 'yaml',
                     help = ('Output the data in YAML format (requires ' +
                             'PyYAML or pyaml module). You can prefer which ' +
                             'one by setting an environment variable, ' +
                             'YAML_MOD, to "yaml" or "pyaml" (for PyYAML or ' +
                             'pyaml respectively); otherwise preference ' +
                             'will be PyYAML > pyaml'))
    fmt.add_argument('-p', '--python',
                     default = 'py',
                     dest = 'output',
                     action = 'store_const',
                     const = 'py',
                     help = ('Output the data in pythonic format (default)'))
    args.add_argument('-v', '--verbose',
                      dest = 'verbose',
                      action = 'store_true',
                      help = ('If specified, print out ALL info (peers, ' +
                              'histogram, etc.), not just the settings/' +
                              'number of keys/contact info/server info'))
    proto_grp = args.add_mutually_exclusive_group()
    proto_grp.add_argument('-4', '--ipv4',
                           dest = 'netproto',
                           default = None,
                           action = 'store_const',
                           const = 'ipv4',
                           help = ('If specified, force IPv4 (default is ' +
                                   'system\'s preference)'))
    proto_grp.add_argument('-6', '--ipv6',
                           dest = 'netproto',
                           default = None,
                           action = 'store_const',
                           const = 'ipv6',
                           help = ('If specified, force IPv6 (default is ' +
                                   'system\'s preference)'))
    args.add_argument('server',
                      help = ('The keyserver ((sub)domain, IP address, etc.)'))
    return(args)


def main():
    args = vars(parseArgs().parse_args())
    import pprint
    #pprint.pprint(args)
    ks = KeyStats(**args)
    ks.print()


if __name__ == '__main__':
    main()
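The ForceProtov4/ForceProtov6 functions above work by monkeypatching socket.getaddrinfo process-wide; a self-contained sketch of the same trick is:

import socket

_orig_getaddrinfo = socket.getaddrinfo

def _v4_only(host, port, family = 0, socktype = 0, proto = 0, flags = 0):
    # Pin the family argument so every resolver call returns A records only.
    return _orig_getaddrinfo(host, port, socket.AF_INET, socktype, proto, flags)

socket.getaddrinfo = _v4_only
# Every connection the process makes after this point resolves over IPv4,
# including urllib; restore socket.getaddrinfo = _orig_getaddrinfo to undo.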
199
gpg/sksdump.py
@@ -3,6 +3,7 @@
# Thanks to Matt Rude and https://gist.github.com/mattrude/b0ac735d07b0031bb002 so I can know what the hell I'm doing.

import argparse
import base64
import configparser
import datetime
import getpass
@@ -16,8 +17,10 @@ NOWstr = NOW.strftime('%Y-%m-%d')

# TODO:
# - cleanup/rotation should be optional
# - turn into a class so we can more easily share vars across functions
# - also, create the "CURRENT" symlink *AFTER* the dump completes?

cfgfile = os.path.join(os.environ['HOME'], '.sksdump.ini')
cfgfile = os.path.join(os.environ['HOME'], '.config', 'optools', 'sksdump.ini')

def getDefaults():
    # Hardcoded defaults
@@ -28,56 +31,78 @@ def getDefaults():
                        'logfile': '/var/log/sksdump.log',
                        'days': 1,
                        'dumpkeys': 15000},
            'sync': {'throttle': 0},
            'paths': {'basedir': '/var/lib/sks',
                      'destdir': '/srv/http/sks/dumps',
                      'rsync': 'root@mirror.square-r00t.net:/srv/http/sks/dumps'},
                      'rsync': ('root@mirror.square-r00t.net:' +
                                '/srv/http/sks/dumps'),
                      'sksbin': '/usr/bin/sks'},
            'runtime': {'nodump': None, 'nocompress': None, 'nosync': None}}
    ## Build out the default .ini.
    dflt_str = ('# IMPORTANT: This script uses certain permissions functions that require some forethought.\n' +
                '# You can either run as root, which is the "easy" way, OR you can run as the sks user.\n' +
                '# Has to be one or the other; you\'ll SERIOUSLY mess things up otherwise.\n' +
                '# If you run as the sks user, MAKE SURE the following is set in your sudoers\n' +
                '# (where SKSUSER is the username sks runs as):\n#\tCmnd_Alias SKSCMDS = ' +
                '/usr/bin/systemctl start sks-db,\\\n#\t\t/usr/bin/systemctl stop sks-db,\\\n#\t\t' +
                '/usr/bin/systemctl start sks-recon,\\\n#\t\t/usr/bin/systemctl stop sks-recon\n#\t' +
                'SKSUSER ALL = NOPASSWD: SKSCMDS\n\n')
    dflt_str += ('# This was written for systemd systems only. Tweaking would be needed for non-systemd systems\n' +
                 '# (since every non-systemd uses their own init system callables...)\n\n')
    # [system]
    d = dflt['system']
    dflt_str += ('## SKSDUMP CONFIG FILE ##\n\n# This section controls various system configuration.\n' +
                 '[system]\n# This should be the user SKS runs as.\nuser = {0}\n# This is the group that ' +
                 'SKS runs as.\ngroup = {1}\n# If None, don\'t compress dumps.\n# If one of: ' +
                 'xz, gz, bz2, or lrz (for lrzip) then use that compression algo.\ncompress = {2}\n' +
                 '# These services will be started/stopped, in order, before/after dumps. ' +
                 'Comma-separated.\nsvcs = {3}\n# The path to the logfile.\nlogfile = {4}\n# The number ' +
                 'of days of rotated key dumps. If None, don\'t rotate.\ndays = {5}\n# How many keys to include in each ' +
                 'dump file.\ndumpkeys = {6}\n\n').format(d['user'],
                                                          d['group'],
                                                          d['compress'],
                                                          ','.join(d['svcs']),
                                                          d['logfile'],
                                                          d['days'],
                                                          d['dumpkeys'])
    # [paths]
    d = dflt['paths']
    dflt_str += ('# This section controls where stuff goes and where we should find it.\n[paths]\n# ' +
                 'Where your SKS DB is.\nbasedir = {0}\n# This is the base directory where the dumps should go.\n' +
                 '# There will be a sub-directory created for each date.\ndestdir = {1}\n# The ' +
                 'path for rsyncing the dumps. If None, don\'t rsync.\nrsync = {2}\n\n').format(d['basedir'],
                                                                                                d['destdir'],
                                                                                                d['rsync'])
    # [runtime]
    d = dflt['runtime']
    dflt_str += ('# This section controls runtime options. These can be overridden at the commandline.\n' +
                 '# They take no values; they\'re merely options.\n[runtime]\n# Don\'t dump any keys.\n' +
                 '# Useful for dedicated in-transit/prep boxes.\n;nodump\n# Don\'t compress the dumps, even if ' +
                 'we have a compression scheme specified in [system:compress].\n;nocompress\n# Don\'t sync to ' +
                 'another server/path, even if one is specified in [paths:rsync].\n;nosync\n')
    dflt_b64 = ("""IyBJTVBPUlRBTlQ6IFRoaXMgc2NyaXB0IHVzZXMgY2VydGFpbiBwZXJtaXNz
                   aW9ucyBmdW5jdGlvbnMgdGhhdCByZXF1aXJlIHNvbWUKIyBmb3JldGhvdWdo
                   dC4KIyBZb3UgY2FuIGVpdGhlciBydW4gYXMgcm9vdCwgd2hpY2ggaXMgdGhl
                   ICJlYXN5IiB3YXksIE9SIHlvdSBjYW4gcnVuIGFzIHRoZQojIHNrcyB1c2Vy
                   IChvci4uLiB3aGF0ZXZlciB1c2VyIHlvdXIgU0tTIGluc3RhbmNlIHJ1bnMg
                   YXMpLgojIEl0IGhhcyB0byBiZSBvbmUgb3IgdGhlIG90aGVyOyB5b3UnbGwg
                   U0VSSU9VU0xZIG1lc3MgdGhpbmdzIHVwIG90aGVyd2lzZS4KIyBJZiB5b3Ug
                   cnVuIGFzIHRoZSBza3MgdXNlciwgTUFLRSBTVVJFIHRoZSBmb2xsb3dpbmcg
                   aXMgc2V0IGluIHlvdXIgc3Vkb2VycwojICh3aGVyZSBTS1NVU0VSIGlzIHRo
                   ZSB1c2VybmFtZSBza3MgcnVucyBhcyk6CiMJQ21uZF9BbGlhcyBTS1NDTURT
                   ID0gL3Vzci9iaW4vc3lzdGVtY3RsIHN0YXJ0IHNrcy1kYixcCiMJICAgICAg
                   ICAgICAgICAgICAgICAgICAgL3Vzci9iaW4vc3lzdGVtY3RsIHN0b3Agc2tzLWRi
                   LFwKIyAgICAgICAgICAgICAgICAgICAgICAgIC91c3IvYmluL3N5c3RlbWN0
                   bCBzdGFydCBza3MtcmVjb24sXAojCQkgICAgICAgICAgICAgICAgIC91c3Iv
                   YmluL3N5c3RlbWN0bCBzdG9wIHNrcy1yZWNvbgojCVNLU1VTRVIgQUxMID0g
                   Tk9QQVNTV0Q6IFNLU0NNRFMKCiMgVGhpcyB3YXMgd3JpdHRlbiBmb3Igc3lz
                   dGVtZCBzeXN0ZW1zIG9ubHkuIFR3ZWFraW5nIHdvdWxkIGJlIG5lZWRlZCBm
                   b3IKIyBub24tc3lzdGVtZCBzeXN0ZW1zIChzaW5jZSBldmVyeSBub24tc3lz
                   dGVtZCB1c2VzIHRoZWlyIG93biBpbml0IHN5c3RlbQojIGNhbGxhYmxlcy4u
                   LikKCiMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMj
                   IyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMKCiMgVGhp
                   cyBzZWN0aW9uIGNvbnRyb2xzIHZhcmlvdXMgc3lzdGVtIGNvbmZpZ3VyYXRp
                   b24uCltzeXN0ZW1dCgojIFRoaXMgc2hvdWxkIGJlIHRoZSB1c2VyIFNLUyBy
                   dW5zIGFzLgp1c2VyID0gc2tzCgojIFRoaXMgaXMgdGhlIGdyb3VwIHRoYXQg
                   U0tTIHJ1bnMgYXMuCmdyb3VwID0gc2tzCgojIElmIGVtcHR5LCBkb24ndCBj
                   b21wcmVzcyBkdW1wcy4KIyBJZiBvbmUgb2Y6IHh6LCBneiwgYnoyLCBvciBs
                   cnogKGZvciBscnppcCkgdGhlbiB1c2UgdGhhdCBjb21wcmVzc2lvbiBhbGdv
                   LgojIE5vdGUgdGhhdCBscnppcCByZXF1aXJlcyBleHRyYSBpbnN0YWxsYXRp
                   b24uCmNvbXByZXNzID0geHoKCiMgVGhlc2Ugc2VydmljZXMgd2lsbCBiZSBz
                   dG9wcGVkL3N0YXJ0ZWQsIGluIG9yZGVyLCBiZWZvcmUvYWZ0ZXIgZHVtcHMu
                   IElmIG1vcmUKIyB0aGFuIG9uZSwgc2VwZXJhdGUgYnkgY29tbWFzLgpzdmNz
                   ID0gc2tzLWRiLHNrcy1yZWNvbgoKIyBUaGUgcGF0aCB0byB0aGUgbG9nZmls
                   ZS4KbG9nZmlsZSA9IC92YXIvbG9nL3Nrc2R1bXAubG9nCgojIFRoZSBudW1i
                   ZXIgb2YgZGF5cyBvZiByb3RhdGVkIGtleSBkdW1wcy4gSWYgZW1wdHksIGRv
                   bid0IHJvdGF0ZS4KZGF5cyA9IDEKCiMgSG93IG1hbnkga2V5cyB0byBpbmNs
                   dWRlIGluIGVhY2ggZHVtcCBmaWxlLgpkdW1wa2V5cyA9IDE1MDAwCgoKIyBU
                   aGlzIHNlY3Rpb24gY29udHJvbHMgc3luYyBzZXR0aW5ncy4KW3N5bmNdCgoj
                   IFRoaXMgc2V0dGluZyBpcyB3aGF0IHRoZSBzcGVlZCBzaG91bGQgYmUgdGhy
                   b3R0bGVkIHRvLCBpbiBLaUIvcy4gSWYgZW1wdHkgb3IKIyAwLCBwZXJmb3Jt
                   IG5vIHRocm90dGxpbmcuCnRocm90dGxlID0gMAoKCiMgVGhpcyBzZWN0aW9u
                   IGNvbnRyb2xzIHdoZXJlIHN0dWZmIGdvZXMgYW5kIHdoZXJlIHdlIHNob3Vs
                   ZCBmaW5kIGl0LgpbcGF0aHNdCgojIFdoZXJlIHlvdXIgU0tTIERCIGlzLgpi
                   YXNlZGlyID0gL3Zhci9saWIvc2tzCgojIFRoaXMgaXMgdGhlIGJhc2UgZGly
                   ZWN0b3J5IHdoZXJlIHRoZSBkdW1wcyBzaG91bGQgZ28uCiMgVGhlcmUgd2ls
                   bCBiZSBhIHN1Yi1kaXJlY3RvcnkgY3JlYXRlZCBmb3IgZWFjaCBkYXRlLgpk
                   ZXN0ZGlyID0gL3Nydi9odHRwL3Nrcy9kdW1wcwoKIyBUaGUgcGF0aCBmb3Ig
                   cnN5bmNpbmcgdGhlIGR1bXBzLiBJZiBlbXB0eSwgZG9uJ3QgcnN5bmMuCnJz
                   eW5jID0gcm9vdEBtaXJyb3Iuc3F1YXJlLXIwMHQubmV0Oi9zcnYvaHR0cC9z
                   a3MvZHVtcHMKCiMgVGhlIHBhdGggdG8gdGhlIHNrcyBiaW5hcnkgdG8gdXNl
                   Lgpza3NiaW4gPSAvdXNyL2Jpbi9za3MKCgojIFRoaXMgc2VjdGlvbiBjb250
                   cm9scyBydW50aW1lIG9wdGlvbnMuIFRoZXNlIGNhbiBiZSBvdmVycmlkZGVu
                   IGF0IHRoZQojIGNvbW1hbmRsaW5lLiBUaGV5IHRha2Ugbm8gdmFsdWVzOyB0
                   aGV5J3JlIG1lcmVseSBvcHRpb25zLgpbcnVudGltZV0KCiMgRG9uJ3QgZHVt
                   cCBhbnkga2V5cy4KIyBVc2VmdWwgZm9yIGRlZGljYXRlZCBpbi10cmFuc2l0
                   L3ByZXAgYm94ZXMuCjtub2R1bXAKCiMgRG9uJ3QgY29tcHJlc3MgdGhlIGR1
                   bXBzLCBldmVuIGlmIHdlIGhhdmUgYSBjb21wcmVzc2lvbiBzY2hlbWUgc3Bl
                   Y2lmaWVkIGluCiMgdGhlIFtzeXN0ZW06Y29tcHJlc3NdIHNlY3Rpb246ZGly
                   ZWN0aXZlLgo7bm9jb21wcmVzcwoKIyBEb24ndCBzeW5jIHRvIGFub3RoZXIg
                   c2VydmVyL3BhdGgsIGV2ZW4gaWYgb25lIGlzIHNwZWNpZmllZCBpbiBbcGF0
                   aHM6cnN5bmNdLgo7bm9zeW5j""")
    realcfg = configparser.ConfigParser(defaults = dflt, allow_no_value = True)
    if not os.path.isfile(cfgfile):
        with open(cfgfile, 'w') as f:
            f.write(dflt_str)
            f.write(base64.b64decode(dflt_b64).decode('utf-8'))
    realcfg.read(cfgfile)
    return(realcfg)

@@ -115,7 +140,10 @@ def destPrep(args):
            _dir = os.path.join(thisdir, d)
            if os.path.isdir(_dir):
                if len(os.listdir(_dir)) == 0:
                    os.rmdir(os.path.join(thisdir, d))
                    try:
                        os.rmdir(os.path.join(thisdir, d))
                    except NotADirectoryError:
                        pass  # in case it grabs the "current" symlink
    #try:
    #    os.removedirs(sks['destdir'])  # Remove empty dirs
    #except:
@@ -124,18 +152,23 @@ def destPrep(args):
    if getpass.getuser() == 'root':
        uid = getpwnam(args['user']).pw_uid
        gid = getgrnam(args['group']).gr_gid
        for d in (args['destdir'], nowdir):  # we COULD set it as part of the os.makedirs, but iirc it doesn't set it for existing dirs
        # we COULD set it as part of the os.makedirs, but iirc it doesn't set
        # it for existing dirs.
        for d in (args['destdir'], nowdir):
            os.chown(d, uid, gid)
    if os.path.isdir(curdir):
        os.remove(curdir)
    os.symlink(NOWstr, curdir, target_is_directory = True)
    try:
        os.symlink(NOWstr, curdir, target_is_directory = True)
    except FileExistsError:
        pass  # Ignore if it was set earlier
    return()

def dumpDB(args):
    destPrep(args)
    os.chdir(args['basedir'])
    svcMgmt('stop', args)
    cmd = ['sks',
    cmd = [args['sksbin'],
           'dump',
           str(args['dumpkeys']),  # How many keys per dump?
           os.path.join(args['destdir'], NOWstr),  # Where should it go?
@@ -154,7 +187,9 @@ def compressDB(args):
    if not args['compress']:
        return()
    curdir = os.path.join(args['destdir'], NOWstr)
    for thisdir, dirs, files in os.walk(curdir):  # I use os.walk here because we might handle this differently in the future...
    # I use os.walk here because we might handle this differently in the
    # future...
    for thisdir, dirs, files in os.walk(curdir):
        files.sort()
        for f in files:
            fullpath = os.path.join(thisdir, f)
@@ -163,22 +198,30 @@ def compressDB(args):
            # However, I can't do this on memory-constrained systems for lrzip.
            # See: https://github.com/kata198/python-lrzip/issues/1
            with open(args['logfile'], 'a') as f:
                f.write('===== {0} Now compressing {1} =====\n'.format(str(datetime.datetime.utcnow()), fullpath))
                f.write('===== {0} Now compressing {1} =====\n'.format(
                    str(datetime.datetime.utcnow()),
                    fullpath))
            if args['compress'].lower() == 'gz':
                import gzip
                with open(fullpath, 'rb') as fh_in, gzip.open(newfile, 'wb') as fh_out:
                with open(fullpath, 'rb') as fh_in, gzip.open(newfile,
                                                              'wb') as fh_out:
                    fh_out.writelines(fh_in)
            elif args['compress'].lower() == 'xz':
                import lzma
                with open(fullpath, 'rb') as fh_in, lzma.open(newfile, 'wb', preset = 9|lzma.PRESET_EXTREME) as fh_out:
                with open(fullpath, 'rb') as fh_in, \
                        lzma.open(newfile,
                                  'wb',
                                  preset = 9|lzma.PRESET_EXTREME) as fh_out:
                    fh_out.writelines(fh_in)
            elif args['compress'].lower() == 'bz2':
                import bz2
                with open(fullpath, 'rb') as fh_in, bz2.open(newfile, 'wb') as fh_out:
                with open(fullpath, 'rb') as fh_in, bz2.open(newfile,
                                                             'wb') as fh_out:
                    fh_out.writelines(fh_in)
            elif args['compress'].lower() == 'lrz':
                import lrzip
                with open(fullpath, 'rb') as fh_in, open(newfile, 'wb') as fh_out:
                with open(fullpath, 'rb') as fh_in, open(newfile,
                                                         'wb') as fh_out:
                    fh_out.write(lrzip.compress(fh_in.read()))
            os.remove(fullpath)
    if getpass.getuser() == 'root':
@@ -195,8 +238,11 @@ def syncDB(args):
           '--delete',
           os.path.join(args['destdir'], '.'),
           args['rsync']]
    if args['throttle'] > 0.0:
        cmd.insert(-1, '--bwlimit={0}'.format(str(args['throttle'])))
    with open(args['logfile'], 'a') as f:
        f.write('===== {0} Rsyncing to mirror =====\n'.format(str(datetime.datetime.utcnow())))
        f.write('===== {0} Rsyncing to mirror =====\n'.format(
            str(datetime.datetime.utcnow())))
    with open(args['logfile'], 'a') as f:
        subprocess.run(cmd, stdout = f, stderr = f)
    return()
@@ -205,9 +251,12 @@ def parseArgs():
    cfg = getDefaults()
    system = cfg['system']
    paths = cfg['paths']
    sync = cfg['sync']
    runtime = cfg['runtime']
    args = argparse.ArgumentParser(description = 'sksdump - a tool for dumping the SKS Database',
                                   epilog = 'brent s. || 2017 || https://square-r00t.net')
    args = argparse.ArgumentParser(description = ('sksdump - a tool for ' +
                                                  'dumping an SKS Database'),
                                   epilog = ('brent s. || 2018 || ' +
                                             'https://square-r00t.net'))
    args.add_argument('-u',
                      '--user',
                      default = system['user'],
@@ -228,7 +277,9 @@ def parseArgs():
                      '--services',
                      default = system['svcs'],
                      dest = 'svcs',
                      help = 'A comma-separated list of services that will be stopped/started for the dump (in the provided order).')
                      help = ('A comma-separated list of services that will ' +
                              'be stopped/started for the dump (in the ' +
                              'provided order).'))
    args.add_argument('-l',
                      '--log',
                      default = system['logfile'],
@@ -251,16 +302,32 @@ def parseArgs():
                      default = paths['basedir'],
                      dest = 'basedir',
                      help = 'The directory which holds your SKS DB.')
    args.add_argument('-x',
                      '--sks-binary',
                      default = paths['sksbin'],
                      dest = 'sksbin',
                      help = ('The path to the SKS binary/executable to use ' +
                              'to perform the dump.'))
    args.add_argument('-e',
                      '--destdir',
                      default = paths['destdir'],
                      dest = 'destdir',
                      help = 'The directory where the dumps should be saved (a sub-directory with the date will be created).')
                      help = ('The directory where the dumps should be ' +
                              'saved (a sub-directory with the date will be ' +
                              'created).'))
    args.add_argument('-r',
                      '--rsync',
                      default = paths['rsync'],
                      dest = 'rsync',
                      help = 'The remote (user@host:/path/) or local (/path/) path to use to sync the dumps to.')
                      help = ('The remote (user@host:/path/) or local ' +
                              '(/path/) path to use to sync the dumps to.'))
    args.add_argument('-t',
                      '--throttle',
                      default = float(sync['throttle']),
                      dest = 'throttle',
                      type = float,
                      help = ('The amount in KiB/s to throttle the rsync ' +
                              'to. Use 0 for no throttling.'))
    args.add_argument('-D',
                      '--no-dump',
                      dest = 'nodump',
@@ -272,7 +339,8 @@ def parseArgs():
                      dest = 'nocompress',
                      action = 'store_true',
                      default = ('nocompress' in runtime),
                      help = 'Don\'t compress the DB dumps (default is to compress)')
                      help = ('Don\'t compress the DB dumps (default is to ' +
                              'compress)'))
    args.add_argument('-S',
                      '--no-sync',
                      dest = 'nosync',
@@ -287,7 +355,8 @@ def main():
    if getpass.getuser() not in ('root', args['user']):
        exit('ERROR: You must be root or {0}!'.format(args['user']))
    with open(args['logfile'], 'a') as f:
        f.write('===== {0} STARTING =====\n'.format(str(datetime.datetime.utcnow())))
        f.write('===== {0} STARTING =====\n'.format(
            str(datetime.datetime.utcnow())))
    if not args['nodump']:
        dumpDB(args)
    if not args['nocompress']:
@@ -295,7 +364,11 @@ def main():
    if not args['nosync']:
        syncDB(args)
    with open(args['logfile'], 'a') as f:
        f.write('===== {0} DONE =====\n'.format(str(datetime.datetime.utcnow())))
        f.write('===== {0} DONE =====\n'.format(
            str(datetime.datetime.utcnow())))
    with open(os.path.join(args['destdir'], 'LAST_COMPLETED_DUMP.txt'),
              'w') as f:
        f.write(str(datetime.datetime.utcnow()) + ' UTC\n')


if __name__ == '__main__':
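Since getDefaults() writes the embedded sample config on first run, reading it back is plain configparser; a minimal consumer sketch (assuming the file exists at the new path) is:

import configparser
import os

cfg = configparser.ConfigParser(allow_no_value = True)
cfg.read(os.path.join(os.environ['HOME'], '.config', 'optools', 'sksdump.ini'))
# Options in [runtime] are valueless flags; presence is what matters.
nodump = cfg.has_option('runtime', 'nodump')
print(cfg.get('system', 'user'), nodump)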
4
ldap/loglevel.py
Normal file
@@ -0,0 +1,4 @@
#!/usr/bin/env python3

# https://www.openldap.org/doc/admin24/slapdconfig.html#loglevel%20%3Clevel%3E
# https://www.zytrax.com/books/ldap/ch6/#loglevel
109
lib/python/logger.py
Executable file
@@ -0,0 +1,109 @@
#!/usr/bin/env python3

# The logfile.
dflt_logfile = '/var/log/optools/optools.log'

# The default log level. Can be one of (in increasing levels of output):
#   critical
#   error
#   warning
#   info
#   debug
# "debug" may log sensitive information! Do *not* use it unless ABSOLUTELY
# NECESSARY.
dflt_loglevel = 'warning'

# stdlib
import datetime
import logging
import logging.handlers
import os


class log(object):
    def __init__(self, loglvl = dflt_loglevel, logfile = dflt_logfile,
                 logname = 'optools'):
        # Loglevel mappings.
        self.loglvls = {'critical': logging.CRITICAL,
                        'error': logging.ERROR,
                        'warning': logging.WARNING,
                        'info': logging.INFO,
                        'debug': logging.DEBUG}
        self.loglvl = loglvl.lower()
        if self.loglvl not in self.loglvls:
            raise ValueError(('{0} is not one of: ' +
                              '{1}').format(loglvl,
                                            ', '.join(self.loglvls.keys())))
        self.Logger = logging.getLogger(logname)
        self.logfile = os.path.abspath(os.path.expanduser(logfile))
        try:
            os.makedirs(os.path.dirname(self.logfile),
                        exist_ok = True,
                        mode = 0o700)
        except Exception as e:
            # Make this non-fatal since we also log to journal for systemd?
            raise e
        self.chkSystemd()
        self.journald()
        self.Logger.setLevel(self.loglvls[self.loglvl])
        self.log_handlers()

    def chkSystemd(self):
        # Add journald support if we're on systemd.
        # We probably are since we're most likely on Arch, but we don't want
        # to make assumptions.
        self.systemd = False
        _sysd_chk = ['/run/systemd/system',
                     '/dev/.run/systemd',
                     '/dev/.systemd']
        for _ in _sysd_chk:
            if os.path.isdir(_):
                self.systemd = True
                break
        return()

    def journald(self):
        if not self.systemd:
            return()
        try:
            from systemd import journal
        except ImportError:
            try:
                import pip
                pip.main(['install', '--user', 'systemd'])
                from systemd import journal
            except Exception as e:
                # Build failed. Missing gcc, disk too full, whatever.
                self.systemd = False
        return()

    def log_handlers(self):
        # Log formats
        if self.systemd:
            _jrnlfmt = logging.Formatter(fmt = ('{levelname}: {message} ' +
                                                '({filename}:{lineno})'),
                                         style = '{',
                                         datefmt = '%Y-%m-%d %H:%M:%S')
        _logfmt = logging.Formatter(fmt = ('{asctime}:{levelname}: {message} (' +
                                           '{filename}:{lineno})'),
                                    style = '{',
                                    datefmt = '%Y-%m-%d %H:%M:%S')
        # Add handlers
        _dflthandler = logging.handlers.RotatingFileHandler(self.logfile,
                                                            encoding = 'utf8',
                                                            # 1GB
                                                            maxBytes = 1073741824,
                                                            backupCount = 5)
        _dflthandler.setFormatter(_logfmt)
        _dflthandler.setLevel(self.loglvls[self.loglvl])
        if self.systemd:
            from systemd import journal
            try:
                h = journal.JournaldLogHandler()
            except AttributeError:  # Uses the other version
                h = journal.JournalHandler()
            h.setFormatter(_jrnlfmt)
            h.setLevel(self.loglvls[self.loglvl])
            self.Logger.addHandler(h)
        self.Logger.addHandler(_dflthandler)
        self.Logger.info('Logging initialized')
        return()
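Using the class above is just instantiate-and-log; a small sketch, assuming the module is importable as "logger" and the chosen logfile path is writable:

from logger import log

l = log(loglvl = 'info', logfile = '/tmp/optools-test.log')
l.Logger.info('hello from optools')
l.Logger.debug('not emitted at the info level')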
1
libvirt/README
Normal file
@@ -0,0 +1 @@
These projects/scripts have been moved to https://git.square-r00t.net/LibvirtTools/.
2
mumble/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
/docs
/testcertimport.py
563
mumble/Mumble.proto
Normal file
@@ -0,0 +1,563 @@
// Copyright 2005-2017 The Mumble Developers. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license
|
||||
// that can be found in the LICENSE file at the root of the
|
||||
// Mumble source tree or at <https://www.mumble.info/LICENSE>.
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
package MumbleProto;
|
||||
|
||||
option optimize_for = SPEED;
|
||||
|
||||
message Version {
|
||||
// 2-byte Major, 1-byte Minor and 1-byte Patch version number.
|
||||
optional uint32 version = 1;
|
||||
// Client release name.
|
||||
optional string release = 2;
|
||||
// Client OS name.
|
||||
optional string os = 3;
|
||||
// Client OS version.
|
||||
optional string os_version = 4;
|
||||
}
|
||||
|
||||
// Not used. Not even for tunneling UDP through TCP.
|
||||
message UDPTunnel {
|
||||
// Not used.
|
||||
required bytes packet = 1;
|
||||
}
|
||||
|
||||
// Used by the client to send the authentication credentials to the server.
|
||||
message Authenticate {
|
||||
// UTF-8 encoded username.
|
||||
optional string username = 1;
|
||||
// Server or user password.
|
||||
optional string password = 2;
|
||||
// Additional access tokens for server ACL groups.
|
||||
repeated string tokens = 3;
|
||||
// A list of CELT bitstream version constants supported by the client.
|
||||
repeated int32 celt_versions = 4;
|
||||
optional bool opus = 5 [default = false];
|
||||
}
|
||||
|
||||
// Sent by the client to notify the server that the client is still alive.
|
||||
// Server must reply to the packet with the same timestamp and its own
|
||||
// good/late/lost/resync numbers. None of the fields is strictly required.
|
||||
message Ping {
|
||||
// Client timestamp. Server should not attempt to decode.
|
||||
optional uint64 timestamp = 1;
|
||||
// The amount of good packets received.
|
||||
optional uint32 good = 2;
|
||||
// The amount of late packets received.
|
||||
optional uint32 late = 3;
|
||||
// The amount of packets never received.
|
||||
optional uint32 lost = 4;
|
||||
// The amount of nonce resyncs.
|
||||
optional uint32 resync = 5;
|
||||
// The total amount of UDP packets received.
|
||||
optional uint32 udp_packets = 6;
|
||||
// The total amount of TCP packets received.
|
||||
optional uint32 tcp_packets = 7;
|
||||
// UDP ping average.
|
||||
optional float udp_ping_avg = 8;
|
||||
// UDP ping variance.
|
||||
optional float udp_ping_var = 9;
|
||||
// TCP ping average.
|
||||
optional float tcp_ping_avg = 10;
|
||||
// TCP ping variance.
|
||||
optional float tcp_ping_var = 11;
|
||||
}
|
||||
|
||||
// Sent by the server when it rejects the user connection.
|
||||
message Reject {
|
||||
enum RejectType {
|
||||
// The rejection reason is unknown (details should be available
|
||||
// in Reject.reason).
|
||||
None = 0;
|
||||
// The client attempted to connect with an incompatible version.
|
||||
WrongVersion = 1;
|
||||
// The user name supplied by the client was invalid.
|
||||
InvalidUsername = 2;
|
||||
// The client attempted to authenticate as a user with a password but it
|
||||
// was wrong.
|
||||
WrongUserPW = 3;
|
||||
// The client attempted to connect to a passworded server but the password
|
||||
// was wrong.
|
||||
WrongServerPW = 4;
|
||||
// Supplied username is already in use.
|
||||
UsernameInUse = 5;
|
||||
// Server is currently full and cannot accept more users.
|
||||
ServerFull = 6;
|
||||
// The user did not provide a certificate but one is required.
|
||||
NoCertificate = 7;
|
||||
AuthenticatorFail = 8;
|
||||
}
|
||||
// Rejection type.
|
||||
optional RejectType type = 1;
|
||||
// Human readable rejection reason.
|
||||
optional string reason = 2;
|
||||
}
|
||||
|
||||
// ServerSync message is sent by the server when it has authenticated the user
|
||||
// and finished synchronizing the server state.
|
||||
message ServerSync {
|
||||
// The session of the current user.
|
||||
optional uint32 session = 1;
|
||||
// Maximum bandwidth that the user should use.
|
||||
optional uint32 max_bandwidth = 2;
|
||||
// Server welcome text.
|
||||
optional string welcome_text = 3;
|
||||
// Current user permissions in the root channel.
|
||||
optional uint64 permissions = 4;
|
||||
}
|
||||
|
||||
// Sent by the client when it wants a channel removed. Sent by the server when
|
||||
// a channel has been removed and clients should be notified.
|
||||
message ChannelRemove {
|
||||
required uint32 channel_id = 1;
|
||||
}
|
||||
|
||||
// Used to communicate channel properties between the client and the server.
|
||||
// Sent by the server during the login process or when channel properties are
|
||||
// updated. Client may use this message to update said channel properties.
|
||||
message ChannelState {
|
||||
// Unique ID for the channel within the server.
|
||||
optional uint32 channel_id = 1;
|
||||
// channel_id of the parent channel.
|
||||
optional uint32 parent = 2;
|
||||
// UTF-8 encoded channel name.
|
||||
optional string name = 3;
|
||||
// A collection of channel id values of the linked channels. Absent during
|
||||
// the first channel listing.
|
||||
repeated uint32 links = 4;
|
||||
// UTF-8 encoded channel description. Only if the description is less than
|
||||
// 128 bytes
|
||||
optional string description = 5;
|
||||
// A collection of channel_id values that should be added to links.
|
||||
repeated uint32 links_add = 6;
|
||||
// A collection of channel_id values that should be removed from links.
|
||||
repeated uint32 links_remove = 7;
|
||||
// True if the channel is temporary.
|
||||
optional bool temporary = 8 [default = false];
|
||||
// Position weight to tweak the channel position in the channel list.
|
||||
optional int32 position = 9 [default = 0];
|
||||
// SHA1 hash of the description if the description is 128 bytes or more.
|
||||
optional bytes description_hash = 10;
|
||||
// Maximum number of users allowed in the channel. If this value is zero,
|
||||
// the maximum number of users allowed in the channel is given by the
|
||||
// server's "usersperchannel" setting.
|
||||
optional uint32 max_users = 11;
|
||||
}

// Used to communicate a user leaving or being kicked. May be sent by the
// client when it attempts to kick a user. Sent by the server when it informs
// the clients that a user is not present anymore.
message UserRemove {
  // The user who is being removed, identified by their session.
  required uint32 session = 1;
  // The user who initiated the removal. Either the user who performs the kick
  // or the user who is currently leaving.
  optional uint32 actor = 2;
  // Reason for the kick, stored as the ban reason if the user is banned.
  optional string reason = 3;
  // True if the kick should result in a ban.
  optional bool ban = 4;
}

// Sent by the server when it communicates new and changed users to the
// client. First seen during the login procedure. May be sent by the client
// when it wishes to alter its state.
message UserState {
  // Unique user session ID of the user whose state this is; may change on
  // reconnect.
  optional uint32 session = 1;
  // The session of the user who is updating this user.
  optional uint32 actor = 2;
  // User name, UTF-8 encoded.
  optional string name = 3;
  // Registered user ID if the user is registered.
  optional uint32 user_id = 4;
  // Channel on which the user is.
  optional uint32 channel_id = 5;
  // True if the user is muted by admin.
  optional bool mute = 6;
  // True if the user is deafened by admin.
  optional bool deaf = 7;
  // True if the user has been suppressed from talking by a reason other than
  // being muted.
  optional bool suppress = 8;
  // True if the user has muted self.
  optional bool self_mute = 9;
  // True if the user has deafened self.
  optional bool self_deaf = 10;
  // User image if it is less than 128 bytes.
  optional bytes texture = 11;
  // The positional audio plugin identifier.
  // Positional audio information is only sent to users who share
  // identical plugin contexts.
  //
  // This value is not transmitted to clients.
  optional bytes plugin_context = 12;
  // The user's plugin-specific identity.
  // This value is not transmitted to clients.
  optional string plugin_identity = 13;
  // User comment if it is less than 128 bytes.
  optional string comment = 14;
  // The hash of the user certificate.
  optional string hash = 15;
  // SHA1 hash of the user comment if it is 128 bytes or more.
  optional bytes comment_hash = 16;
  // SHA1 hash of the user picture if it is 128 bytes or more.
  optional bytes texture_hash = 17;
  // True if the user is a priority speaker.
  optional bool priority_speaker = 18;
  // True if the user is currently recording.
  optional bool recording = 19;
}
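
Since UserState is a partial update, a client changing its own state sets only the fields being changed. A sketch, assuming `frame` from earlier and that the server fills in session/actor when rebroadcasting (an assumption):

```python
import Mumble_pb2

def set_self_state(sock, mute=False, deaf=False):
    msg = Mumble_pb2.UserState()
    msg.self_mute = mute or deaf   # Mumble clients treat deafened as also muted
    msg.self_deaf = deaf
    sock.sendall(frame(msg))
```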

// Relays information on the bans. The client may send the BanList message to
// either modify the list of bans or query them from the server. The server
// sends this list only after a client queries for it.
message BanList {
  message BanEntry {
    // Banned IP address.
    required bytes address = 1;
    // The length of the subnet mask for the ban.
    required uint32 mask = 2;
    // User name for identification purposes (does not affect the ban).
    optional string name = 3;
    // The certificate hash of the banned user.
    optional string hash = 4;
    // Reason for the ban (does not affect the ban).
    optional string reason = 5;
    // Ban start time.
    optional string start = 6;
    // Ban duration in seconds.
    optional uint32 duration = 7;
  }

  // List of ban entries currently in place.
  repeated BanEntry bans = 1;
  // True if the server should return the list, false if it should replace the
  // old ban list with the one provided.
  optional bool query = 2 [default = false];
}
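
The query flag above gives BanList two modes; a sketch of both, assuming `frame` from earlier:

```python
import Mumble_pb2

def query_bans(sock):
    msg = Mumble_pb2.BanList()
    msg.query = True                  # ask the server to send its list back
    sock.sendall(frame(msg))

def replace_bans(sock, entries):
    msg = Mumble_pb2.BanList()        # query stays False: replace the list
    for address, mask, reason in entries:
        ban = msg.bans.add()
        ban.address = address         # raw address bytes
        ban.mask = mask
        ban.reason = reason
    sock.sendall(frame(msg))
```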

// Used to send and broadcast text messages.
message TextMessage {
  // The message sender, identified by its session.
  optional uint32 actor = 1;
  // Target users for the message, identified by their session.
  repeated uint32 session = 2;
  // The channels to which the message is sent, identified by their
  // channel_ids.
  repeated uint32 channel_id = 3;
  // The root channels when sending a message recursively to several channels,
  // identified by their channel_ids.
  repeated uint32 tree_id = 4;
  // The UTF-8 encoded message. May be HTML if the server allows.
  required string message = 5;
}
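
A sketch of sending a message, assuming `frame` from earlier; note that message is the only required field, and session, channel_id and tree_id targets can be mixed freely:

```python
import Mumble_pb2

def send_text(sock, text, sessions=(), channel_ids=(), tree_ids=()):
    msg = Mumble_pb2.TextMessage()
    msg.message = text
    msg.session.extend(sessions)        # direct messages to users
    msg.channel_id.extend(channel_ids)  # messages to single channels
    msg.tree_id.extend(tree_ids)        # recurse into sub-channels
    sock.sendall(frame(msg))
```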

message PermissionDenied {
  enum DenyType {
    // Operation denied for another reason; see the reason field.
    Text = 0;
    // Permissions were denied.
    Permission = 1;
    // Cannot modify SuperUser.
    SuperUser = 2;
    // Invalid channel name.
    ChannelName = 3;
    // Text message too long.
    TextTooLong = 4;
    // The flux capacitor was spelled wrong.
    H9K = 5;
    // Operation not permitted in temporary channel.
    TemporaryChannel = 6;
    // Operation requires certificate.
    MissingCertificate = 7;
    // Invalid username.
    UserName = 8;
    // Channel is full.
    ChannelFull = 9;
    // Channel nesting limit reached.
    NestingLimit = 10;
  }

  // The denied permission when type is Permission.
  optional uint32 permission = 1;
  // channel_id for the channel where the permission was denied when type is
  // Permission.
  optional uint32 channel_id = 2;
  // The user who was denied permissions, identified by session.
  optional uint32 session = 3;
  // Textual reason for the denial.
  optional string reason = 4;
  // Type of the denial.
  optional DenyType type = 5;
  // The name that is invalid when type is UserName.
  optional string name = 6;
}

message ACL {
  message ChanGroup {
    // Name of the channel group, UTF-8 encoded.
    required string name = 1;
    // True if the group has been inherited from the parent (read only).
    optional bool inherited = 2 [default = true];
    // True if the group members are inherited.
    optional bool inherit = 3 [default = true];
    // True if the group can be inherited by sub channels.
    optional bool inheritable = 4 [default = true];
    // Users explicitly included in this group, identified by user_id.
    repeated uint32 add = 5;
    // Users explicitly removed from this group in this channel if the group
    // has been inherited, identified by user_id.
    repeated uint32 remove = 6;
    // Users inherited, identified by user_id.
    repeated uint32 inherited_members = 7;
  }

  message ChanACL {
    // True if this ACL applies to the current channel.
    optional bool apply_here = 1 [default = true];
    // True if this ACL applies to the sub channels.
    optional bool apply_subs = 2 [default = true];
    // True if the ACL has been inherited from the parent.
    optional bool inherited = 3 [default = true];
    // ID of the user that is affected by this ACL.
    optional uint32 user_id = 4;
    // Name of the group that is affected by this ACL.
    optional string group = 5;
    // Bit flag field of the permissions granted by this ACL.
    optional uint32 grant = 6;
    // Bit flag field of the permissions denied by this ACL.
    optional uint32 deny = 7;
  }

  // Channel ID of the channel this message affects.
  required uint32 channel_id = 1;
  // True if the channel inherits its parent's ACLs.
  optional bool inherit_acls = 2 [default = true];
  // User group specifications.
  repeated ChanGroup groups = 3;
  // ACL specifications.
  repeated ChanACL acls = 4;
  // True if the message is a query for ACLs instead of setting them.
  optional bool query = 5 [default = false];
}

// Client may use this message to refresh its registered user information. The
// client should fill the IDs or names of the users it wants to refresh. The
// server fills the missing parts and sends the message back.
message QueryUsers {
  // user_ids.
  repeated uint32 ids = 1;
  // User names in the same order as ids.
  repeated string names = 2;
}

// Used to initialize and resync the UDP encryption. Either side may request a
// resync by sending the message without any values filled. The resync is
// performed by sending the message with only the client or server nonce
// filled.
message CryptSetup {
  // Encryption key.
  optional bytes key = 1;
  // Client nonce.
  optional bytes client_nonce = 2;
  // Server nonce.
  optional bytes server_nonce = 3;
}
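
A sketch of the resync flow described above, assuming `frame` from earlier. The random nonce here is a placeholder; a real client would send the current nonce from its OCB-AES128 crypto state rather than a fresh value:

```python
import os

import Mumble_pb2

def request_resync(sock):
    # An empty CryptSetup asks the other side to resync.
    sock.sendall(frame(Mumble_pb2.CryptSetup()))

def answer_resync(sock, client_nonce=None):
    # Resync by filling only the client nonce; key and server_nonce stay unset.
    msg = Mumble_pb2.CryptSetup()
    msg.client_nonce = client_nonce or os.urandom(16)  # placeholder nonce
    sock.sendall(frame(msg))
```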

message ContextActionModify {
  enum Context {
    // Action is applicable to the server.
    Server = 0x01;
    // Action can target a Channel.
    Channel = 0x02;
    // Action can target a User.
    User = 0x04;
  }

  enum Operation {
    Add = 0;
    Remove = 1;
  }

  // The action name.
  required string action = 1;
  // The display name of the action.
  optional string text = 2;
  // Context bit flags defining where the action should be displayed.
  optional uint32 context = 3;
  // Whether the action is being added or removed.
  optional Operation operation = 4;
}

// Sent by the client when it wants to initiate a Context action.
message ContextAction {
  // The target User for the action, identified by session.
  optional uint32 session = 1;
  // The target Channel for the action, identified by channel_id.
  optional uint32 channel_id = 2;
  // The action that should be executed.
  required string action = 3;
}

// Lists the registered users.
message UserList {
  message User {
    // Registered user ID.
    required uint32 user_id = 1;
    // Registered user name.
    optional string name = 2;
    // When the user was last seen on the server.
    optional string last_seen = 3;
    // channel_id of the channel the user was last seen in.
    optional uint32 last_channel = 4;
  }

  // A list of registered users.
  repeated User users = 1;
}

// Sent by the client when it wants to register or clear whisper targets.
//
// Note: The first available target ID is 1 as 0 is reserved for normal
// talking. Maximum target ID is 30.
message VoiceTarget {
  message Target {
    // Users that are included as targets.
    repeated uint32 session = 1;
    // Channel that is included as a target.
    optional uint32 channel_id = 2;
    // ACL group that is included as a target.
    optional string group = 3;
    // True if the voice should follow links from the specified channel.
    optional bool links = 4 [default = false];
    // True if the voice should also be sent to children of the specified
    // channel.
    optional bool children = 5 [default = false];
  }

  // Voice target ID.
  optional uint32 id = 1;
  // The receivers that this voice target includes.
  repeated Target targets = 2;
}
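
A registration sketch, assuming `frame` from earlier and that a VoiceTarget with no targets clears the given ID (an assumption based on the "register or clear" comment above):

```python
import Mumble_pb2

def register_whisper(sock, target_id, sessions=(), channel_id=None):
    assert 1 <= target_id <= 30, "0 is reserved for normal talking"
    msg = Mumble_pb2.VoiceTarget()
    msg.id = target_id
    target = msg.targets.add()
    target.session.extend(sessions)
    if channel_id is not None:
        target.channel_id = channel_id
    sock.sendall(frame(msg))

def clear_whisper(sock, target_id):
    msg = Mumble_pb2.VoiceTarget()
    msg.id = target_id                 # no targets: clear the registration
    sock.sendall(frame(msg))
```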

// Sent by the client when it wants permissions for a certain channel. Sent by
// the server when it replies to the query or wants the user to resync all
// channel permissions.
message PermissionQuery {
  // channel_id of the channel for which the permissions are queried.
  optional uint32 channel_id = 1;
  // Channel permissions.
  optional uint32 permissions = 2;
  // True if the client should drop its current permission information for all
  // channels.
  optional bool flush = 3 [default = false];
}

// Sent by the server to notify the users of the version of the CELT codec they
// should use. This may change during the connection when new users join.
message CodecVersion {
  // The version of the CELT Alpha codec.
  required int32 alpha = 1;
  // The version of the CELT Beta codec.
  required int32 beta = 2;
  // True if the user should prefer Alpha over Beta.
  required bool prefer_alpha = 3 [default = true];
  // True if the server supports the Opus codec.
  optional bool opus = 4 [default = false];
}

// Used to communicate user stats between the server and clients.
message UserStats {
  message Stats {
    // The amount of good packets received.
    optional uint32 good = 1;
    // The amount of late packets received.
    optional uint32 late = 2;
    // The amount of packets never received.
    optional uint32 lost = 3;
    // The amount of nonce resyncs.
    optional uint32 resync = 4;
  }

  // User whose stats these are.
  optional uint32 session = 1;
  // True if the message contains only mutable stats (packets, ping).
  optional bool stats_only = 2 [default = false];
  // Full user certificate chain of the user certificate in DER format.
  repeated bytes certificates = 3;
  // Packet statistics for packets received from the client.
  optional Stats from_client = 4;
  // Packet statistics for packets sent by the server.
  optional Stats from_server = 5;

  // Amount of UDP packets sent.
  optional uint32 udp_packets = 6;
  // Amount of TCP packets sent.
  optional uint32 tcp_packets = 7;
  // UDP ping average.
  optional float udp_ping_avg = 8;
  // UDP ping variance.
  optional float udp_ping_var = 9;
  // TCP ping average.
  optional float tcp_ping_avg = 10;
  // TCP ping variance.
  optional float tcp_ping_var = 11;

  // Client version.
  optional Version version = 12;
  // A list of CELT bitstream version constants supported by the client of this
  // user.
  repeated int32 celt_versions = 13;
  // Client IP address.
  optional bytes address = 14;
  // Bandwidth used by this client.
  optional uint32 bandwidth = 15;
  // Connection duration.
  optional uint32 onlinesecs = 16;
  // Duration since last activity.
  optional uint32 idlesecs = 17;
  // True if the user has a strong certificate.
  optional bool strong_certificate = 18 [default = false];
  // True if the user's client supports the Opus codec.
  optional bool opus = 19 [default = false];
}

// Used by the client to request binary data from the server. By default large
// comments or textures are not sent within standard messages but instead the
// hash is. If the client does not recognize the hash it may request the
// resource when it needs it. The client does so by sending a RequestBlob
// message with the correct fields filled with the user sessions or channel_ids
// it wants to receive. The server replies to this by sending a new
// UserState/ChannelState message with the resources filled even if they would
// normally be transmitted as hashes.
message RequestBlob {
  // sessions of the requested UserState textures.
  repeated uint32 session_texture = 1;
  // sessions of the requested UserState comments.
  repeated uint32 session_comment = 2;
  // channel_ids of the requested ChannelState descriptions.
  repeated uint32 channel_description = 3;
}
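
A sketch of the hash-based caching flow this enables, assuming `frame` from earlier and using user comments as the example (textures and channel descriptions work the same way):

```python
import Mumble_pb2

comment_cache = {}   # comment_hash (bytes) -> full comment (str)

def comment_for(sock, state):
    """Resolve the comment for a UserState, requesting the blob if needed."""
    if state.HasField("comment"):             # short comments arrive inline
        return state.comment
    if state.HasField("comment_hash"):
        cached = comment_cache.get(state.comment_hash)
        if cached is not None:
            return cached
        req = Mumble_pb2.RequestBlob()        # unknown hash: ask for the blob
        req.session_comment.append(state.session)
        sock.sendall(frame(req))
    return None                               # filled in by a later UserState
```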

// Sent by the server when it informs the clients on server configuration
// details.
message ServerConfig {
  // The maximum bandwidth the clients should use.
  optional uint32 max_bandwidth = 1;
  // Server welcome text.
  optional string welcome_text = 2;
  // True if the server allows HTML.
  optional bool allow_html = 3;
  // Maximum text message length.
  optional uint32 message_length = 4;
  // Maximum image message length.
  optional uint32 image_message_length = 5;
  // The maximum number of users allowed on the server.
  optional uint32 max_users = 6;
}

// Sent by the server to inform the clients of suggested client configuration
// specified by the server administrator.
message SuggestConfig {
  // Suggested client version.
  optional uint32 version = 1;
  // True if the administrator suggests positional audio to be used on this
  // server.
  optional bool positional = 2;
  // True if the administrator suggests push to talk to be used on this server.
  optional bool push_to_talk = 3;
}

mumble/MurmurRPC.proto (new file, 823 lines)
@@ -0,0 +1,823 @@
// Copyright 2005-2017 The Mumble Developers. All rights reserved.
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file at the root of the
// Mumble source tree or at <https://www.mumble.info/LICENSE>.

syntax = "proto2";

package MurmurRPC;

// Note about embedded messages:
//
// To help save bandwidth, the protocol does not always send complete embedded
// messages (i.e. an embedded message with all of the fields filled in). These
// incomplete messages only contain enough identifying information to get more
// information from the message's corresponding "Get" method. For example:
//
// User.server only ever contains the server ID. Calling ServerGet(User.server)
// will return a Server message with the server's status and uptime.
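
Resolving such an incomplete message might look like this in Python, assuming stubs generated with grpcio-tools and that ServerGet lives on this file's V1 gRPC service (both assumptions; the service definition is further down the file):

```python
import grpc

import MurmurRPC_pb2_grpc  # generated with: python -m grpc_tools.protoc ...

channel = grpc.insecure_channel("localhost:50051")  # address is hypothetical
stub = MurmurRPC_pb2_grpc.V1Stub(channel)

def full_server(user):
    # user.server carries only the server ID; ServerGet returns the complete
    # Server message, including its status and uptime.
    return stub.ServerGet(user.server)
```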

message Void {
}

message Version {
  // 2-byte Major, 1-byte Minor and 1-byte Patch version number.
  optional uint32 version = 1;
  // Client release name.
  optional string release = 2;
  // Client OS name.
  optional string os = 3;
  // Client OS version.
  optional string os_version = 4;
}

message Uptime {
  // The number of seconds from the starting time.
  optional uint64 secs = 1;
}

message Server {
  // The unique server ID.
  required uint32 id = 1;
  // Is the server currently running?
  optional bool running = 2;
  // The uptime of the server.
  optional Uptime uptime = 3;

  message Event {
    enum Type {
      UserConnected = 0;
      UserDisconnected = 1;
      UserStateChanged = 2;
      UserTextMessage = 3;
      ChannelCreated = 4;
      ChannelRemoved = 5;
      ChannelStateChanged = 6;
    };
    // The server on which the event happened.
    optional Server server = 1;
    // The type of event that happened.
    optional Type type = 2;
    // The user tied to the event (if applicable).
    optional User user = 3;
    // The text message tied to the event (if applicable).
    optional TextMessage message = 4;
    // The channel tied to the event (if applicable).
    optional Channel channel = 5;
  }

  message Query {
  }

  message List {
    // The servers.
    repeated Server servers = 1;
  }
}

message Event {
  enum Type {
    ServerStopped = 0;
    ServerStarted = 1;
  };
  // The server for which the event happened.
  optional Server server = 1;
  // The type of event that happened.
  optional Type type = 2;
}

message ContextAction {
  enum Context {
    Server = 0x01;
    Channel = 0x02;
    User = 0x04;
  };
  // The server on which the action is.
  optional Server server = 1;
  // The context in which the action is.
  optional uint32 context = 2;
  // The action name.
  optional string action = 3;
  // The user-visible descriptive name of the action.
  optional string text = 4;
  // The user that triggered the ContextAction.
  optional User actor = 5;
  // The user on which the ContextAction was triggered.
  optional User user = 6;
  // The channel on which the ContextAction was triggered.
  optional Channel channel = 7;
}

message TextMessage {
  // The server on which the TextMessage originates.
  optional Server server = 1;
  // The user who sent the message.
  optional User actor = 2;
  // The users to whom the message is sent.
  repeated User users = 3;
  // The channels to which the message is sent.
  repeated Channel channels = 4;
  // The channels to which the message is sent, including the channels'
  // descendants.
  repeated Channel trees = 5;
  // The message body that is sent.
  optional string text = 6;

  message Filter {
    enum Action {
      // Accept the message.
      Accept = 0;
      // Reject the message with a permission error.
      Reject = 1;
      // Silently drop the message.
      Drop = 2;
    }
    // The server on which the message originated.
    optional Server server = 1;
    // The action to perform for the message.
    optional Action action = 2;
    // The text message.
    optional TextMessage message = 3;
  }
}

message Log {
  // The server on which the log message was generated.
  optional Server server = 1;
  // The Unix timestamp of when the message was generated.
  optional int64 timestamp = 2;
  // The log message.
  optional string text = 3;

  message Query {
    // The server whose logs will be queried.
    optional Server server = 1;
    // The minimum log index to receive.
    optional uint32 min = 2;
    // The maximum log index to receive.
    optional uint32 max = 3;
  }

  message List {
    // The server where the log entries are from.
    optional Server server = 1;
    // The total number of log entries on the server.
    optional uint32 total = 2;
    // The minimum log index that was sent.
    optional uint32 min = 3;
    // The maximum log index that was sent.
    optional uint32 max = 4;
    // The log entries.
    repeated Log entries = 5;
  }
}

message Config {
  // The server the configuration is for.
  optional Server server = 1;
  // The configuration keys and values.
  map<string, string> fields = 2;

  message Field {
    // The server the configuration field is for.
    optional Server server = 1;
    // The field key.
    optional string key = 2;
    // The field value.
    optional string value = 3;
  }
}

message Channel {
  // The server on which the channel exists.
  optional Server server = 1;
  // The unique channel identifier.
  optional uint32 id = 2;
  // The channel name.
  optional string name = 3;
  // The channel's parent.
  optional Channel parent = 4;
  // Linked channels.
  repeated Channel links = 5;
  // The channel's description.
  optional string description = 6;
  // Is the channel temporary?
  optional bool temporary = 7;
  // The position in which the channel should appear in a sorted list.
  optional int32 position = 8;

  message Query {
    // The server on which the channels are.
    optional Server server = 1;
  }

  message List {
    // The server on which the channels are.
    optional Server server = 1;
    // The channels.
    repeated Channel channels = 2;
  }
}

message User {
  // The server to which the user is connected.
  optional Server server = 1;
  // The user's session ID.
  optional uint32 session = 2;
  // The user's registered ID.
  optional uint32 id = 3;
  // The user's name.
  optional string name = 4;
  // Is the user muted?
  optional bool mute = 5;
  // Is the user deafened?
  optional bool deaf = 6;
  // Is the user suppressed?
  optional bool suppress = 7;
  // Is the user a priority speaker?
  optional bool priority_speaker = 8;
  // Has the user muted him/herself?
  optional bool self_mute = 9;
  // Has the user deafened him/herself?
  optional bool self_deaf = 10;
  // Is the user recording?
  optional bool recording = 11;
  // The channel the user is in.
  optional Channel channel = 12;
  // How long the user has been connected to the server.
  optional uint32 online_secs = 13;
  // How long the user has been idle on the server.
  optional uint32 idle_secs = 14;
  // How many bytes per second the user is transmitting to the server.
  optional uint32 bytes_per_sec = 15;
  // The user's client version.
  optional Version version = 16;
  // The user's plugin context.
  optional bytes plugin_context = 17;
  // The user's plugin identity.
  optional string plugin_identity = 18;
  // The user's comment.
  optional string comment = 19;
  // The user's texture.
  optional bytes texture = 20;
  // The user's IP address.
  optional bytes address = 21;
  // Is the user in TCP-only mode?
  optional bool tcp_only = 22;
  // The user's UDP ping in milliseconds.
  optional float udp_ping_msecs = 23;
  // The user's TCP ping in milliseconds.
  optional float tcp_ping_msecs = 24;

  message Query {
    // The server whose users will be queried.
    optional Server server = 1;
  }

  message List {
    // The server to which the users are connected.
    optional Server server = 1;
    // The users.
    repeated User users = 2;
  }

  message Kick {
    // The server to which the user is connected.
    optional Server server = 1;
    // The user to kick.
    optional User user = 2;
    // The user who performed the kick.
    optional User actor = 3;
    // The reason why the user is being kicked.
    optional string reason = 4;
  }
}

message Tree {
  // The server which the tree represents.
  optional Server server = 1;
  // The current channel.
  optional Channel channel = 2;
  // Channels below the current channel.
  repeated Tree children = 3;
  // The users in the current channel.
  repeated User users = 4;

  message Query {
    // The server to query.
    optional Server server = 1;
  }
}

message Ban {
  // The server on which the ban is applied.
  optional Server server = 1;
  // The banned IP address.
  optional bytes address = 2;
  // The number of leading bits in the address to which the ban applies.
  optional uint32 bits = 3;
  // The name of the banned user.
  optional string name = 4;
  // The certificate hash of the banned user.
  optional string hash = 5;
  // The reason for the ban.
  optional string reason = 6;
  // The ban start time (in epoch form).
  optional int64 start = 7;
  // The ban duration in seconds.
  optional int64 duration_secs = 8;

  message Query {
    // The server whose bans to query.
    optional Server server = 1;
  }

  message List {
    // The server for which the bans apply.
    optional Server server = 1;
    // The bans.
    repeated Ban bans = 2;
  }
}

message ACL {
  enum Permission {
    None = 0x00;
    Write = 0x01;
    Traverse = 0x02;
    Enter = 0x04;
    Speak = 0x08;
    Whisper = 0x100;
    MuteDeafen = 0x10;
    Move = 0x20;
    MakeChannel = 0x40;
    MakeTemporaryChannel = 0x400;
    LinkChannel = 0x80;
    TextMessage = 0x200;

    Kick = 0x10000;
    Ban = 0x20000;
    Register = 0x40000;
    RegisterSelf = 0x80000;
  }

  message Group {
    // The ACL group name.
    optional string name = 1;
    // Is the group inherited?
    optional bool inherited = 2;
    // Does the group inherit members?
    optional bool inherit = 3;
    // Can this group be inherited by its children?
    optional bool inheritable = 4;

    // The users explicitly added by this group.
    repeated DatabaseUser users_add = 5;
    // The users explicitly removed by this group.
    repeated DatabaseUser users_remove = 6;
    // All of the users who are part of this group.
    repeated DatabaseUser users = 7;
  }

  // Does the ACL apply to the current channel?
  optional bool apply_here = 3;
  // Does the ACL apply to the current channel's sub-channels?
  optional bool apply_subs = 4;
  // Was the ACL inherited?
  optional bool inherited = 5;

  // The user to whom the ACL applies.
  optional DatabaseUser user = 6;
  // The group to whom the ACL applies.
  optional ACL.Group group = 7;

  // The permissions granted by the ACL (bitmask of ACL.Permission).
  optional uint32 allow = 8;
  // The permissions denied by the ACL (bitmask of ACL.Permission).
  optional uint32 deny = 9;

  message Query {
    // The server where the user and channel exist.
    optional Server server = 1;
    // The user to query.
    optional User user = 2;
    // The channel to query.
    optional Channel channel = 3;
  }

  message List {
    // The server on which the ACLs exist.
    optional Server server = 1;
    // The channel to which the ACL refers.
    optional Channel channel = 2;
    // The ACLs that are part of the given channel.
    repeated ACL acls = 3;
    // The groups that are part of the given channel.
    repeated ACL.Group groups = 4;
    // Should ACLs be inherited from the parent channel?
    optional bool inherit = 5;
  }

  message TemporaryGroup {
    // The server where the temporary group exists.
    optional Server server = 1;
    // The channel to which the temporary user group is added.
    optional Channel channel = 2;
    // The user who is added to the group.
    optional User user = 3;
    // The name of the temporary group.
    optional string name = 4;
  }
}
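
Since allow/deny are bitmasks of ACL.Permission, checking them is plain bit arithmetic; a sketch, assuming Python stubs generated from this file (nested proto2 enum values are exposed on the enclosing message class):

```python
import MurmurRPC_pb2

def grants(acl, *permissions):
    """True if the ACL's allow mask contains every requested permission."""
    needed = 0
    for p in permissions:
        needed |= p
    return (acl.allow & needed) == needed

# e.g.:
# grants(acl, MurmurRPC_pb2.ACL.Enter, MurmurRPC_pb2.ACL.Speak)
```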

message Authenticator {
  message Request {
    // An authentication request for a connecting user.
    message Authenticate {
      // The user's name.
      optional string name = 1;
      // The user's password.
      optional string password = 2;
      // The user's certificate chain in DER format.
      repeated bytes certificates = 3;
      // The hexadecimal hash of the user's certificate.
      optional string certificate_hash = 4;
      // True if the user is connecting with a strong certificate.
      optional bool strong_certificate = 5;
    }

    // A request for information about a user, given by either the user's ID
    // or name.
    message Find {
      // The user's ID used for lookup.
      optional uint32 id = 1;
      // The user's name used for lookup.
      optional string name = 2;
    }

    // A query of all the registered users, optionally filtered by the given
    // filter string.
    message Query {
      // A user name filter (% is often used as a wildcard).
      optional string filter = 1;
    }

    // A request for a new user registration.
    message Register {
      // The database user to register.
      optional DatabaseUser user = 1;
    }

    // A request for deregistering a registered user.
    message Deregister {
      // The database user to deregister.
      optional DatabaseUser user = 1;
    }

    // A request to update a registered user's information. The information
    // provided should be merged with existing data.
    message Update {
      // The database user to update.
      optional DatabaseUser user = 1;
    }

    optional Authenticate authenticate = 1;
    optional Find find = 2;
    optional Query query = 3;
    optional Register register = 4;
    optional Deregister deregister = 5;
    optional Update update = 6;
  }

  message Response {
    // The initialization for the authenticator stream. This message must be
    // sent before authentication requests will start streaming.
    message Initialize {
      optional Server server = 1;
    }

    enum Status {
      // The request should fall through to Murmur's default action.
      Fallthrough = 0;
      // The request was successful.
      Success = 1;
      // The request failed; there was some error.
      Failure = 2;
      // A temporary failure prevented the request from succeeding (e.g. a
      // database was unavailable).