16 Commits

Author SHA1 Message Date
brent saner
64a7648fbc v1.16.2
...why didn't my docs regen hook run?
2026-01-28 09:20:34 -05:00
brent saner
9cce861b2e v1.16.1
FIXED:
* *Some* documentation weirdness on pkg.go dev rendering. It still uses
  the Markdown render by default, and it seems if you use anchor links
  in a bulletpoint list, pandoc just says "lol screw you"...

ADDED:
* tplx/sprigx tpl function `osHostname`
2026-01-28 09:16:18 -05:00
brent saner
927ad08057 v1.16.0
ADDED:
* tplx/sprigx
2026-01-24 13:41:54 -05:00
brent saner
2edbc9306d v1.15.4
FIXED:
* Docs error
2026-01-07 19:15:21 -05:00
brent saner
bb71be187f v1.15.3
FIXED:
* Properly parse into map, add *All* variants
2026-01-07 19:02:52 -05:00
brent saner
834395c050 v1.15.2
ADDED:
* Better docs for remap
* Added returner convenience funcs for remap

FIXED:
* Proper resliced remap.ReMap.MapString
2026-01-06 02:54:38 -05:00
brent saner
ef56898d6b v1.15.1
ADDED:
* timex, for some floaty-UNIX-y things
2025-12-23 18:57:28 -05:00
brent saner
006cf39fa1 v1.15.0
ADDED:
* tplx, for one-shotting/shortcutting templating
2025-12-23 17:26:50 -05:00
brent saner
145c32268e v1.14.0
ADDED:
* iox package
* mapsx package
* netx/inetcksum package
2025-12-18 04:47:31 -05:00
brent saner
6ddfcdb416 v1.13.0
ADDED:
* stringsx functions
2025-11-30 16:53:56 -05:00
brent saner
79f10b7611 v1.12.1
FIXED:
* Aaaannnddd need to make the Windows multilogger AddDefaultLogger
  use the right/matching parameters as well.
2025-11-22 17:19:41 -05:00
brent saner
01adbfc605 v1.12.0
FIXED:
* logging package on Windows had a non-conformant GetLogger().
  While this fix technically breaks API, this was a horribly broken
  thing so I'm including it as a minor bump instead of major and
  thus breaking SemVer. Too bad, so sad, deal with it; Go modules
  have versioning for a reason.
  The previous logging.GetLogger() behavior on Windows has been moved
  to logging.GetLoggerWindows().
2025-11-22 15:53:38 -05:00
brent saner
b1d8ea34a6 v1.11.0
ADDED:
* `stringsx` package
** `stringsx.Indent()`, to indent/prefix multiline strings
** `stringsx.Redact()`, to mask strings
** `stringsx.TrimLines()`, like strings.TrimSpace() but multiline
** `stringsx.TrimSpaceLeft()`, like strings.TrimSpace() but only to the
    left of a string.
** `stringsx.TrimSpaceRight()`, like strings.TrimSpace() but only to the
    right of a string.
2025-11-14 01:02:59 -05:00
brent saner
e101758187 v1.10.3
ADDED:
* netx now has a ton of netmask conversion functions for IPv4 netmasks.
  (IPv6 doesn't really *have* netmasks, so it was intentionally
  excluded).
2025-10-13 15:56:07 -04:00
brent saner
3c49a5b70a v1.10.2
FIXED:
* Windows logging needs to import bitmask
2025-09-09 08:50:47 -04:00
brent saner
965657d1b2 v1.10.1
FIXED:
* Missed a Reset on the inetcksum.InetChecksumSimple.
2025-09-05 18:55:01 -04:00
65 changed files with 7249 additions and 143 deletions

31
.githooks/pre-commit/01-docgen Executable file
View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Pre-commit hook: regenerate an HTML render (and, when pandoc is available,
# a Markdown render) of every README.adoc in the repo, and stage the results.
orig="${PWD}"
# asciidoctor missing is not an error; just skip doc regeneration entirely.
if ! command -v asciidoctor &> /dev/null;
then
	exit 0
fi
set -eo pipefail
# find -print0 / read -d '' keeps paths with whitespace intact;
# the previous `for f in $(find ...)` form word-split such paths.
while IFS= read -r -d '' f; do
	filename=$(basename -- "${f}")
	docsdir=$(dirname -- "${f}")
	nosuffix="${filename%.*}"
	pfx="${docsdir}/${nosuffix}"
	newf="${pfx}.html"
	asciidoctor -a ROOTDIR="${orig}/" -o "${newf}" "${f}"
	echo "Generated ${newf} from ${f}"
	git add "${newf}"
	# Markdown render is best-effort: only when pandoc is installed.
	if command -v pandoc &> /dev/null;
	then
		newf="${pfx}.md"
		asciidoctor -a ROOTDIR="${orig}/" -b docbook -o - "${f}" | pandoc -f docbook -t markdown_strict -o "${newf}"
		echo "Generated ${newf} from ${f}"
		git add "${newf}"
	fi
	# Quoted so a working directory containing spaces does not break the hook.
	cd "${orig}"
done < <(find . -type f -iname "README.adoc" -print0)
echo "Regenerated docs"

22
go.mod
View File

@@ -1,15 +1,27 @@
module r00t2.io/goutils
go 1.24.5
go 1.25
require (
github.com/coreos/go-systemd/v22 v22.5.0
github.com/Masterminds/sprig/v3 v3.3.0
github.com/coreos/go-systemd/v22 v22.6.0
github.com/davecgh/go-spew v1.1.1
github.com/google/uuid v1.6.0
golang.org/x/sys v0.34.0
r00t2.io/sysutils v1.14.0
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
golang.org/x/sys v0.39.0
r00t2.io/sysutils v1.15.1
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/djherbis/times v1.6.0 // indirect
golang.org/x/sync v0.16.0 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/sync v0.19.0 // indirect
)

59
go.sum
View File

@@ -1,13 +1,56 @@
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
r00t2.io/sysutils v1.14.0/go.mod h1:ZJ7gZxFVQ7QIokQ5fPZr7wl0XO5Iu+LqtE8j3ciRINw=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
r00t2.io/sysutils v1.15.1 h1:0EVZZAxTFqQN6jjfjqUKkXye0LMshUA5MO7l3Wd6wH8=
r00t2.io/sysutils v1.15.1/go.mod h1:T0iOnaZaSG5NE1hbXTqojRZc0ia/u8TB73lV7zhMz58=

View File

@@ -1,4 +1,7 @@
/*
Package iox includes extensions to the stdlib `io` module.
Not everything in here is considered fully stabilized yet,
but it should be usable.
*/
package iox

View File

@@ -6,4 +6,12 @@ import (
// Sentinel errors returned by the iox package; compare with errors.Is.
var (
	// ErrBufTooSmall is returned when a buffer (or the result of a buffer allocator) has zero length.
	ErrBufTooSmall error = errors.New("buffer too small; buffer size must be > 0")
	// ErrChunkTooBig is returned when a configured chunk length is too large for a method to use.
	ErrChunkTooBig error = errors.New("chunk too big for method")
	// ErrChunkTooSmall is returned when a chunk is too small for a buffer.
	ErrChunkTooSmall error = errors.New("chunk too small for buffer")
	// ErrInvalidChunkSize is returned when an invalid (e.g. zero) chunk size is passed.
	ErrInvalidChunkSize error = errors.New("an invalid chunk size was passed")
	// ErrNilCtx is returned when a nil context.Context is passed.
	ErrNilCtx error = errors.New("a nil context was passed")
	// ErrNilReader is returned when a nil io.Reader is passed.
	ErrNilReader error = errors.New("a nil reader was passed")
	// ErrNilWriter is returned when a nil io.Writer is passed.
	ErrNilWriter error = errors.New("a nil writer was passed")
	// ErrShortRead is returned when a read is cut short without an io.EOF.
	ErrShortRead error = errors.New("a read was cut short with no EOF")
	// ErrShortWrite is returned when a write is cut short without an error.
	ErrShortWrite error = errors.New("a write was cut short with no error")
)

View File

@@ -1,20 +1,21 @@
package iox
import (
`context`
`io`
)
/*
CopyBufN is a mix between io.CopyN and io.CopyBuffer.
CopyBufN is a mix between [io.CopyN] and [io.CopyBuffer].
Despite what the docs may suggest, io.CopyN does NOT *read* n bytes from src AND write n bytes to dst.
Despite what the docs may suggest, [io.CopyN] does NOT *read* n bytes from src AND write n bytes to dst.
Instead, it always reads 32 KiB from src, and writes n bytes to dst.
There are, of course, cases where this is deadfully undesired.
There are cases where this is dreadfully undesired.
One can, of course, use io.CopyBuffer, but this is a bit annoying since you then have to provide a buffer yourself.
One can, of course, use [io.CopyBuffer], but this is a bit annoying since you then have to provide a buffer yourself.
This convenience-wraps io.CopyBuffer to have a similar signature to io.CopyN but properly uses n for both reading and writing.
This convenience-wraps [io.CopyBuffer] to have a similar signature to [io.CopyN] but properly uses n for both reading and writing.
*/
func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
@@ -32,10 +33,215 @@ func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
return
}
// CopyBufWith allows for specifying a buffer allocator function, otherwise acts as CopyBufN.
func CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
// CopyCtxBufN copies from `src` to `dst`, `n` bytes at a time, interruptible by `ctx`.
func CopyCtxBufN(ctx context.Context, dst io.Writer, src io.Reader, n int64) (written int64, err error) {
written, err = io.CopyBuffer(dst, src, bufFunc())
var nr int
var nw int
var end bool
var buf []byte
if ctx == nil {
err = ErrNilCtx
return
}
if n <= 0 {
err = ErrBufTooSmall
return
}
endCopy:
for {
select {
case <-ctx.Done():
err = ctx.Err()
return
default:
buf = make([]byte, n)
nr, err = src.Read(buf)
if err == io.EOF {
err = nil
end = true
} else if err != nil {
return
}
buf = buf[:nr]
if nw, err = dst.Write(buf); err != nil {
written += int64(nw)
return
}
written += int64(nw)
if len(buf) != nw {
err = io.ErrShortWrite
return
}
if end {
break endCopy
}
}
}
return
}
/*
CopyBufWith allows for specifying a buffer allocator function, otherwise acts as [CopyBufN].

bufFunc *MUST NOT* return a nil or len == 0 buffer. [ErrBufTooSmall] will be returned if it does.

This uses a fixed buffer size from a single call to `bufFunc`.
If you need something with dynamic buffer sizing according to some state, use [CopyBufWithDynamic] instead.
(Note that CopyBufWithDynamic is generally a little slower, but it should only be noticeable on very large amounts of data.)
*/
func CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {

	var buf []byte = bufFunc()

	// len(buf) == 0 is also true for a nil slice, so the explicit
	// nil comparison was redundant (staticcheck S1009).
	if len(buf) == 0 {
		err = ErrBufTooSmall
		return
	}

	written, err = io.CopyBuffer(dst, src, buf)

	return
}
/*
CopyBufWithDynamic is like [CopyBufWith] except it will call bufFunc after each previous buffer is written.

That is to say (using a particularly contrived example):

	import time

	func dynBuf() (b []byte) {
		var t time.Time = time.Now()

		b = make([]byte, t.Seconds())

		return
	}

Then:

	CopyBufWithDynamic(w, r, dynBuf)

will use a buffer sized to the seconds of the time it reads in/writes out the next buffer, whereas with [CopyBufWith]:

	CopyBufWith(w, r, dynBuf)

would use a *fixed* buffer size of whatever the seconds was equal to at the time of the *first call* to dynBuf.

`src` MUST return an [io.EOF] when its end is reached, but (as per e.g. [io.CopyBuffer]) the io.EOF error will not
be returned from CopyBufWithDynamic. (Any/all other errors encountered will be returned, however, and copying will
immediately cease.)
*/
func CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {

	var nr int
	var nw int
	var end bool
	var buf []byte

	for {
		// A new buffer is requested for every chunk; this per-iteration
		// allocation is the entire point of the "Dynamic" variant.
		buf = bufFunc()
		// len(buf) == 0 also covers nil (staticcheck S1009).
		if len(buf) == 0 {
			err = ErrBufTooSmall
			return
		}
		nr, err = src.Read(buf)
		if err == io.EOF {
			err = nil
			end = true
		} else if err != nil {
			return
		}
		buf = buf[:nr]
		if nw, err = dst.Write(buf); err != nil {
			written += int64(nw)
			return
		}
		written += int64(nw)
		if len(buf) != nw {
			err = ErrShortWrite
			return
		}
		if end {
			break
		}
	}

	return
}
// NewChunker returns a [ChunkLocker] ready to use.
func NewChunker(chunkSize uint) (c *ChunkLocker, err error) {

	c = new(ChunkLocker)
	if err = c.SetChunkLen(chunkSize); err != nil {
		return
	}

	return
}
// NewCtxIO returns a [CtxIO].
func NewCtxIO(ctx context.Context, r io.Reader, w io.Writer, chunkSize uint) (c *CtxIO, err error) {

	// Validation order preserved from the original:
	// reader, writer, chunk size, then context.
	switch {
	case r == nil:
		err = ErrNilReader
		return
	case w == nil:
		err = ErrNilWriter
		return
	case chunkSize == 0:
		err = ErrInvalidChunkSize
		return
	case ctx == nil:
		err = ErrNilCtx
		return
	}

	c = &CtxIO{
		r: r,
		w: w,
		l: ChunkLocker{
			chunkLen: chunkSize,
		},
		ctx: ctx,
	}

	return
}
/*
NewXIO returns a nil [XIO].

A weird "feature" of Golang is that a nil XIO is perfectly fine to use;
it's completely stateless and only has pointer receivers that only work with passed in
values so `new(XIO)` is completely unnecessary (as is calling NewXIO, strictly speaking).

In other words, this works fine:

	var xc *iox.XIO

	if n, err = xc.Copy(w, r); err != nil {
		return
	}

This function is just to maintain cleaner-looking code if you should so need it,
or want an XIO without declaring one:

	if n, err = iox.NewXIO().Copy(w, r); err != nil {
		return
	}
*/
func NewXIO() (x *XIO) {

	// Intentionally returns the nil zero value; see the doc comment above.
	return
}

28
iox/funcs_chunklocker.go Normal file
View File

@@ -0,0 +1,28 @@
package iox
// GetChunkLen returns the current chunk size/length in bytes.
func (c *ChunkLocker) GetChunkLen() (size uint) {

	c.lock.RLock()
	defer c.lock.RUnlock()

	return c.chunkLen
}
// SetChunkLen sets the current chunk size/length in bytes.
// A zero size is invalid and returns [ErrInvalidChunkSize].
func (c *ChunkLocker) SetChunkLen(size uint) (err error) {

	if size == 0 {
		return ErrInvalidChunkSize
	}

	c.lock.Lock()
	defer c.lock.Unlock()
	c.chunkLen = size

	return nil
}

173
iox/funcs_ctxio.go Normal file
View File

@@ -0,0 +1,173 @@
package iox
import (
`bytes`
`context`
`io`
`math`
)
/*
Copy copies `src` to `dst` in chunks of the configured chunk length,
interruptible by the CtxIO's internal context. It implements [Copier].
*/
func (c *CtxIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) {

	// The chunk length must fit in an int64 for CopyCtxBufN.
	// NOTE(review): chunkLen is read without c.l's lock here, as in the
	// sibling methods — confirm that is acceptable for this type.
	if c.l.chunkLen > math.MaxInt64 {
		err = ErrChunkTooBig
		// BUG FIX: previously there was no return here, so the error was
		// silently overwritten by the CopyCtxBufN call below (the sibling
		// Write method already returned early on this condition).
		return
	}

	return CopyCtxBufN(c.ctx, dst, src, int64(c.l.chunkLen))
}
// CopyBufN acts as [CopyCtxBufN] using the CtxIO's internal context. It implements [SizedCopyBufferer].
func (c *CtxIO) CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {

	if n <= 0 {
		return 0, ErrBufTooSmall
	}

	return CopyCtxBufN(c.ctx, dst, src, n)
}
// GetChunkLen returns the current chunk size/length in bytes (see [Chunker]).
func (c *CtxIO) GetChunkLen() (size uint) {
	return c.l.GetChunkLen()
}
/*
Read implements [io.Reader], reading from the CtxIO's internal buffer into `p`
using the internal context (see [CtxIO.ReadWithContext]).

The int64 byte count is clamped to [math.MaxInt] so it fits the int return.
*/
func (c *CtxIO) Read(p []byte) (n int, err error) {

	var nr int64

	nr, err = c.ReadWithContext(c.ctx, p)
	// The clamp applied identically on both the error and success paths,
	// so it is done once here instead of being duplicated per branch.
	if nr > math.MaxInt {
		n = math.MaxInt
	} else {
		n = int(nr)
	}

	return
}
/*
ReadWithContext reads from the CtxIO's internal buffer into `p` in chunks of
the configured chunk length, interruptible by `ctx`.

An empty/nil `p` reads nothing and returns no error.
If the internal buffer is empty, [io.EOF] is returned.
*/
func (c *CtxIO) ReadWithContext(ctx context.Context, p []byte) (n int64, err error) {

	var nr int
	var off int
	var buf []byte

	// len(p) == 0 also covers nil (staticcheck S1009).
	if len(p) == 0 {
		return
	}

	if c.buf.Len() == 0 {
		err = io.EOF
		return
	}

	if c.l.chunkLen > uint(len(p)) {
		// Would normally be a single chunk, so one-shot it.
		nr, err = c.buf.Read(p)
		n = int64(nr)
		return
	}

	// Chunk over it.
endRead:
	for {
		select {
		case <-ctx.Done():
			err = ctx.Err()
			return
		default:
			/*
				off(set) is the index of the *next position* to write to.
				Therefore the last offset == len(p),
				therefore:

				* if off == len(p), "done" (return no error, do *not* read from buf)
				* if off + c.l.chunkLen > len(p), buf should be len(p) - off instead
			*/
			if off == len(p) {
				break endRead
			}
			if uint(off)+c.l.chunkLen > uint(len(p)) {
				buf = make([]byte, len(p)-off)
			} else {
				buf = make([]byte, c.l.chunkLen)
			}
			nr, err = c.buf.Read(buf)
			n += int64(nr)
			if nr > 0 {
				// BUG FIX: copy into p at the *current* offset before
				// advancing it. The previous order (off += nr, then copy)
				// left p[:nr] unwritten and shifted every chunk forward,
				// dropping the tail of the data.
				copy(p[off:], buf[:nr])
				off += nr
			}
			if err == io.EOF {
				break endRead
			} else if err != nil {
				return
			}
		}
	}

	return
}
// SetChunkLen sets the current chunk size/length in bytes (see [Chunker]).
func (c *CtxIO) SetChunkLen(size uint) (err error) {
	return c.l.SetChunkLen(size)
}
// SetContext replaces the CtxIO's internal context.
// A nil `ctx` returns [ErrNilCtx] and leaves the existing context in place.
func (c *CtxIO) SetContext(ctx context.Context) (err error) {

	if ctx == nil {
		return ErrNilCtx
	}
	c.ctx = ctx

	return nil
}
/*
Write implements [io.Writer], writing `p` to the CtxIO's internal buffer in
chunks of the configured chunk length using the internal context.

The int64 byte count is clamped to [math.MaxInt] so it fits the int return.
Returns [ErrChunkTooBig] if the chunk length does not fit in an int64.
*/
func (c *CtxIO) Write(p []byte) (n int, err error) {

	var nw int64

	if c.l.chunkLen > math.MaxInt64 {
		err = ErrChunkTooBig
		return
	}

	nw, err = c.WriteNWithContext(c.ctx, p, int64(c.l.chunkLen))
	// The clamp applied identically on both the error and success paths,
	// so it is done once here instead of being duplicated per branch.
	if nw > math.MaxInt {
		n = math.MaxInt
	} else {
		n = int(nw)
	}

	return
}
// WriteNWithContext writes `p` to the internal buffer in chunks of `n` bytes, interruptible by `ctx` (via [CopyCtxBufN]).
func (c *CtxIO) WriteNWithContext(ctx context.Context, p []byte, n int64) (written int64, err error) {
	return CopyCtxBufN(ctx, &c.buf, bytes.NewReader(p), n)
}
// WriteRune writes a single rune to the internal buffer. It implements [RuneWriter].
func (c *CtxIO) WriteRune(r rune) (n int, err error) {
	// We don't even bother listening for the ctx.Done because it's a single rune.
	n, err = c.buf.WriteRune(r)
	return
}
// WriteWithContext writes `p` to the internal buffer in chunks of the
// configured chunk length, interruptible by `ctx`.
// Returns [ErrChunkTooBig] if the chunk length does not fit in an int64.
func (c *CtxIO) WriteWithContext(ctx context.Context, p []byte) (n int64, err error) {

	if c.l.chunkLen > math.MaxInt64 {
		return 0, ErrChunkTooBig
	}

	return CopyCtxBufN(ctx, &c.buf, bytes.NewReader(p), int64(c.l.chunkLen))
}

40
iox/funcs_xio.go Normal file
View File

@@ -0,0 +1,40 @@
package iox
import (
`io`
)
// Copy copies [io.Reader] `src` to [io.Writer] `dst`. It implements [Copier].
func (x *XIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) {
	written, err = io.Copy(dst, src)
	return
}
// CopyBuffer copies [io.Reader] `src` to [io.Writer] `dst` using buffer `buf`. It implements [CopyBufferer].
func (x *XIO) CopyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
	written, err = io.CopyBuffer(dst, src, buf)
	return
}
// CopyBufWith copies [io.Reader] `src` to [io.Writer] `dst` using buffer returner `bufFunc`. It implements [SizedCopyBufferInvoker].
func (x *XIO) CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
	written, err = CopyBufWith(dst, src, bufFunc)
	return
}
// CopyBufWithDynamic copies [io.Reader] `src` to [io.Writer] `dst` using buffer returner `bufFunc` for each chunk. It implements [DynamicSizedCopyBufferInvoker].
func (x *XIO) CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
	written, err = CopyBufWithDynamic(dst, src, bufFunc)
	return
}
/*
CopyBufN reads buffered bytes from [io.Reader] `src` and copies to [io.Writer] `dst`
using the synchronous buffer size `n`.

It implements [SizedCopyBufferer].
*/
func (x *XIO) CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
	written, err = CopyBufN(dst, src, n)
	return
}
// CopyN copies from [io.Reader] `src` to [io.Writer] `dst`, `n` bytes at a time. It implements [SizedCopier].
func (x *XIO) CopyN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
	written, err = io.CopyN(dst, src, n)
	return
}

View File

@@ -1,8 +1,209 @@
package iox
import (
`bytes`
`context`
`io`
`sync`
)
type (
// RuneWriter matches the behavior of *(bytes.Buffer).WriteRune and *(bufio.Writer).WriteRune
/*
RuneWriter matches the behavior of [bytes.Buffer.WriteRune] and [bufio.Writer.WriteRune].
(Note that this package does not have a "RuneReader"; see [io.RuneReader] instead.)
*/
RuneWriter interface {
WriteRune(r rune) (n int, err error)
}
// Copier matches the signature/behavior of [io.Copy]. Implemented by [XIO].
Copier interface {
Copy(dst io.Writer, src io.Reader) (written int64, err error)
}
// CopyBufferer matches the signature/behavior of [io.CopyBuffer]. Implemented by [XIO].
CopyBufferer interface {
CopyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error)
}
// SizedCopier matches the signature/behavior of [io.CopyN]. Implemented by [XIO].
SizedCopier interface {
CopyN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
}
// SizedCopyBufferer matches the signature/behavior of [CopyBufN]. Implemented by [XIO].
SizedCopyBufferer interface {
CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
}
// SizedCopyBufferInvoker matches the signature/behavior of [CopyBufWith]. Implemented by [XIO].
SizedCopyBufferInvoker interface {
CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error)
}
// DynamicSizedCopyBufferInvoker matches the signature/behavior of [CopyBufWithDynamic]. Implemented by [XIO].
DynamicSizedCopyBufferInvoker interface {
CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error)
}
/*
Chunker is used by both [ContextReader] and [ContextWriter] to set/get the current chunk size.
Chunking is inherently required to be specified in order to interrupt reads/writes/copies with a [context.Context].
Implementations *must* use a [sync.RWMutex] to get (RLock) and set (Lock) the chunk size.
The chunk size *must not* be directly accessible to maintain concurrency safety assumptions.
*/
Chunker interface {
// GetChunkLen returns the current chunk size/length in bytes.
GetChunkLen() (size uint)
// SetChunkLen sets the current chunk size/length in bytes.
SetChunkLen(size uint) (err error)
}
/*
ChunkReader implements a chunking reader.
Third-party implementations *must* respect the chunk size locking (see [Chunker]).
The Read method should read in chunks of the internal chunk size.
*/
ChunkReader interface {
io.Reader
Chunker
}
/*
ChunkWriter implements a chunking writer.
Third-party implementations *must* respect the chunk size locking (see [Chunker]).
The Write method should write out in chunks of the internal chunk size.
*/
ChunkWriter interface {
io.Writer
Chunker
}
// ChunkReadWriter implements a chunking reader/writer.
ChunkReadWriter interface {
ChunkReader
ChunkWriter
}
/*
ContextSetter allows one to set an internal context.
A nil context should return an error.
*/
ContextSetter interface {
SetContext(context context.Context) (err error)
}
/*
ContextCopier is defined to allow for consumer-provided types. See [CtxIO] for a package-provided type.
The Copy method should use an internal context and chunk size
(and thus wrap [CopyCtxBufN] internally on an external call to Copy, etc.).
*/
ContextCopier interface {
Copier
Chunker
ContextSetter
SizedCopyBufferer
}
/*
ContextReader is primarily here to allow for consumer-provided types. See [CtxIO] for a package-provided type.
The Read method should use an internal context and chunk size.
The ReadWithContext method should use an internal chunk size.
*/
ContextReader interface {
ChunkReader
ContextSetter
ReadWithContext(ctx context.Context, p []byte) (n int64, err error)
}
/*
ContextWriter is primarily here to allow for consumer-provided types. See [CtxIO] for a package-provided type.
The Write method should use an internal context.
The WriteWithContext should use an internal chunk size.
*/
ContextWriter interface {
ChunkWriter
ContextSetter
WriteWithContext(ctx context.Context, p []byte) (n int64, err error)
WriteNWithContext(ctx context.Context, p []byte, n int64) (written int64, err error)
}
/*
ContextReadWriter is primarily here to allow for consumer-provided types.
See [CtxIO] for a package-provided type.
*/
ContextReadWriter interface {
ContextReader
ContextWriter
}
)
type (
// ChunkLocker implements [Chunker].
ChunkLocker struct {
lock sync.RWMutex
chunkLen uint
}
/*
CtxIO is a type used to demonstrate "stateful" I/O introduced by this package.
It implements:
* [Copier]
* [Chunker]
* [RuneWriter]
* [ChunkReader]
* [ChunkWriter]
* [ContextCopier]
* [ContextSetter]
* [ContextReader]
* [ContextWriter]
* [ChunkReadWriter]
* [ContextReadWriter]
* [SizedCopyBufferer]
Unlike [XIO], it must be non-nil (see [NewCtxIO]) since it maintains state
(though technically, one does not need to call [NewCtxIO] if they call
[CtxIO.SetChunkLen] and [CtxIO.SetContext] before any other methods).
[CtxIO.Read] and other Read methods writes to an internal buffer,
and [CtxIO.Write] and other Write methods writes out from it.
*/
CtxIO struct {
r io.Reader
w io.Writer
l ChunkLocker
buf bytes.Buffer
ctx context.Context
}
/*
XIO is a type used to demonstrate "stateless" I/O introduced by this package.
It implements:
* [Copier]
* [CopyBufferer]
* [SizedCopier]
* [SizedCopyBufferer]
* [SizedCopyBufferInvoker]
* [DynamicSizedCopyBufferInvoker]
Unlike [CtxIO], the zero-value is ready to use since it holds no state
or configuration whatsoever.
A nil XIO is perfectly usable but if you want something more idiomatic,
see [NewXIO].
*/
XIO struct{}
)

View File

@@ -1,3 +1,5 @@
- logging probably needs mutexes
- macOS support beyond the legacy NIX stuff. it apparently uses something called "ULS", "Unified Logging System".
-- https://developer.apple.com/documentation/os/logging
-- https://developer.apple.com/documentation/os/generating-log-messages-from-your-code

View File

@@ -3,6 +3,8 @@ package logging
import (
`os`
`path/filepath`
`r00t2.io/goutils/bitmask`
)
// Flags for logger configuration. These are used internally.

View File

@@ -21,7 +21,7 @@ import (
Only the first logPaths entry that "works" will be used, later entries will be ignored.
Currently this will almost always return a WinLogger.
*/
func (m *MultiLogger) AddDefaultLogger(identifier string, eventIDs *WinEventID, logFlags int, logPaths ...string) (err error) {
func (m *MultiLogger) AddDefaultLogger(identifier string, logFlags int, logPaths ...string) (err error) {
var l Logger
var exists bool
@@ -36,9 +36,9 @@ func (m *MultiLogger) AddDefaultLogger(identifier string, eventIDs *WinEventID,
}
if logPaths != nil {
l, err = GetLogger(m.EnableDebug, m.Prefix, eventIDs, logFlags, logPaths...)
l, err = GetLogger(m.EnableDebug, m.Prefix, logFlags, logPaths...)
} else {
l, err = GetLogger(m.EnableDebug, m.Prefix, eventIDs, logFlags)
l, err = GetLogger(m.EnableDebug, m.Prefix, logFlags)
}
if err != nil {
return

View File

@@ -10,32 +10,63 @@ import (
)
/*
GetLogger returns an instance of Logger that best suits your system's capabilities. Note that this is a VERY generalized interface to the Windows Event Log.
GetLogger returns an instance of Logger that best suits your system's capabilities.
Note that this is a VERY generalized interface to the Windows Event Log to conform with multiplatform compat.
You'd have a little more flexibility with [GetLoggerWindows] (this function wraps that one).
If you need more custom behavior than that, I recommend using [golang.org/x/sys/windows/svc/eventlog] directly
(or using another logging module).
If `enableDebug` is true, debug messages (which according to your program may or may not contain sensitive data) are rendered and written (otherwise they are ignored).
The `prefix` correlates to the `source` parameter in [GetLoggerWindows], and this function inherently uses [DefaultEventID],
but otherwise it remains the same as [GetLoggerWindows] - refer to it for documentation on the other parameters.
If you call [GetLogger], you will only get a single ("best") logger your system supports.
If you want to log to multiple [Logger] destinations at once (or want to log to an explicit [Logger] type),
use [GetMultiLogger].
*/
func GetLogger(enableDebug bool, prefix string, logConfigFlags int, logPaths ...string) (logger Logger, err error) {
if logger, err = GetLoggerWindows(enableDebug, prefix, DefaultEventID, logConfigFlags, logPaths...); err != nil {
return
}
return
}
/*
GetLoggerWindows returns an instance of Logger that best suits your system's capabilities.
This is a slightly less (but still quite) generalized interface to the Windows Event Log than [GetLogger].
If you require more robust logging capabilities (e.g. custom event IDs per uniquely identifiable event),
you will want to set up your own logger (golang.org/x/sys/windows/svc/eventlog).
you will want to set up your own logger via [golang.org/x/sys/windows/svc/eventlog].
If enableDebug is true, debug messages (which according to your program may or may not contain sensitive data) are rendered and written (otherwise they are ignored).
If `enableDebug` is true, debug messages (which according to your program may or may not contain sensitive data)
are rendered and written (otherwise they are ignored).
A blank source will return an error as it's used as the source name. Other functions, struct fields, etc. will refer to this as the "prefix".
A blank `source` will return an error as it's used as the source name.
Throughout the rest of this documentation you will see this referred to as the `prefix` to remain platform-agnostic.
A pointer to a WinEventID struct may be specified for eventIDs to map extended logging levels (as Windows only supports three levels natively).
A pointer to a [WinEventID] struct may be specified for `eventIDs` to map extended logging levels
(as Windows only supports three levels natively).
If it is nil, a default one (DefaultEventID) will be used.
logConfigFlags is the corresponding flag(s) OR'd for StdLogger.LogFlags / FileLogger.StdLogger.LogFlags if either is selected. See StdLogger.LogFlags and
https://pkg.go.dev/log#pkg-constants for details.
`logConfigFlags` is the corresponding flag(s) OR'd for [StdLogger.LogFlags] (and/or the [StdLogger.LogFlags] for [FileLogger])
if either is selected. See [StdLogger.LogFlags] and [stdlib log's constants] for details.
logPaths is an (optional) list of strings to use as paths to test for writing. If the file can be created/written to,
it will be used (assuming you have no higher-level loggers available).
`logPaths` is an (optional) list of strings to use as paths to test for writing.
If the file can be created/written to, it will be used (assuming you have no higher-level loggers available).
Only the first logPaths entry that "works" will be used, later entries will be ignored.
Currently this will almost always return a WinLogger.
Only the first `logPaths` entry that "works" will be used, later entries will be ignored.
Currently this will almost always return a [WinLogger].
If you call GetLogger, you will only get a single ("best") logger your system supports.
If you want to log to multiple Logger destinations at once (or want to log to an explicit Logger type),
use GetMultiLogger.
If you call [GetLoggerWindows], you will only get a single ("best") logger your system supports.
If you want to log to multiple [Logger] destinations at once (or want to log to an explicit [Logger] type),
use [GetMultiLogger].
[stdlib log's constants]: https://pkg.go.dev/log#pkg-constants
*/
func GetLogger(enableDebug bool, source string, eventIDs *WinEventID, logConfigFlags int, logPaths ...string) (logger Logger, err error) {
func GetLoggerWindows(enableDebug bool, source string, eventIDs *WinEventID, logConfigFlags int, logPaths ...string) (logger Logger, err error) {
var logPath string
var logFlags bitmask.MaskBit

View File

@@ -124,7 +124,7 @@ func TestDefaultLogger(t *testing.T) {
t.Fatalf("error when closing handler for temporary log file '%v': %v", tempfile.Name(), err.Error())
}
if l, err = GetLogger(true, TestLogPrefix, DefaultEventID, logFlags, tempfilePath); err != nil {
if l, err = GetLoggerWindows(true, TestLogPrefix, DefaultEventID, logFlags, tempfilePath); err != nil {
t.Fatalf("error when spawning default Windows logger via GetLogger: %v", err.Error())
}

View File

@@ -35,7 +35,7 @@ func TestMultiLogger(t *testing.T) {
t.Fatalf("error when adding FileLogger to MultiLogger: %v", err.Error())
}
if err = l.AddDefaultLogger("DefaultLogger", DefaultEventID, logFlags, tempfilePath); err != nil {
if err = l.AddDefaultLogger("DefaultLogger", logFlags, tempfilePath); err != nil {
t.Fatalf("error when adding default logger to MultiLogger: %v", err.Error())
}

4
mapsx/doc.go Normal file
View File

@@ -0,0 +1,4 @@
/*
Package mapsx includes functions that probably should have been in [maps] but aren't.
*/
package mapsx

9
mapsx/errs.go Normal file
View File

@@ -0,0 +1,9 @@
package mapsx
import (
`errors`
)
var (
	// ErrNotFound is the panic value used by [Must] when a requested key is absent.
	ErrNotFound = errors.New("key not found")
)

43
mapsx/funcs.go Normal file
View File

@@ -0,0 +1,43 @@
package mapsx
/*
Get mimics Python's [dict.get()] behavior: it looks up key `k` in map `m`
and falls back to the provided default `v` when `k` is absent.

See also [GetOk], [Must].

[dict.get()]: https://docs.python.org/3/library/stdtypes.html#dict.get
*/
func Get[Map ~map[K]V, K comparable, V any](m Map, k K, v V) (val V) {
	var found bool

	// A lookup on a nil map simply misses, so no nil-guard is needed.
	if val, found = m[k]; !found {
		val = v
	}

	return
}
// GetOk is like [Get] but additionally reports whether `k` was present in `m`. See also [Must].
func GetOk[Map ~map[K]V, K comparable, V any](m Map, k K, v V) (val V, found bool) {
	val, found = m[k]
	if !found {
		// Miss: substitute the caller-supplied fallback.
		val = v
	}
	return
}
/*
Must, unlike [Get] or [GetOk], requires that `k` exist in map `m`.

If `k` is present, its value is returned.
Otherwise Must panics with [ErrNotFound].
*/
func Must[Map ~map[K]V, K comparable, V any](m Map, k K) (val V) {
	if v, ok := m[k]; ok {
		return v
	}
	panic(ErrNotFound)
}

21
multierr/TODO Normal file
View File

@@ -0,0 +1,21 @@
- add unwrapping
https://go.dev/blog/go1.13-errors#the-unwrap-method
- add As method, takes a ptr to a slice of []error to return the first matching error type (errors.As) for each?
- add AsAll [][]error ptr param for multiple errors per type?
- add Map, returns map[string][]error, where key is k:
var sb strings.Builder
t = reflect.TypeOf(err)
if t.PkgPath() != "" {
sb.WriteString(t.PkgPath())
} else {
sb.WriteString("<UNKNOWN>")
}
sb.WriteString(".")
if t.Name() != "" {
sb.WriteString(t.Name())
} else {
sb.WriteString("<UNKNOWN>")
}
k = sb.String()
- support generics for similar to above?
- this might allow for "error filtering"

13
netx/consts_nix.go Normal file
View File

@@ -0,0 +1,13 @@
//go:build !windows
package netx
import (
`golang.org/x/sys/unix`
)
const (
	// AFUnspec is the platform's "unspecified" address family (AF_UNSPEC).
	AFUnspec uint16 = unix.AF_UNSPEC
	// AFInet is the platform's IPv4 address family (AF_INET).
	AFInet uint16 = unix.AF_INET
	// AFInet6 is the platform's IPv6 address family (AF_INET6).
	AFInet6 uint16 = unix.AF_INET6
)

13
netx/consts_windows.go Normal file
View File

@@ -0,0 +1,13 @@
//go:build windows
package netx
import (
`golang.org/x/sys/windows`
)
const (
	// AFUnspec is the platform's "unspecified" address family (AF_UNSPEC).
	AFUnspec uint16 = windows.AF_UNSPEC
	// AFInet is the platform's IPv4 address family (AF_INET).
	AFInet uint16 = windows.AF_INET
	// AFInet6 is the platform's IPv6 address family (AF_INET6).
	AFInet6 uint16 = windows.AF_INET6
)

10
netx/errors.go Normal file
View File

@@ -0,0 +1,10 @@
package netx
import (
`errors`
)
var (
	// ErrBadMask4Str indicates a dotted-quad IPv4 netmask string that could not be parsed.
	ErrBadMask4Str error = errors.New("netx: unknown/bad IPv4 netmask dotted quad")
	// ErrBadNetFam indicates an unknown or unsupported IP network family.
	ErrBadNetFam error = errors.New("netx: unknown/bad IP network family")
)

410
netx/funcs.go Normal file
View File

@@ -0,0 +1,410 @@
package netx
import (
`math/bits`
`net`
`net/netip`
`strconv`
`strings`
`go4.org/netipx`
)
/*
AddrRfc returns an RFC-friendly string from an IP address ([net/netip.Addr]).
If addr is an IPv4 address, it will simply be the string representation (e.g. "203.0.113.1").
If addr is an IPv6 address, it will be enclosed in brackets (e.g. "[2001:db8::1]").
If the version can't be determined, rfcStr will be an empty string.
*/
func AddrRfc(addr netip.Addr) (rfcStr string) {
if addr.Is4() {
rfcStr = addr.String()
} else if addr.Is6() {
rfcStr = "[" + addr.String() + "]"
}
return
}
/*
Cidr4ToIPMask takes an IPv4 CIDR/bit size/prefix length and returns the [net.IPMask].
It's (essentially) the inverse of [net.IPMask.Size].

See also:

* [Cidr4ToMask]
* [Cidr4ToStr]

Inverse of [IPMask4ToCidr].
*/
func Cidr4ToIPMask(cidr uint8) (ipMask net.IPMask, err error) {
	if cidr <= 32 {
		// Within IPv4 range; delegate the actual mask construction to stdlib.
		ipMask = net.CIDRMask(int(cidr), 32)
		return
	}
	err = ErrBadNetFam

	return
}
/*
Cidr4ToMask takes an IPv4 CIDR/bit size/prefix length and returns the netmask *in bitmask form*.

See also:

* [Cidr4ToIPMask]
* [Cidr4ToStr]

Inverse of [Mask4ToCidr].
*/
func Cidr4ToMask(cidr uint8) (mask uint32, err error) {
	if cidr > 32 {
		err = ErrBadNetFam
		return
	}
	// Shift a fully-set 32-bit value left so only the top `cidr` bits remain set.
	// (For cidr == 0 the shift count is 32, which Go defines as yielding 0 —
	// no overflow/UB risk, unlike the (1<<32)-(1<<(32-bits)) formulation.)
	mask = 0xffffffff << (32 - uint32(cidr))

	return
}
/*
Cidr4ToStr is a convenience wrapper around [IPMask4ToStr]([Cidr4ToMask](cidr)).

See also:

* [Cidr4ToIPMask]
* [Cidr4ToMask]

Inverse of [Mask4StrToCidr].
*/
func Cidr4ToStr(cidr uint8) (maskStr string, err error) {
	var ipMask net.IPMask

	// Chain the two conversions; the first error (if any) wins.
	if ipMask, err = Cidr4ToIPMask(cidr); err == nil {
		maskStr, err = IPMask4ToStr(ipMask)
	}

	return
}
/*
GetAddrFamily returns the network family of a [net/netip.Addr].

See also [GetIpFamily].

If addr is not a "valid" IP address or the version can't be determined, family will be AFUnspec (usually 0x00/0).
*/
func GetAddrFamily(addr netip.Addr) (family uint16) {
	family = AFUnspec

	// Invalid addresses (e.g. the zero Addr) stay AFUnspec.
	if !addr.IsValid() {
		return
	}
	switch {
	case addr.Is4():
		family = AFInet
	case addr.Is6():
		family = AFInet6
	}

	return
}
/*
GetIpFamily returns the network family of a [net.IP].
See also [GetAddrFamily].
If ip is not a "valid" IP address or the version can't be determined,
family will be [golang.org/x/sys/unix.AF_UNSPEC] or [golang.org/x/sys/windows.AF_UNSPEC] depending on platform (usually 0x00/0).
*/
func GetIpFamily(ip net.IP) (family uint16) {
var ok bool
var addr netip.Addr
if addr, ok = netipx.FromStdIP(ip); !ok {
return
}
family = GetAddrFamily(addr)
return
}
/*
IpRfc returns an RFC-friendly string from an IP address ([net.IP]).
If ip is an IPv4 address, it will simmply be the string representation (e.g. "203.0.113.1").
If ip is an IPv6 address, it will be enclosed in brackets (e.g. "[2001:db8::1]").
If the version can't be determined, rfcStr will be an empty string.
*/
func IpRfc(ip net.IP) (rfcStr string) {
if ip.To4() != nil {
rfcStr = ip.To4().String()
} else if ip.To16() != nil {
rfcStr = "[" + ip.To16().String() + "]"
}
return
}
/*
IPMask4ToCidr returns a CIDR prefix size/bit size/bit length from a [net.IPMask].

See also:

* [IPMask4ToMask]
* [IPMask4ToStr]

Inverse of [Cidr4ToIPMask].
*/
func IPMask4ToCidr(ipMask net.IPMask) (cidr uint8, err error) {
	var ones int
	var total int

	// Size returns (0, 0) for non-canonical masks, which the total check rejects.
	if ones, total = ipMask.Size(); total != 32 {
		err = ErrBadNetFam
		return
	}
	// Size guarantees ones <= total, so ones is already within [0, 32] here;
	// the previous `ones > 32` guard was unreachable and has been removed.
	cidr = uint8(ones)

	return
}
/*
IPMask4ToMask returns the mask *in bitmask form* from a [net.IPMask].

See also:

* [IPMask4ToCidr]
* [IPMask4ToStr]

Inverse of [Mask4ToIPMask].
*/
func IPMask4ToMask(ipMask net.IPMask) (mask uint32, err error) {
	var cidr uint8

	// Chain the two conversions; the first error (if any) wins.
	if cidr, err = IPMask4ToCidr(ipMask); err == nil {
		mask, err = Cidr4ToMask(cidr)
	}

	return
}
/*
IPMask4ToStr returns a string representation of an IPv4 netmask (e.g. "255.255.255.0" for a /24) from a [net.IPMask].

See also:

* [IPMask4ToCidr]
* [IPMask4ToMask]

Inverse of [Mask4StrToIPMask].
*/
func IPMask4ToStr(ipMask net.IPMask) (maskStr string, err error) {
	// A net.IPMask is just a []byte; anything other than 4 octets isn't IPv4.
	if len(ipMask) != 4 {
		err = ErrBadNetFam
		return
	}

	var sb strings.Builder

	for octIdx, oct := range ipMask {
		if octIdx > 0 {
			sb.WriteByte('.')
		}
		sb.WriteString(strconv.Itoa(int(oct)))
	}
	maskStr = sb.String()

	return
}
/*
Mask4ToCidr converts an IPv4 netmask *in bitmask form* to a CIDR prefix size/bit size/bit length.

See also:

* [Mask4ToIPMask]
* [Mask4ToStr]

Inverse of [Cidr4ToMask].
*/
func Mask4ToCidr(mask uint32) (cidr uint8, err error) {
	// A canonical netmask is a contiguous run of ones starting at the most
	// significant bit, so the prefix length is simply the population count.
	// (The previous `32 - bits.LeadingZeros32(mask)` form returned 32 for ANY
	// mask with the top bit set — e.g. 0xffffff00 (/24) was reported as /32.)
	cidr = uint8(bits.OnesCount32(mask))

	return
}
/*
Mask4ToIPMask returns mask *in bitmask form* as a [net.IPMask].

See also:

* [Mask4ToCidr]
* [Mask4ToStr]

Inverse of [IPMask4ToMask].
*/
func Mask4ToIPMask(mask uint32) (ipMask net.IPMask, err error) {
	var cidr uint8

	// Convert to a prefix length first, then let stdlib build the mask bytes.
	if cidr, err = Mask4ToCidr(mask); err == nil {
		ipMask = net.CIDRMask(int(cidr), 32)
	}

	return
}
/*
Mask4ToStr returns a string representation of an IPv4 netmask (e.g. "255.255.255.0" for a /24) from a netmask *in bitmask form*.

See also:

* [Mask4ToCidr]
* [Mask4ToIPMask]

Inverse of [Mask4StrToMask].
*/
func Mask4ToStr(mask uint32) (maskStr string, err error) {
	var ipMask net.IPMask

	// Chain the two conversions; the first error (if any) wins.
	if ipMask, err = Mask4ToIPMask(mask); err == nil {
		maskStr, err = IPMask4ToStr(ipMask)
	}

	return
}
/*
Mask4StrToCidr parses a "dotted-quad" IPv4 netmask (e.g. "255.255.255.0" for a /24) and returns an IPv4 CIDR/bit size/prefix length.

See also:

* [Mask4StrToIPMask]
* [Mask4StrToMask]

Inverse of [Cidr4ToStr].
*/
func Mask4StrToCidr(maskStr string) (cidr uint8, err error) {
	var ipMask net.IPMask

	// Chain the two conversions; the first error (if any) wins.
	if ipMask, err = Mask4StrToIPMask(maskStr); err == nil {
		cidr, err = IPMask4ToCidr(ipMask)
	}

	return
}
/*
Mask4StrToIPMask parses a "dotted-quad" IPv4 netmask (e.g. "255.255.255.0" for a /24) and returns a [net.IPMask].

See also:

* [Mask4StrToCidr]
* [Mask4StrToMask]

Inverse of [IPMask4ToStr].
*/
func Mask4StrToIPMask(maskStr string) (mask net.IPMask, err error) {
	var u64 uint64
	var octets []byte = make([]byte, 4)
	var quads []string = strings.Split(maskStr, ".")

	if len(quads) != 4 {
		err = ErrBadMask4Str
		return
	}
	// A net.IPMask is just a []byte; parse each quad as one octet.
	for quadIdx, quad := range quads {
		// bitSize 8 rejects anything outside an octet's 0-255 range.
		if u64, err = strconv.ParseUint(quad, 10, 8); err != nil {
			return
		}
		octets[quadIdx] = byte(u64)
	}
	mask = net.IPMask(octets)

	return
}
/*
Mask4StrToMask parses a "dotted-quad" IPv4 netmask (e.g. "255.255.255.0" for a /24) and returns a netmask *in bitmask form*.

See also:

* [Mask4StrToCidr]
* [Mask4StrToIPMask]

Inverse of [Mask4ToStr].
*/
func Mask4StrToMask(maskStr string) (mask uint32, err error) {
	var ipMask net.IPMask

	// Chain the two conversions; the first error (if any) wins.
	if ipMask, err = Mask4StrToIPMask(maskStr); err == nil {
		mask, err = IPMask4ToMask(ipMask)
	}

	return
}

134
netx/funcs_test.go Normal file
View File

@@ -0,0 +1,134 @@
package netx
import (
`math`
`net`
`net/netip`
"testing"
)
// TestFuncsIP verifies GetIpFamily and GetAddrFamily against one IPv4 and
// one IPv6 address.
func TestFuncsIP(t *testing.T) {

	var err error
	var ip net.IP
	var addr netip.Addr
	var ipFamily uint16
	var tgtFamily uint16
	var addrFamily uint16

	// IPv4 on even indexes, IPv6 on odd.
	for idx, s := range []string{
		"203.0.113.10",
		"2001:db8::203:0:113:10",
	} {
		if ip = net.ParseIP(s); ip == nil {
			t.Fatalf("ip %s not valid", s)
		}
		if addr, err = netip.ParseAddr(s); err != nil {
			t.Fatalf("addr %s not valid", s)
		}
		ipFamily = GetIpFamily(ip)
		addrFamily = GetAddrFamily(addr)
		if ipFamily == AFUnspec {
			t.Fatalf("GetIpFamily: Failed on IP %s (unspecified family)", s)
		}
		if addrFamily == AFUnspec {
			t.Fatalf("GetAddrFamily: Failed on IP %s (unspecified family)", s)
		}
		if idx%2 == 0 {
			tgtFamily = AFInet
		} else {
			tgtFamily = AFInet6
		}
		// FIX: these messages previously printed (s, AFInet, tgtFamily) —
		// i.e. the expected slot always showed AFInet and the "got" slot
		// showed the EXPECTED family instead of the actual one.
		if ipFamily != tgtFamily {
			t.Fatalf("GetIpFamily: Failed on IP %s (expected %d, got %d)", s, tgtFamily, ipFamily)
		}
		if addrFamily != tgtFamily {
			t.Fatalf("GetAddrFamily: Failed on IP %s (expected %d, got %d)", s, tgtFamily, addrFamily)
		}
	}
}
// TestFuncsMask round-trips the all-ones /32 mask through every conversion
// helper: to CIDR, to net.IPMask, to bitmask form, and to dotted-quad string.
// NOTE(review): only the /32 edge is exercised; a mid-range mask (e.g. /24)
// would also catch directional bugs in the bit arithmetic — consider adding one.
func TestFuncsMask(t *testing.T) {

	var err error
	var cidr uint8
	var mask uint32
	var maskStr string
	var ipMask net.IPMask
	// All four targets represent the same /32 ("host") mask in different forms.
	var cidrTgt uint8 = 32
	var maskTgt uint32 = math.MaxUint32
	var maskStrTgt string = "255.255.255.255"
	var ipMaskTgt net.IPMask = net.IPMask{255, 255, 255, 255}

	// To CIDR
	if cidr, err = Mask4ToCidr(maskTgt); err != nil {
		t.Fatal(err)
	} else if cidr != cidrTgt {
		t.Fatalf("Mask4ToCidr: cidr %d != cidrTgt %d", cidr, cidrTgt)
	}
	if cidr, err = IPMask4ToCidr(ipMaskTgt); err != nil {
		t.Fatal(err)
	} else if cidr != cidrTgt {
		t.Fatalf("IPMask4ToCidr: cidr %d != cidrTgt %d", cidr, cidrTgt)
	}
	if cidr, err = Mask4StrToCidr(maskStrTgt); err != nil {
		t.Fatal(err)
	} else if cidr != cidrTgt {
		t.Fatalf("Mask4StrToCidr cidr %d != cidrTgt %d", cidr, cidrTgt)
	}
	// To net.IPMask
	if ipMask, err = Cidr4ToIPMask(cidrTgt); err != nil {
		t.Fatal(err)
	} else if ipMaskTgt.String() != ipMask.String() {
		t.Fatalf("Cidr4ToIPMask ipMask %s != ipMaskTgt %s", ipMask.String(), ipMaskTgt.String())
	}
	if ipMask, err = Mask4ToIPMask(maskTgt); err != nil {
		t.Fatal(err)
	} else if ipMaskTgt.String() != ipMask.String() {
		t.Fatalf("Mask4ToIPMask ipMask %s != ipMaskTgt %s", ipMask.String(), ipMaskTgt.String())
	}
	if ipMask, err = Mask4StrToIPMask(maskStrTgt); err != nil {
		t.Fatal(err)
	} else if ipMaskTgt.String() != ipMask.String() {
		t.Fatalf("Mask4StrToIPMask ipMask %s != ipMaskTgt %s", ipMask.String(), ipMaskTgt.String())
	}
	// To bitmask
	if mask, err = Cidr4ToMask(cidrTgt); err != nil {
		t.Fatal(err)
	} else if mask != maskTgt {
		t.Fatalf("Cidr4ToMask mask %d != maskTgt %d", mask, maskTgt)
	}
	if mask, err = IPMask4ToMask(ipMaskTgt); err != nil {
		t.Fatal(err)
	} else if mask != maskTgt {
		t.Fatalf("IPMask4ToMask mask %d != maskTgt %d", mask, maskTgt)
	}
	if mask, err = Mask4StrToMask(maskStrTgt); err != nil {
		t.Fatal(err)
	} else if mask != maskTgt {
		t.Fatalf("Mask4StrToMask mask %d != maskTgt %d", mask, maskTgt)
	}
	// To string
	if maskStr, err = Cidr4ToStr(cidrTgt); err != nil {
		t.Fatal(err)
	} else if maskStr != maskStrTgt {
		t.Fatalf("Cidr4ToStr maskStr %s != maskStrTgt %s", maskStr, maskStrTgt)
	}
	if maskStr, err = IPMask4ToStr(ipMaskTgt); err != nil {
		t.Fatal(err)
	} else if maskStr != maskStrTgt {
		t.Fatalf("IPMask4ToStr maskStr %s != maskStrTgt %s", maskStr, maskStrTgt)
	}
	if maskStr, err = Mask4ToStr(maskTgt); err != nil {
		t.Fatal(err)
	} else if maskStr != maskStrTgt {
		t.Fatalf("Mask4ToStr maskStr %s != maskStrTgt %s", maskStr, maskStrTgt)
	}
}

View File

@@ -10,11 +10,20 @@ const (
)
const (
// cksumMask is AND'd with a checksum to get the "carried ones".
/*
cksumMask is AND'd with a checksum to get the "carried ones"
(the lower 16 bits before folding carries).
*/
cksumMask uint32 = 0x0000ffff
// cksumShift is used in the "carried-ones folding".
/*
cksumShift is used in the "carried-ones folding";
it's the number of bits to right-shift the carry-over.
*/
cksumShift uint32 = 0x00000010
// padShift is used to "pad out" a checksum for odd-length buffers by left-shifting.
/*
padShift is used to "pad out" a checksum for odd-length buffers by left-shifting.
It positions the high-byte of a 16-byte "word" (big-endian, as per ord below).
*/
padShift uint32 = 0x00000008
)

View File

@@ -13,11 +13,20 @@ It provides [InetChecksum], which can be used as a:
* [io.Writer]
* [io.WriterTo]
and is concurrency-safe.
and allows one to retrieve the actual bytes that were checksummed.
It is also fully concurrency-safe.
There is also an [InetChecksumSimple] provided, which is more
tailored for performance/resource usage at the cost of concurrency
safety and data retention.
tailored for performance/resource usage at the cost of no concurrency
safety and no data retention, which can be used as a:
* [hash.Hash]
* [io.ByteWriter]
* [io.StringWriter]
* [io.Writer]
If you don't need all these interfaces, a reasonable alternative may be
to use gVisor's [gvisor.dev/gvisor/pkg/tcpip/checksum] instead.
[RFC 1071]: https://datatracker.ietf.org/doc/html/rfc1071
[RFC 1141]: https://datatracker.ietf.org/doc/html/rfc1141

View File

@@ -7,8 +7,9 @@ import (
// New returns a new initialized [InetChecksum]. It will never panic.
func New() (i *InetChecksum) {
i = &InetChecksum{}
_ = i.Aligned()
i = &InetChecksum{
aligned: true,
}
return
}
@@ -21,15 +22,14 @@ b may be nil or 0-length; this will not cause an error.
func NewFromBytes(b []byte) (i *InetChecksum, copied int, err error) {
var cksum InetChecksum
var cptr *InetChecksum = &cksum
cksum.aligned = true
if b != nil && len(b) > 0 {
if copied, err = cksum.Write(b); err != nil {
if copied, err = cptr.Write(b); err != nil {
return
}
_ = i.Aligned()
} else {
i = New()
return
}
i = &cksum
@@ -48,7 +48,64 @@ func NewFromBuf(buf io.Reader) (i *InetChecksum, copied int64, err error) {
var cksum InetChecksum
_ = i.Aligned()
cksum.aligned = true
if buf != nil {
if copied, err = io.Copy(&cksum, buf); err != nil {
return
}
}
i = &cksum
return
}
// NewSimple returns a new initialized [InetChecksumSimple]. It will never panic.
func NewSimple() (i *InetChecksumSimple) {
	i = new(InetChecksumSimple)
	// A fresh checksum has consumed zero bytes, which is an even (aligned) count.
	i.aligned = true
	return
}
/*
NewSimpleFromBytes returns a new [InetChecksumSimple] initialized with explicit bytes.

b may be nil or 0-length; this will not cause an error.
*/
func NewSimpleFromBytes(b []byte) (i *InetChecksumSimple, copied int, err error) {

	var cksum InetChecksumSimple

	// Zero bytes written so far: the checksum starts out aligned.
	cksum.aligned = true
	// len(nil) == 0, so the previous `b != nil &&` guard was redundant
	// (staticcheck S1009); the throwaway pointer alias (cptr) was likewise
	// unnecessary since cksum is addressable.
	if len(b) > 0 {
		if copied, err = cksum.Write(b); err != nil {
			// On a write failure, i stays nil; copied/err report the partial state.
			return
		}
	}
	i = &cksum

	return
}
/*
NewSimpleFromBuf returns an [InetChecksumSimple] from a specified [io.Reader].
buf may be nil. If it isn't, NewSimpleFromBuf will call [io.Copy] on buf.
Note that this may exhaust your passed buf or advance its current seek position/offset,
depending on its type.
*/
func NewSimpleFromBuf(buf io.Reader) (i *InetChecksumSimple, copied int64, err error) {
var cksum InetChecksumSimple
cksum.aligned = true
if buf != nil {
if copied, err = io.Copy(&cksum, buf); err != nil {

View File

@@ -22,7 +22,7 @@ func (i *InetChecksum) Aligned() (aligned bool) {
defer i.alignLock.Unlock()
i.bufLock.RLock()
aligned = i.buf.Len()&2 == 0
aligned = i.buf.Len()%2 == 0
i.bufLock.RUnlock()
i.aligned = aligned
@@ -113,7 +113,7 @@ func (i *InetChecksum) Reset() {
i.sumLock.Lock()
i.lastLock.Lock()
i.aligned = false
i.aligned = true
i.alignLock.Unlock()
i.buf.Reset()
@@ -308,7 +308,7 @@ func (i *InetChecksum) WriteByte(c byte) (err error) {
}
if !i.disabledBuf {
if err = i.WriteByte(c); err != nil {
if err = i.buf.WriteByte(c); err != nil {
i.sum = origSum
i.aligned = origAligned
i.last = origLast

View File

@@ -22,6 +22,15 @@ func (i *InetChecksumSimple) BlockSize() (blockSize int) {
return
}
// Reset returns an InetChecksumSimple to its pristine state: a zeroed sum,
// no trailing odd byte, and (vacuously) even alignment.
func (i *InetChecksumSimple) Reset() {
	i.sum = 0
	i.aligned = true
	i.last = 0x00
}
// Size returns how many bytes a checksum is. (It will always return 2.)
func (i *InetChecksumSimple) Size() (bufSize int) {
@@ -151,3 +160,13 @@ func (i *InetChecksumSimple) WriteByte(c byte) (err error) {
return
}
// WriteString checksums a string. It conforms to [io.StringWriter].
func (i *InetChecksumSimple) WriteString(s string) (n int, err error) {
	// Delegate directly to the []byte path.
	n, err = i.Write([]byte(s))
	return
}

View File

@@ -17,8 +17,8 @@ type (
If [InetChecksum.Aligned] returns false, the checksum result of an
[InetChecksum.Sum] or [InetChecksum.Sum16] (or any other operation
returning a sum) will INCLUDE THE PAD NULL BYTE (which is only
applied *at the time of the Sum/Sum32 call) and is NOT applied to
the persistent underlying storage.
applied *at the time of the Sum/Sum32 call* and is NOT applied to
the persistent underlying storage).
InetChecksum differs from [InetChecksumSimple] in that it:

View File

@@ -1,4 +1,12 @@
/*
Package remap provides convenience functions around regular expressions, primarily offering maps for named capture groups.
Package remap provides convenience functions around regular expressions,
primarily offering maps for named capture groups.
It offers convenience equivalents of the following:
* [regexp.Compile] ([Compile])
* [regexp.CompilePOSIX] ([CompilePOSIX])
* [regexp.MustCompile] ([MustCompile])
* [regexp.MustCompilePOSIX] ([MustCompilePOSIX])
*/
package remap

11
remap/errs.go Normal file
View File

@@ -0,0 +1,11 @@
package remap
import (
`errors`
)
var (
	// ErrInvalidIdxPair is the panic value used when an index pair's end precedes its start.
	ErrInvalidIdxPair error = errors.New("invalid index pair; [1] must be >= [0]")
	// ErrNoStr indicates there is no string to slice/reslice/subslice.
	ErrNoStr error = errors.New("no string to slice/reslice/subslice")
	// ErrShortStr is the panic value used when a string is too short for the requested indices.
	ErrShortStr error = errors.New("string too short to slice/reslice/subslice")
)

170
remap/funcs.go Normal file
View File

@@ -0,0 +1,170 @@
package remap
import (
"regexp"
)
/*
Compile is a convenience shorthand for:

	var err error
	var r *remap.ReMap = new(remap.ReMap)

	if r.Regexp, err = regexp.Compile(expr); err != nil {
		// ...
	}

It corresponds to [regexp.Compile].
*/
func Compile(expr string) (r *ReMap, err error) {
	var compiled *regexp.Regexp

	if compiled, err = regexp.Compile(expr); err != nil {
		// Compilation failed; leave r nil.
		return
	}
	r = new(ReMap)
	r.Regexp = compiled

	return
}
/*
CompilePOSIX is a convenience shorthand for:

	var err error
	var r *remap.ReMap = new(remap.ReMap)

	if r.Regexp, err = regexp.CompilePOSIX(expr); err != nil {
		// ...
	}

It corresponds to [regexp.CompilePOSIX].
*/
func CompilePOSIX(expr string) (r *ReMap, err error) {
	var compiled *regexp.Regexp

	if compiled, err = regexp.CompilePOSIX(expr); err != nil {
		// Compilation failed; leave r nil.
		return
	}
	r = new(ReMap)
	r.Regexp = compiled

	return
}
/*
MustCompile is a convenience shorthand for:

	var r *remap.ReMap = &remap.ReMap{
		Regexp: regexp.MustCompile(expr),
	}

It corresponds to [regexp.MustCompile].
*/
func MustCompile(expr string) (r *ReMap) {
	// Panic ourselves rather than delegating to regexp.MustCompile;
	// this keeps the panic site a little more explicit for debuggers.
	compiled, err := regexp.Compile(expr)
	if err != nil {
		panic(err)
	}
	r = &ReMap{
		Regexp: compiled,
	}

	return
}
/*
MustCompilePOSIX is a convenience shorthand for:

	var r *remap.ReMap = &remap.ReMap{
		Regexp: regexp.MustCompilePOSIX(expr),
	}

It corresponds to [regexp.MustCompilePOSIX].
*/
func MustCompilePOSIX(expr string) (r *ReMap) {
	// Panic ourselves rather than delegating to regexp.MustCompilePOSIX;
	// this keeps the panic site a little more explicit for debuggers.
	compiled, err := regexp.CompilePOSIX(expr)
	if err != nil {
		panic(err)
	}
	r = &ReMap{
		Regexp: compiled,
	}

	return
}
/*
strIdxSlicer takes string s and returns the substring addressed by idxPair,
where:

	idxPair = [2]int{
		<substring START POSITION>,
		<substring END BOUNDARY>,
	}

That is, to get `oo` from `foobar`:

	idxPair = [2]int{1, 3}

(NOT {1, 2}.)

subStr will be empty and matched will be false if:

* idxPair[0] < 0
* idxPair[1] < 0

It will panic with [ErrShortStr] if:

* idxPair[0] > len(s)-1
* idxPair[1] > len(s)

It will panic with [ErrInvalidIdxPair] if:

* idxPair[0] > idxPair[1]

It will properly handle single-character addresses (i.e. idxPair[0] == idxPair[1]).

NOTE(review): equal start/end indices return the single byte at that position
rather than an empty string; confirm callers never feed this the start==end
pairs regexp produces for EMPTY matches, which would then yield one character.
*/
func strIdxSlicer(s string, idxPair [2]int) (subStr string, matched bool) {
	// Negative indices signal "no match"; report matched == false quietly.
	if idxPair[0] < 0 || idxPair[1] < 0 {
		return
	}
	matched = true
	// Out-of-range or inverted pairs are programmer errors, not soft failures.
	if (idxPair[0] > (len(s) - 1)) ||
		(idxPair[1] > len(s)) {
		panic(ErrShortStr)
	}
	if idxPair[0] > idxPair[1] {
		panic(ErrInvalidIdxPair)
	}
	if idxPair[0] == idxPair[1] {
		// single character
		subStr = string(s[idxPair[0]])
	} else {
		// multiple characters
		subStr = s[idxPair[0]:idxPair[1]]
	}

	return
}

View File

@@ -5,9 +5,14 @@ Map returns a map[string][]<match bytes> for regexes with named capture groups m
Note that this supports non-unique group names; [regexp.Regexp] allows for patterns with multiple groups
using the same group name (though your IDE might complain; I know GoLand does).
It will panic if the embedded [regexp.Regexp] is nil.
Each match for each group is in a slice keyed under that group name, with that slice
ordered by the indexing done by the regex match itself.
This operates on only the first found match (like [regexp.Regexp.FindSubmatch]).
To operate on *all* matches, use [ReMap.MapAll].
In summary, the parameters are as follows:
# inclNoMatch
@@ -31,6 +36,7 @@ is provided but b does not match then matches will be:
If true (and inclNoMatch is true), instead of a single nil the group's values will be
a slice of nil values explicitly matching the number of times the group name is specified
in the pattern.
May be unpredictable if the same name is used multiple times for different capture groups across multiple patterns.
For example, if a pattern:
@@ -87,7 +93,7 @@ In detail, matches and/or its values may be nil or empty under the following con
IF inclNoMatch is true
IF inclNoMatchStrict is true
THEN matches[<group name>] is defined and non-nil, but populated with placeholder nils
(matches[<group name>] == [][]byte{nil[, nil...]})
(matches[<group name>] == [][]byte{nil[, nil, ...]})
ELSE
THEN matches[<group name>] is guaranteed defined but may be nil (_, ok = matches[<group name>]; ok == true)
ELSE
@@ -109,7 +115,7 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
return
}
names = r.Regexp.SubexpNames()
names = r.Regexp.SubexpNames()[:]
matchBytes = r.Regexp.FindSubmatch(b)
if matchBytes == nil {
@@ -142,6 +148,9 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
if inclNoMatch {
if len(names) >= 1 {
for _, grpNm = range names {
if grpNm == "" {
continue
}
matches[grpNm] = nil
}
}
@@ -154,7 +163,7 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
grpNm = names[mIdx]
/*
Thankfully, it's actually a build error if a pattern specifies a named
capture group with an empty name.
capture group with an empty name.
So we don't need to worry about accounting for that,
and can just skip over grpNm == "" (which is an *unnamed* capture group).
*/
@@ -190,6 +199,9 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
// This *technically* should be completely handled above.
if inclNoMatch {
for _, grpNm = range names {
if grpNm == "" {
continue
}
if _, ok = tmpMap[grpNm]; !ok {
tmpMap[grpNm] = nil
}
@@ -204,13 +216,147 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
}
/*
MapAll behaves exactly like [ReMap.Map] but will "squash"/consolidate *all* found matches,
not just the first occurrence, into the group name.

A small deviation, though; nil placeholders will occupy slice positions for groups
that did not match (if `inclNoMatchStrict` is specified).

You likely want to use this instead of [ReMap.Map] for multiline patterns.
*/
func (r *ReMap) MapAll(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (matches map[string][][]byte) {

	var ok bool
	var mIdx int
	var match []byte
	var grpNm string
	var names []string
	var mbGrp [][]byte
	var matchBytes [][][]byte
	var tmpMap map[string][][]byte

	if b == nil {
		return
	}

	names = r.Regexp.SubexpNames()[:]
	matchBytes = r.Regexp.FindAllSubmatch(b, -1)
	if matchBytes == nil {
		// b does not match pattern
		if !mustMatch {
			matches = make(map[string][][]byte)
		}
		return
	}
	if len(names) <= 1 {
		// No capture groups at all; names[0] is always the whole-pattern placeholder.
		if inclNoMatch {
			matches = make(map[string][][]byte)
		}
		return
	}
	// Drop the whole-pattern placeholder so names aligns with submatch indices.
	names = names[1:]
	// (Previously tmpMap was make()'d twice — once in the declaration and again here.)
	tmpMap = make(map[string][][]byte)
	// From here, it behaves (sort of) like ReMap.Map,
	// except each mbGrp is like matchBytes in Map.
	for _, mbGrp = range matchBytes {
		if mbGrp == nil || len(mbGrp) <= 1 {
			// No submatches whatsoever for this occurrence.
			continue
		}
		mbGrp = mbGrp[1:]
		for mIdx, match = range mbGrp {
			// Guard against a submatch count exceeding the name list.
			// (FIX: was `mIdx > len(names)`, an off-by-one that would have
			// permitted an out-of-range names[mIdx] below.)
			if mIdx >= len(names) {
				break
			}
			grpNm = names[mIdx]
			if grpNm == "" {
				// Unnamed capture group; skip.
				continue
			}
			if match == nil {
				// This specific group didn't match, but the whole pattern did.
				if !inclNoMatch {
					continue
				}
				if _, ok = tmpMap[grpNm]; !ok {
					if !inclNoMatchStrict {
						tmpMap[grpNm] = nil
					} else {
						tmpMap[grpNm] = [][]byte{nil}
					}
				} else {
					if inclNoMatchStrict {
						tmpMap[grpNm] = append(tmpMap[grpNm], nil)
					}
				}
				continue
			}
			if _, ok = tmpMap[grpNm]; !ok {
				tmpMap[grpNm] = make([][]byte, 0)
			}
			tmpMap[grpNm] = append(tmpMap[grpNm], match)
		}
	}
	// *Theoretically* all of these should be populated with at least a nil.
	if inclNoMatch {
		for _, grpNm = range names {
			if grpNm == "" {
				continue
			}
			if _, ok = tmpMap[grpNm]; !ok {
				tmpMap[grpNm] = nil
			}
		}
	}
	if len(tmpMap) > 0 {
		matches = tmpMap
	}

	return
}
/*
MapString is exactly like [ReMap.Map], but operates on (and returns) strings instead.
(matches will always be nil if s == "".)
It will panic if the embedded [regexp.Regexp] is nil.
This operates on only the first found match (like [regexp.Regexp.FindStringSubmatch]).
To operate on *all* matches, use [ReMap.MapStringAll].
A small deviation and caveat, though; empty strings instead of nils (because duh) will occupy slice placeholders (if `inclNoMatchStrict` is specified).
This unfortunately *does not provide any indication* if an empty string positively matched the pattern (a "hit") or if it was simply
not matched at all (a "miss"). If you need definitive determination between the two conditions, it is instead recommended to either
*not* use inclNoMatchStrict or to use ReMap.Map() instead and convert any non-nil values to strings after.
*not* use inclNoMatchStrict or to use [ReMap.Map] instead and convert any non-nil values to strings after.
Particularly:
@@ -233,8 +379,9 @@ is provided but s does not match then matches will be:
# inclNoMatchStrict
If true (and inclNoMatch is true), instead of a single nil the group's values will be
a slice of eempty string values explicitly matching the number of times the group name is specified
a slice of empty string values explicitly matching the number of times the group name is specified
in the pattern.
May be unpredictable if the same name is used multiple times for different capture groups across multiple patterns.
For example, if a pattern:
@@ -290,8 +437,8 @@ In detail, matches and/or its values may be nil or empty under the following con
IF <group name> does not have a match
IF inclNoMatch is true
IF inclNoMatchStrict is true
THEN matches[<group name>] is defined and non-nil, but populated with placeholder nils
(matches[<group name>] == []string{""[, ""...]})
THEN matches[<group name>] is defined and non-nil, but populated with placeholder strings
(matches[<group name>] == []string{""[, "", ...]})
ELSE
THEN matches[<group name>] is guaranteed defined but may be nil (_, ok = matches[<group name>]; ok == true)
ELSE
@@ -304,27 +451,19 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
var ok bool
var endIdx int
var startIdx int
var chunkIdx int
var grpIdx int
var grpNm string
var names []string
var matchStr string
/*
A slice of indices or index pairs.
For each element `e` in idxChunks,
* if `e` is nil, no group match.
* if len(e) == 1, only a single character was matched.
* otherwise len(e) == 2, the start and end of the match.
*/
var idxChunks [][]int
var si stringIndexer
var matchIndices []int
var chunkIndices []int // always 2 elements; start pos and end pos
var tmpMap map[string][]string = make(map[string][]string)
/*
OK so this is a bit of a deviation.
It's not as straightforward as above, because there isn't an explicit way
like above to determine if a pattern was *matched as an empty string* vs.
like above to determine if a pattern was *matched as an empty string* vs.
*not matched*.
So instead do roundabout index-y things.
@@ -334,7 +473,8 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
return
}
/*
I'm not entirely sure how serious they are about "the slice should not be modified"...
I'm not entirely sure how serious they are about
"the slice should not be modified"...
DO NOT sort or dedupe `names`! If the same name for groups is duplicated,
it will be duplicated here in proper order and the ordering is tied to
@@ -351,7 +491,7 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
return
}
if names == nil || len(names) <= 1 {
if names == nil || len(names) == 0 || len(names) == 1 {
/*
No named capture groups;
technically only the last condition would be the case,
@@ -363,6 +503,7 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
}
return
}
names = names[1:]
if len(matchIndices) == 0 || len(matchIndices) == 1 {
/*
@@ -378,26 +519,34 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
matches = make(map[string][]string)
if inclNoMatch {
for _, grpNm = range names {
if grpNm != "" {
matches[grpNm] = nil
if grpNm == "" {
continue
}
matches[grpNm] = nil
}
}
return
}
/*
A reslice of `matchIndices` could technically start at 2 (as long as `names` is sliced [1:])
because they're in pairs: []int{<start>, <end>, <start>, <end>, ...}
and the first pair is the entire pattern match (un-resliced names[0]).
Thus the len(matchIndices) == 2*len(names), *even* if you
The reslice of `matchIndices` starts at 2 because they're in pairs:
[]int{<start>, <end>, <start>, <end>, ...}
and the first pair is the entire pattern match (un-resliced names[0],
un-resliced matchIndices[0]).
Thus the len(matchIndices) == 2*len(names) (*should*, that is), *even* if you reslice.
Keep in mind that since the first element of names is removed,
the first pair here is skipped.
This provides a bit more consistent readability, though.
we reslice matchIndices as well.
*/
idxChunks = make([][]int, len(names))
chunkIdx = 0
endIdx = 0
matchIndices = matchIndices[2:]
tmpMap = make(map[string][]string)
// Note that the second index is the *upper boundary*, not a *position in the string*
// so these indices are perfectly usable as-is as returned from the regexp methods.
// http://golang.org/ref/spec#Slice_expressions
for startIdx = 0; endIdx < len(matchIndices); startIdx += 2 {
endIdx = startIdx + 2
// This technically should never happen.
@@ -405,75 +554,253 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
endIdx = len(matchIndices)
}
chunkIndices = matchIndices[startIdx:endIdx]
if chunkIndices[0] == -1 || chunkIndices[1] == -1 {
// group did not match
chunkIndices = nil
} else {
if chunkIndices[0] == chunkIndices[1] {
chunkIndices = []int{chunkIndices[0]}
} else {
chunkIndices = matchIndices[startIdx:endIdx]
}
}
idxChunks[chunkIdx] = chunkIndices
chunkIdx++
if grpIdx >= len(names) {
break
}
// Now associate with names and pull the string sequence.
for chunkIdx, chunkIndices = range idxChunks {
grpNm = names[chunkIdx]
/*
Thankfully, it's actually a build error if a pattern specifies a named
capture group with an empty name.
So we don't need to worry about accounting for that,
and can just skip over grpNm == ""
(which is either an *unnamed* capture group
OR the first element in `names`, which is always
the entire match).
*/
if grpNm == "" {
si = stringIndexer{
group: grpIdx,
start: matchIndices[startIdx],
end: matchIndices[endIdx-1],
matched: true,
nm: names[grpIdx],
grpS: "",
s: &matchStr,
ptrn: r.Regexp,
}
grpIdx++
if si.nm == "" {
// unnamed capture group
continue
}
if chunkIndices == nil || len(chunkIndices) == 0 {
// group did not match
// sets si.matched and si.grpS
si.idxSlice(&s)
if !si.matched {
if !inclNoMatch {
continue
}
if _, ok = tmpMap[grpNm]; !ok {
if _, ok = tmpMap[si.nm]; !ok {
if !inclNoMatchStrict {
tmpMap[grpNm] = nil
tmpMap[si.nm] = nil
} else {
tmpMap[grpNm] = []string{""}
tmpMap[si.nm] = []string{""}
}
} else {
if inclNoMatchStrict {
tmpMap[grpNm] = append(tmpMap[grpNm], "")
tmpMap[si.nm] = append(tmpMap[si.nm], "")
}
}
continue
}
switch len(chunkIndices) {
case 1:
// Single character
matchStr = string(s[chunkIndices[0]])
case 2:
// Multiple characters
matchStr = s[chunkIndices[0]:chunkIndices[1]]
if _, ok = tmpMap[si.nm]; !ok {
tmpMap[si.nm] = make([]string, 0)
}
if _, ok = tmpMap[grpNm]; !ok {
tmpMap[grpNm] = make([]string, 0)
}
tmpMap[grpNm] = append(tmpMap[grpNm], matchStr)
tmpMap[si.nm] = append(tmpMap[si.nm], si.grpS)
}
// This *technically* should be completely handled above.
if inclNoMatch {
for _, grpNm = range names {
if grpNm == "" {
continue
}
if _, ok = tmpMap[grpNm]; !ok {
tmpMap[grpNm] = nil
}
}
}
if len(tmpMap) > 0 {
matches = tmpMap
}
return
}
/*
MapStringAll behaves exactly like [ReMap.MapString] but will "squash"/consolidate *all* found matches, not just the first occurrence,
into the group name.
You likely want to use this instead of [ReMap.MapString] for multiline patterns.
*/
func (r *ReMap) MapStringAll(s string, inclNoMatch, inclNoMatchStrict, mustMatch bool) (matches map[string][]string) {
var ok bool
var endIdx int
var startIdx int
var grpIdx int
var grpNm string
var names []string
var matchStr string
var si stringIndexer
var matchIndices []int
var allMatchIndices [][]int
var tmpMap map[string][]string = make(map[string][]string)
if s == "" {
return
}
names = r.Regexp.SubexpNames()[:]
allMatchIndices = r.Regexp.FindAllStringSubmatchIndex(s, -1)
if allMatchIndices == nil {
// s does not match pattern at all.
if !mustMatch {
matches = make(map[string][]string)
}
return
}
if names == nil || len(names) == 0 || len(names) == 1 {
/*
No named capture groups;
technically only the last condition would be the case,
as (regexp.Regexp).SubexpNames() will ALWAYS at the LEAST
return a `[]string{""}`.
*/
if inclNoMatch {
matches = make(map[string][]string)
}
return
}
names = names[1:]
if len(allMatchIndices) == 0 {
// No matches (and thus submatches) whatsoever.
// I think this is actually covered by the `if allMatchIndices == nil { ... }` above,
// but this is still here for safety and efficiency - early return on no matches to iterate.
matches = make(map[string][]string)
if inclNoMatch {
for _, grpNm = range names {
if grpNm == "" {
continue
}
matches[grpNm] = nil
}
}
return
}
// Do *NOT* trim/reslice allMatchIndices!
// The reslicing is done below, *inside* each matchIndices iteration!
tmpMap = make(map[string][]string)
// From here, it behaves (sort of) like ReMap.MapString.
// Build the strictly-paired chunk indexes and populate them.
// We are iterating over *match sets*; matchIndices here should be analogous
// to matchIndices in ReMap.MapString.
for _, matchIndices = range allMatchIndices {
if matchIndices == nil {
// I *think* the exception with the *All* variant here
// is the *entire* return (allMatchIndices) is nil if there
// aren't any matches; I can't imagine there'd be any feasible
// way it'd insert a nil *element* for an index mapping group.
// So just continuing here should be fine;
// this continue SHOULD be unreachable.
continue
}
// Reslice *here*, on the particular match index group.
// Grab the matchStr first; it's not currently *used* by anything but may be in the future.
matchStr, ok = strIdxSlicer(
s,
*(*[2]int)(matchIndices[0:2]),
)
if len(matchIndices) == 0 || len(matchIndices) == 1 {
// No *sub*matches (capture groups) in this match, but it still matched the pattern.
if inclNoMatch {
for _, grpNm = range names {
if grpNm == "" {
continue
}
// We don't immediately return, though; we just stage out group names just in case.
// That's why we use tmpMap and not matches.
if _, ok = tmpMap[grpNm]; !ok {
tmpMap[grpNm] = nil
}
}
}
continue
}
matchIndices = matchIndices[2:]
// Reset from previous loop
endIdx = 0
grpIdx = 0
for startIdx = 0; endIdx < len(matchIndices); startIdx += 2 {
endIdx = startIdx + 2
if endIdx > len(matchIndices) {
endIdx = len(matchIndices)
}
if grpIdx >= len(names) {
break
}
si = stringIndexer{
group: grpIdx,
start: matchIndices[startIdx],
end: matchIndices[endIdx-1],
matched: true,
nm: names[grpIdx],
grpS: "",
ptrn: r.Regexp,
}
grpIdx++
// We do not include the entire match string here;
// we don't need it for this. Waste of memory.
_ = matchStr
/*
si.s = new(string)
*si.s = matchStr
*/
if si.nm == "" {
// unnamed capture group
continue
}
// sets si.matched and si.grpS
si.idxSlice(&s)
if !si.matched {
if !inclNoMatch {
continue
}
if _, ok = tmpMap[si.nm]; !ok {
if !inclNoMatchStrict {
tmpMap[si.nm] = nil
} else {
tmpMap[si.nm] = []string{""}
}
} else {
if inclNoMatchStrict {
tmpMap[si.nm] = append(tmpMap[si.nm], "")
}
}
continue
}
if _, ok = tmpMap[si.nm]; !ok {
tmpMap[si.nm] = make([]string, 0)
}
tmpMap[si.nm] = append(tmpMap[si.nm], si.grpS)
}
}
if inclNoMatch {
for _, grpNm = range names {
if grpNm == "" {
continue
}
if _, ok = tmpMap[grpNm]; !ok {
tmpMap[grpNm] = nil
}

344
remap/funcs_remap_test.go Normal file
View File

@@ -0,0 +1,344 @@
package remap
import (
"fmt"
"reflect"
"regexp"
"testing"
)
type (
	// testMatcher is a single table-driven test case shared by the ReMap tests.
	testMatcher struct {
		// Nm is the human-readable case name used in failure messages.
		Nm string
		// S is the input string the pattern is applied to.
		S string
		// M wraps the pattern under test.
		M *ReMap
		// All selects the *All variant (MapAll/MapStringAll) when true.
		All bool
		// Expected is the expected result for the []byte variants (Map/MapAll).
		Expected map[string][][]byte
		// ExpectedStr is the expected result for the string variants (MapString/MapStringAll).
		ExpectedStr map[string][]string
		// ParamInclNoMatch, ParamInclNoMatchStrict, and ParamInclMustMatch are
		// passed through as the corresponding Map* parameters in TestRemapParams.
		ParamInclNoMatch       bool
		ParamInclNoMatchStrict bool
		ParamInclMustMatch     bool
	}
)
// TestRemap runs table-driven cases through ReMap.Map / ReMap.MapAll
// (the []byte variants) with all option parameters false, and compares
// results with reflect.DeepEqual.
func TestRemap(t *testing.T) {
	var matches map[string][][]byte

	for midx, m := range []testMatcher{
		// 1
		testMatcher{
			Nm:       "No matches",
			S:        "this is a test",
			M:        &ReMap{regexp.MustCompile(``)},
			Expected: nil,
		},
		// 2
		testMatcher{
			Nm: "Single mid match",
			S:  "This contains a single match in the middle of a string",
			M:  &ReMap{regexp.MustCompile(`\s+(?P<g1>match)\s+`)},
			Expected: map[string][][]byte{
				"g1": [][]byte{[]byte("match")},
			},
		},
		// 3
		testMatcher{
			Nm: "multi mid match",
			S:  "This contains a single match and another match in the middle of a string",
			M:  &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another (?P<g1>match)\s+`)},
			Expected: map[string][][]byte{
				"g1": [][]byte{
					[]byte("match"),
					[]byte("match"),
				},
			},
		},
		// 4
		testMatcher{
			Nm: "line match",
			S:  "This\ncontains a\nsingle\nmatch\non a dedicated line",
			M:  &ReMap{regexp.MustCompile(`(?m)^(?P<g1>match)$`)},
			Expected: map[string][][]byte{
				"g1": [][]byte{
					[]byte("match"),
				},
			},
		},
		// 5
		testMatcher{
			Nm:  "multiline match",
			S:   "This\ncontains a\nsingle match and another\nmatch\nin the middle of a string",
			M:   &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another\s+(?P<g1>match)\s+`)},
			All: true,
			Expected: map[string][][]byte{
				"g1": [][]byte{
					[]byte("match"),
					[]byte("match"),
				},
			},
		},
		// 6
		// More closely mirrors something closer to real-life
		testMatcher{
			Nm: "mixed match",
			S: " # No longer log hits/reqs/resps to file.\n" +
				" #access_log /mnt/nginx_logs/vhost/tenant/site/access.log main;\n" +
				" #error_log /mnt/nginx_logs/vhost/tenant/site/error.log;\n" +
				" access_log off;\n" +
				" error_log /dev/null;\n\n" +
				" ssl_certificate /etc/nginx/tls/crt/tenant.pem;\n" +
				" ssl_certificate_key /etc/nginx/tls/key/tenant.pem;\n\n",
			M:   &ReMap{regexp.MustCompile(`(?m)^\s*(?:error|access)_log\s+(?P<logpath>.+);\s*$`)},
			All: true,
			Expected: map[string][][]byte{
				"logpath": [][]byte{
					[]byte("off"),
					[]byte("/dev/null"),
				},
			},
		},
	} {
		// Dispatch to the multi-match variant when the case requires it.
		if m.All {
			matches = m.M.MapAll([]byte(m.S), false, false, false)
		} else {
			matches = m.M.Map([]byte(m.S), false, false, false)
		}
		t.Logf(
			"#%d:\n\tsrc:\t'%s'\n\tptrn:\t'%s'\n\tmatch:\t%s\n",
			midx+1,
			m.S,
			m.M.Regexp.String(),
			testBmapToStrMap(matches),
		)
		if !reflect.DeepEqual(matches, m.Expected) {
			t.Fatalf("Case #%d (\"%s\"): expected '%#v' != received '%#v'", midx+1, m.Nm, m.Expected, matches)
		}
	}
}
// TestRemapParams exercises every combination of the
// inclNoMatch/inclNoMatchStrict/mustMatch parameters of ReMap.Map /
// ReMap.MapAll against a pattern with no named capture groups,
// asserting the nil-vs-empty-map contract of the return value.
func TestRemapParams(t *testing.T) {
	var matches map[string][][]byte

	for midx, m := range []testMatcher{
		testMatcher{
			Nm:                     "",
			S:                      "this is a test",
			M:                      &ReMap{regexp.MustCompile(``)},
			Expected:               nil,
			ParamInclNoMatch:       false,
			ParamInclNoMatchStrict: false,
			ParamInclMustMatch:     false,
		},
		testMatcher{
			Nm:                     "",
			S:                      "this is a test",
			M:                      &ReMap{regexp.MustCompile(``)},
			Expected:               nil,
			ParamInclNoMatch:       false,
			ParamInclNoMatchStrict: true,
			ParamInclMustMatch:     false,
		},
		testMatcher{
			Nm:                     "",
			S:                      "this is a test",
			M:                      &ReMap{regexp.MustCompile(``)},
			Expected:               nil,
			ParamInclNoMatch:       false,
			ParamInclNoMatchStrict: true,
			ParamInclMustMatch:     true,
		},
		testMatcher{
			Nm:                     "",
			S:                      "this is a test",
			M:                      &ReMap{regexp.MustCompile(``)},
			Expected:               nil,
			ParamInclNoMatch:       false,
			ParamInclNoMatchStrict: false,
			ParamInclMustMatch:     true,
		},
		testMatcher{
			Nm:                     "",
			S:                      "this is a test",
			M:                      &ReMap{regexp.MustCompile(``)},
			Expected:               make(map[string][][]byte),
			ParamInclNoMatch:       true,
			ParamInclNoMatchStrict: false,
			ParamInclMustMatch:     false,
		},
		testMatcher{
			Nm:                     "",
			S:                      "this is a test",
			M:                      &ReMap{regexp.MustCompile(``)},
			Expected:               make(map[string][][]byte),
			ParamInclNoMatch:       true,
			ParamInclNoMatchStrict: true,
			ParamInclMustMatch:     false,
		},
		testMatcher{
			Nm:                     "",
			S:                      "this is a test",
			M:                      &ReMap{regexp.MustCompile(``)},
			Expected:               make(map[string][][]byte),
			ParamInclNoMatch:       true,
			ParamInclNoMatchStrict: true,
			ParamInclMustMatch:     true,
		},
		testMatcher{
			Nm:                     "",
			S:                      "this is a test",
			M:                      &ReMap{regexp.MustCompile(``)},
			Expected:               make(map[string][][]byte),
			ParamInclNoMatch:       true,
			ParamInclNoMatchStrict: false,
			ParamInclMustMatch:     true,
		},
	} {
		if m.All {
			matches = m.M.MapAll([]byte(m.S), m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch)
		} else {
			matches = m.M.Map([]byte(m.S), m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch)
		}
		t.Logf(
			"%d: %v/%v/%v: %#v\n",
			midx+1, m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch, matches,
		)
		// FIX: the failure message previously printed m.ExpectedStr (always nil
		// in this test) instead of m.Expected, the field actually compared here.
		if !reflect.DeepEqual(matches, m.Expected) {
			t.Fatalf("Case #%d (\"%s\"): '%#v' != '%#v'", midx+1, m.Nm, m.Expected, matches)
		}
	}
}
// TestRemapString mirrors TestRemap for the string variants
// (ReMap.MapString / ReMap.MapStringAll), comparing against ExpectedStr.
func TestRemapString(t *testing.T) {
	var matches map[string][]string

	for midx, m := range []testMatcher{
		// 1
		testMatcher{
			Nm:          "No matches",
			S:           "this is a test",
			M:           &ReMap{regexp.MustCompile(``)},
			ExpectedStr: nil,
		},
		// 2
		testMatcher{
			Nm: "Single mid match",
			S:  "This contains a single match in the middle of a string",
			M:  &ReMap{regexp.MustCompile(`\s+(?P<g1>match)\s+`)},
			ExpectedStr: map[string][]string{
				"g1": []string{"match"},
			},
		},
		// 3
		testMatcher{
			Nm: "multi mid match",
			S:  "This contains a single match and another match in the middle of a string",
			M:  &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another (?P<g1>match)\s+`)},
			ExpectedStr: map[string][]string{
				"g1": []string{
					"match",
					"match",
				},
			},
		},
		// 4
		testMatcher{
			Nm: "line match",
			S:  "This\ncontains a\nsingle\nmatch\non a dedicated line",
			M:  &ReMap{regexp.MustCompile(`(?m)^(?P<g1>match)$`)},
			ExpectedStr: map[string][]string{
				"g1": []string{
					"match",
				},
			},
		},
		// 5
		testMatcher{
			Nm:  "multiline match",
			S:   "This\ncontains a\nsingle match and another\nmatch\nin the middle of a string",
			M:   &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another\s+(?P<g1>match)\s+`)},
			All: true,
			ExpectedStr: map[string][]string{
				"g1": []string{
					"match",
					"match",
				},
			},
		},
		// 6
		// More closely mirrors something closer to real-life
		testMatcher{
			Nm: "mixed match",
			S: " # No longer log hits/reqs/resps to file.\n" +
				" #access_log /mnt/nginx_logs/vhost/tenant/site/access.log main;\n" +
				" #error_log /mnt/nginx_logs/vhost/tenant/site/error.log;\n" +
				" access_log off;\n" +
				" error_log /dev/null;\n\n" +
				" ssl_certificate /etc/nginx/tls/crt/tenant.pem;\n" +
				" ssl_certificate_key /etc/nginx/tls/key/tenant.pem;\n\n",
			M:   &ReMap{regexp.MustCompile(`(?m)^\s*(?:error|access)_log\s+(?P<logpath>.+);\s*$`)},
			All: true,
			ExpectedStr: map[string][]string{
				"logpath": []string{
					"off",
					"/dev/null",
				},
			},
		},
	} {
		// Dispatch to the multi-match variant when the case requires it.
		if m.All {
			matches = m.M.MapStringAll(m.S, false, false, false)
		} else {
			matches = m.M.MapString(m.S, false, false, false)
		}
		t.Logf(
			"#%d:\n\tsrc:\t'%s'\n\tptrn:\t'%s'\n\tmatch:\t%s\n",
			midx+1,
			m.S,
			m.M.Regexp.String(),
			testSmapToStrMap(matches),
		)
		if !reflect.DeepEqual(matches, m.ExpectedStr) {
			t.Fatalf("Case #%d (\"%s\"): '%#v' != '%#v'", midx+1, m.Nm, m.ExpectedStr, matches)
		}
	}
}
// testBmapToStrMap renders a byte-slice match map as an indented,
// multiline string suitable for test log output.
// A nil map renders as the empty string.
func testBmapToStrMap(bmap map[string][][]byte) (s string) {
	if bmap == nil {
		return
	}
	s = "\n"
	for grpNm, grpVals := range bmap {
		s += fmt.Sprintf("\t%s\n", grpNm)
		for _, val := range grpVals {
			s += fmt.Sprintf("\t\t%s\n", val)
		}
	}
	return
}
// testSmapToStrMap renders a string match map as an indented,
// multiline string suitable for test log output.
// A nil map renders as the empty string.
func testSmapToStrMap(smap map[string][]string) (s string) {
	if smap == nil {
		return
	}
	s = "\n"
	for grpNm, grpVals := range smap {
		s += fmt.Sprintf("\t%s\n", grpNm)
		for _, val := range grpVals {
			s += fmt.Sprintf("\t\t%s\n", val)
		}
	}
	return
}

View File

@@ -0,0 +1,34 @@
package remap
// idx returns the group's start and end offsets as a two-element []int.
func (s *stringIndexer) idx() (i []int) {
	i = []int{s.start, s.end}
	return
}
// idxStrict returns the group's start and end offsets as a fixed-size [2]int.
func (s *stringIndexer) idxStrict() (i [2]int) {
	i = [2]int{s.start, s.end}
	return
}
/*
idxSlice populates s.grpS using s.start and s.end.

If str is nil, it will use s.s.
If str is nil and s.s is nil, it will panic with [ErrNoStr].

If the pattern does not match (s.start < 0 or s.end < 0),
s.matched will be set to false (otherwise true).
*/
func (s *stringIndexer) idxSlice(str *string) {
	src := str
	if src == nil {
		// Fall back to the indexer's own string; that missing too is caller error.
		src = s.s
		if src == nil {
			panic(ErrNoStr)
		}
	}
	s.grpS, s.matched = strIdxSlicer(*src, s.idxStrict())
}

View File

@@ -5,7 +5,7 @@ import (
)
type (
// ReMap provides some map-related functions around a regexp.Regexp.
// ReMap provides some map-related functions around a [regexp.Regexp].
ReMap struct {
*regexp.Regexp
}
@@ -24,4 +24,45 @@ type (
}
*/
// stringIndexer carries one capture group's bookkeeping for a single pattern
// application: where in the source string the group matched, whether it
// explicitly matched, and (optionally) the matched substrings.
stringIndexer struct {
	// group is the capture group index for this match.
	group int
	// start is the string index (from the original string) where the matched group starts.
	start int
	// end is the string index where the matched group ends.
	end int
	/*
		matched indicates whether the group explicitly matched.
		(This is normally indeterminate with string regex returns,
		as e.g. `(?P<mygrp>\s*)`, `(?P<mygrp>(?:somestring)?)`, etc. all can be a *matched* "".)

		If grpS == "" and matched == true, it DID match an empty string.
		If grpS == "" and matched == false, it DID NOT MATCH the pattern.
		If grpS != "", matched can be completely disregarded.
	*/
	matched bool
	// nm is the match group name.
	nm string
	/*
		grpS is the actual group-matched *substring*.
		It will ALWAYS be either:

			* the entirety of s
			* a substring of s
			* an empty string

		it will never, and cannot be, a SUPERset of s.
		it may not always be included/populated to save on memory.
	*/
	grpS string
	/*
		s is the *entire* MATCHED (sub)string.
		It may not always be populated if not needed to save memory.
	*/
	s *string
	// ptrn is the pattern applied to s.
	ptrn *regexp.Regexp
}
)

5
stringsx/TODO Normal file
View File

@@ -0,0 +1,5 @@
- Banner struct, with .Format(s string) method
-- draw border around multiline s
-- i have a version in python somewhere that does this, should dig that up
- create bytesx package that duplicates the functions here?

6
stringsx/consts.go Normal file
View File

@@ -0,0 +1,6 @@
package stringsx
const (
	// DefMaskStr is the string used as the default maskStr if left empty in [Redact].
	// It is multi-character, which (per Redact's docs) helps obfuscate the
	// masked value's original length.
	DefMaskStr string = "***"
)

17
stringsx/doc.go Normal file
View File

@@ -0,0 +1,17 @@
/*
Package stringsx aims to extend functionality of the stdlib [strings] module.
Note that if you need a way of mimicking Bash's shell quoting rules, [desertbit/shlex] or [buildkite/shellwords]
would be better options than [google/shlex] but this package does not attempt to reproduce
any of that functionality.
For line splitting, one should use [muesli/reflow/wordwrap].
Likewise for indentation, one should use [muesli/reflow/indent].
[desertbit/shlex]: https://pkg.go.dev/github.com/desertbit/go-shlex
[buildkite/shellwords]: https://pkg.go.dev/github.com/buildkite/shellwords
[google/shlex]: https://pkg.go.dev/github.com/google/shlex
[muesli/reflow/wordwrap]: https://pkg.go.dev/github.com/muesli/reflow/wordwrap
[muesli/reflow/indent]: https://pkg.go.dev/github.com/muesli/reflow/indent
*/
package stringsx

326
stringsx/funcs.go Normal file
View File

@@ -0,0 +1,326 @@
package stringsx
import (
`fmt`
`strings`
`unicode`
)
/*
LenSplit formats string `s` to break at, at most, every `width` characters.

Any existing newlines (\n or \r\n) are stripped when measuring a line's
length (so `foobarbaz\n` and `foobarbaz\r\n` both count as length 9, not
10 and 11 respectively), and are therefore never present in `out`.

Existing line breaks in `s` are respected as-is: a line shorter than `width`
followed by a newline is preserved as its own element. That is to say:

	"foo\nbar\n\n" → []string{"foo", "bar", ""}
	"foo\n\nbar\n" → []string{"foo", "", "bar"}

This splitter is particularly simple. If you need wordwrapping, it should be
done with e.g. [github.com/muesli/reflow/wordwrap].
*/
func LenSplit(s string, width uint) (out []string) {
	if width == 0 {
		// No width to split at; hand back the input whole.
		out = []string{s}
		return
	}
	chunkLen := int(width)
	for _, raw := range strings.SplitAfter(s, "\n") {
		if raw == "" {
			// SplitAfter emits a trailing empty element when s ends in "\n";
			// line-wise iteration does not, so drop it.
			continue
		}
		// Strip the line terminator (handles both "\n" and "\r\n").
		bare := strings.TrimRight(raw, "\n")
		bare = strings.TrimRight(bare, "\r")
		bareRunes := []rune(bare)
		if uint(len(bareRunes)) <= width {
			out = append(out, bare)
			continue
		}
		// Chunk the overlong line into width-sized rune runs.
		for start := 0; start < len(bareRunes); start += chunkLen {
			stop := start + chunkLen
			if stop > len(bareRunes) {
				stop = len(bareRunes)
			}
			out = append(out, string(bareRunes[start:stop]))
		}
	}
	return
}
/*
LenSplitStr wraps [LenSplit] but recombines the result into a single
newline-joined string. It's mostly just a convenience wrapper.

All arguments remain the same as in [LenSplit] with one addition:
`winNewline`, which if true will use "\r\n" as the newline instead of "\n".
*/
func LenSplitStr(s string, width uint, winNewline bool) (out string) {
	sep := "\n"
	if winNewline {
		sep = "\r\n"
	}
	out = strings.Join(LenSplit(s, width), sep)
	return
}
/*
Pad pads each element in `s` to length `width` using `pad`.

If `pad` is empty, a single space (0x20) will be assumed.

Note that `width` operates on rune count, not byte count.
(In ASCII, they will be the same.)

If an element in `s` is greater than or equal to `width` runes,
no padding will be performed on it.

If `leftPad` is true, padding will be applied to the "left" (beginning)
of each element instead of the "right" ("end").
*/
func Pad(s []string, width uint, pad string, leftPad bool) (out []string) {
	var idx int
	var elem string
	var padRunes []rune
	var padLen uint

	if width == 0 {
		// Nothing sensible to do; return the input as-is.
		out = s
		return
	}
	out = make([]string, len(s))
	// Space padding is supported directly by fmt's star-width flag.
	// NOTE: the star-width operand must be an int; passing the uint
	// directly makes fmt emit "%!(BADWIDTH)".
	if pad == "" {
		for idx, elem = range s {
			if leftPad {
				out[idx] = fmt.Sprintf("%*s", int(width), elem)
			} else {
				out[idx] = fmt.Sprintf("%-*s", int(width), elem)
			}
		}
		return
	}
	// Custom pad string; this gets a little more tricky.
	padRunes = []rune(pad)
	padLen = uint(len(padRunes))
	for idx, elem = range s {
		// First we need the number of runes in elem, not bytes.
		unpadLen := uint(len([]rune(elem)))
		if unpadLen >= width {
			// Already at/over the target width; leave as-is.
			out[idx] = elem
			continue
		}
		need := width - unpadLen
		var padding string
		if need%padLen == 0 {
			// A whole number of pad repetitions fits exactly.
			padding = strings.Repeat(pad, int(need/padLen))
		} else {
			// Partial repetition: cycle through padRunes, truncating the
			// final repetition so the result lands exactly on `width` runes.
			// (The previous implementation indexed into a zero-length slice
			// — a guaranteed panic — and could also read past the end of
			// padRunes due to an off-by-one in its wrap check, while
			// clobbering the outer loop index.)
			tmpPad := make([]rune, need)
			for i := range tmpPad {
				tmpPad[i] = padRunes[uint(i)%padLen]
			}
			padding = string(tmpPad)
		}
		if leftPad {
			out[idx] = padding + elem
		} else {
			out[idx] = elem + padding
		}
	}
	return
}
/*
Redact provides a "masked" version of string s (e.g. `my_terrible_password` -> `my****************rd`).

maskStr is the character or sequence of characters
to repeat for every masked character of s.
If an empty string, the default [DefMaskStr] will be used.
(maskStr does not need to be a single character.
It is recommended to use a multi-char mask to help obfuscate a string's length.)

leading specifies the number of leading characters of s to leave *unmasked*.
If 0, no leading characters will be unmasked.

trailing specifies the number of trailing characters of s to leave *unmasked*.
If 0, no trailing characters will be unmasked.

newlines, if true, will preserve newline characters - otherwise
they will be treated as regular characters.

As a safety precaution, if:

	len(s) <= (leading + trailing)

then the entire string will be *masked* and no unmasking will be performed.

Note that this DOES NOT do a string *replace*, it provides a masked version of `s` itself.
Wrap Redact with [strings.ReplaceAll] if you want to replace a certain value with a masked one.
*/
func Redact(s, maskStr string, leading, trailing uint, newlines bool) (redacted string) {
	var b strings.Builder

	if s == "" {
		// Nothing to mask.
		return
	}
	if maskStr == "" {
		maskStr = DefMaskStr
	}
	if newlines {
		// Recurse per line, preserving each line's own newline sequence.
		for line := range strings.Lines(s) {
			eol := getNewLine(line)
			b.WriteString(
				Redact(
					strings.TrimSuffix(line, eol), maskStr, leading, trailing, false,
				),
			)
			b.WriteString(eol)
		}
		redacted = b.String()
		return
	}
	// Full mask when nothing is unmasked, or when the unmasked regions
	// would cover the whole string (safety precaution; see doc).
	if leading == 0 && trailing == 0 || len(s) <= int(leading+trailing) {
		redacted = strings.Repeat(maskStr, len(s))
		return
	}
	maskedCnt := len(s) - int(leading+trailing)
	if leading > 0 {
		b.WriteString(s[:leading])
	}
	b.WriteString(strings.Repeat(maskStr, maskedCnt))
	if trailing > 0 {
		b.WriteString(s[int(leading)+maskedCnt:])
	}
	redacted = b.String()
	return
}
/*
TrimLines is like [strings.TrimSpace] but operates on *each line* of s.
It is *NIX-newline (`\n`) vs. Windows-newline (`\r\n`) agnostic.
The first encountered linebreak (`\n` vs. `\r\n`) are assumed to be
the canonical linebreak for the rest of s.
left, if true, performs a [TrimSpaceLeft] on each line (retaining the newline).
right, if true, performs a [TrimSpaceRight] on each line (retaining the newline).
*/
func TrimLines(s string, left, right bool) (trimmed string) {
var sl string
var nl string
var sb strings.Builder
// These conditions functionally won't do anything, so just return the input as-is.
if s == "" {
return
}
if !left && !right {
trimmed = s
return
}
for line := range strings.Lines(s) {
nl = getNewLine(line)
sl = strings.TrimSuffix(line, nl)
if left && right {
sl = strings.TrimSpace(sl)
} else if left {
sl = TrimSpaceLeft(sl)
} else if right {
sl = TrimSpaceRight(sl)
}
sb.WriteString(sl + nl)
}
trimmed = sb.String()
return
}
// TrimSpaceLeft is like [strings.TrimSpace] but strips only leading
// (Unicode) whitespace from s, leaving trailing whitespace intact.
func TrimSpaceLeft(s string) string {
	return strings.TrimLeftFunc(s, unicode.IsSpace)
}
// TrimSpaceRight is like [strings.TrimSpace] but strips only trailing
// (Unicode) whitespace from s, leaving leading whitespace intact.
func TrimSpaceRight(s string) string {
	return strings.TrimRightFunc(s, unicode.IsSpace)
}
// getNewLine reports the newline sequence terminating s: "\r\n", "\n", or ""
// if s has no trailing newline. It is too unpredictable/nuanced to be part of
// a public API promise, so it isn't exported.
func getNewLine(s string) (nl string) {
	switch {
	case strings.HasSuffix(s, "\r\n"):
		nl = "\r\n"
	case strings.HasSuffix(s, "\n"):
		nl = "\n"
	}
	return
}

344
stringsx/funcs_test.go Normal file
View File

@@ -0,0 +1,344 @@
package stringsx
import (
"testing"
)
type (
	// testIndentSet is a table case for indentation tests.
	testIndentSet struct {
		name   string
		orig   string
		indent string
		lvl    uint
		ws     bool
		empty  bool
		tgt    string
	}
	// testRedactSet is a table case for TestRedact: orig is masked with
	// mask, keeping leading/trailing characters unmasked, and compared to tgt.
	testRedactSet struct {
		name     string
		orig     string
		leading  uint
		trailing uint
		tgt      string
		newline  bool
		mask     string // defaults to DefMaskStr.
	}
	// testTrimLinesSet is a table case for TestTrimLines (per-line trimming).
	testTrimLinesSet struct {
		name  string
		orig  string
		left  bool
		right bool
		tgt   string
	}
	// testTrimSet is a table case for the one-sided trim tests.
	testTrimSet struct {
		name string
		orig string
		tgt  string
	}
)
// TestRedact runs table-driven cases through Redact, covering full masking,
// unmasked leading/trailing characters, newline preservation (both \n and
// \r\n), custom masks, and the full-mask overflow safety behavior.
func TestRedact(t *testing.T) {
	var out string

	var tests []testRedactSet = []testRedactSet{
		testRedactSet{
			name:     "empty in, empty out",
			orig:     "",
			leading:  0,
			trailing: 0,
			tgt:      "",
		},
		// Note: the default mask is "***" (3 chars), so a fully-masked
		// 8-char input renders as 24 mask characters.
		testRedactSet{
			name:     "standard",
			orig:     "password",
			leading:  0,
			trailing: 0,
			tgt:      "************************",
		},
		testRedactSet{
			name:     "standard with newline",
			orig:     "pass\nword",
			leading:  0,
			trailing: 0,
			tgt:      "************\n************",
			newline:  true,
		},
		testRedactSet{
			name:     "standard with Windows newline",
			orig:     "pass\r\nword",
			leading:  0,
			trailing: 0,
			tgt:      "************\r\n************",
			newline:  true,
		},
		testRedactSet{
			name:     "standard with newline without newlines",
			orig:     "pass\nword",
			leading:  0,
			trailing: 0,
			tgt:      "***************************",
		},
		testRedactSet{
			name:     "single leading",
			orig:     "password",
			leading:  1,
			trailing: 0,
			tgt:      "p*********************",
		},
		testRedactSet{
			name:     "single trailing",
			orig:     "password",
			leading:  0,
			trailing: 1,
			tgt:      "*********************d",
		},
		testRedactSet{
			name:     "three leading",
			orig:     "password",
			leading:  3,
			trailing: 0,
			tgt:      "pas***************",
		},
		testRedactSet{
			name:     "three trailing",
			orig:     "password",
			leading:  0,
			trailing: 3,
			tgt:      "***************ord",
		},
		testRedactSet{
			name:     "three leading and trailing",
			orig:     "password",
			leading:  3,
			trailing: 3,
			tgt:      "pas******ord",
		},
		// The overflow cases exercise the safety precaution: when
		// leading+trailing >= len(orig), the whole string is masked.
		testRedactSet{
			name:     "unmask overflow leading",
			orig:     "password",
			leading:  5,
			trailing: 4,
			tgt:      "************************",
		},
		testRedactSet{
			name:     "unmask overflow trailing",
			orig:     "password",
			leading:  4,
			trailing: 5,
			tgt:      "************************",
		},
		testRedactSet{
			name:     "single mask",
			orig:     "password",
			leading:  0,
			trailing: 0,
			tgt:      "********",
			mask:     "*",
		},
		testRedactSet{
			name:     "standard trailing newline with newlines",
			orig:     "password\n",
			leading:  0,
			trailing: 0,
			tgt:      "************************\n",
			newline:  true,
		},
		testRedactSet{
			name:     "standard trailing newline without newlines",
			orig:     "password\n",
			leading:  0,
			trailing: 0,
			tgt:      "***************************",
		},
	}

	for idx, ts := range tests {
		out = Redact(ts.orig, ts.mask, ts.leading, ts.trailing, ts.newline)
		if out == ts.tgt {
			t.Logf("[%d] OK (%s): %#v: got %#v", idx, ts.name, ts.orig, out)
		} else {
			t.Errorf(
				"[%d] FAIL (%s): %#v (len %d):\n"+
					"\t\t\texpected (len %d): %#v\n"+
					"\t\t\tgot (len %d): %#v\n"+
					"\t\t%#v",
				idx, ts.name, ts.orig, len(ts.orig),
				len(ts.tgt), ts.tgt,
				len(out), out,
				ts,
			)
		}
	}
}
// TestTrimLines runs table-driven cases through TrimLines, covering the
// no-op case, left/right/both trimming, and trailing-newline preservation.
func TestTrimLines(t *testing.T) {
	var out string

	var tests []testTrimLinesSet = []testTrimLinesSet{
		testTrimLinesSet{
			name:  "none",
			orig:  " foo \n bar \n baz ",
			left:  false,
			right: false,
			tgt:   " foo \n bar \n baz ",
		},
		testTrimLinesSet{
			name:  "standard",
			orig:  " foo \n bar \n baz ",
			left:  true,
			right: true,
			tgt:   "foo\nbar\nbaz",
		},
		testTrimLinesSet{
			name:  "left only",
			orig:  " foo \n bar \n baz ",
			left:  true,
			right: false,
			tgt:   "foo \nbar \nbaz ",
		},
		testTrimLinesSet{
			name:  "right only",
			orig:  " foo \n bar \n baz ",
			left:  false,
			right: true,
			tgt:   " foo\n bar\n baz",
		},
		testTrimLinesSet{
			name:  "standard, trailing newline",
			orig:  " foo \n bar \n baz \n",
			left:  true,
			right: true,
			tgt:   "foo\nbar\nbaz\n",
		},
		testTrimLinesSet{
			name:  "left only, trailing newline",
			orig:  " foo \n bar \n baz \n",
			left:  true,
			right: false,
			tgt:   "foo \nbar \nbaz \n",
		},
		testTrimLinesSet{
			name:  "right only, trailing newline",
			orig:  " foo \n bar \n baz \n",
			left:  false,
			right: true,
			tgt:   " foo\n bar\n baz\n",
		},
		// Since there's no "non-space" boundary, both of these condition tests do the same thing.
		testTrimLinesSet{
			name:  "left only, trailing newline and ws",
			orig:  " foo \n bar \n baz \n ",
			left:  true,
			right: false,
			tgt:   "foo \nbar \nbaz \n",
		},
		testTrimLinesSet{
			name:  "right only, trailing newline and ws",
			orig:  " foo \n bar \n baz \n ",
			left:  false,
			right: true,
			tgt:   " foo\n bar\n baz\n",
		},
	}

	for idx, ts := range tests {
		out = TrimLines(ts.orig, ts.left, ts.right)
		if out == ts.tgt {
			t.Logf("[%d] OK (%s): %#v: got %#v", idx, ts.name, ts.orig, out)
		} else {
			t.Errorf(
				"[%d] FAIL (%s): %#v (len %d):\n"+
					"\t\t\texpected (len %d): %#v\n"+
					"\t\t\tgot (len %d): %#v\n"+
					"\t\t%#v",
				idx, ts.name, ts.orig, len(ts.orig),
				len(ts.tgt), ts.tgt,
				len(out), out,
				ts,
			)
		}
	}
}
// TestTrimSpaceLeft verifies TrimSpaceLeft strips only leading whitespace
// (spaces, tabs, and newlines), leaving trailing whitespace intact.
func TestTrimSpaceLeft(t *testing.T) {
	cases := []testTrimSet{
		{name: "standard", orig: "  foo  ", tgt: "foo  "},
		{name: "tabs", orig: "\t\tfoo\t\t", tgt: "foo\t\t"},
		{name: "newlines", orig: "\n\nfoo\n\n", tgt: "foo\n\n"},
	}
	for idx, tc := range cases {
		got := TrimSpaceLeft(tc.orig)
		if got != tc.tgt {
			t.Errorf(
				"[%d] FAIL (%s): %#v (len %d):\n"+
					"\t\t\texpected (len %d): %#v\n"+
					"\t\t\tgot (len %d): %#v\n"+
					"\t\t%#v",
				idx, tc.name, tc.orig, len(tc.orig),
				len(tc.tgt), tc.tgt,
				len(got), got,
				tc,
			)
			continue
		}
		t.Logf("[%d] OK (%s): %#v: got %#v", idx, tc.name, tc.orig, got)
	}
}
// TestTrimSpaceRight confirms TrimSpaceRight strips only trailing whitespace
// (spaces, tabs, newlines), leaving leading whitespace intact.
func TestTrimSpaceRight(t *testing.T) {
	cases := []testTrimSet{
		{
			name: "standard",
			orig: " foo ",
			tgt:  " foo",
		},
		{
			name: "tabs",
			orig: "\t\tfoo\t\t",
			tgt:  "\t\tfoo",
		},
		{
			name: "newlines",
			orig: "\n\nfoo\n\n",
			tgt:  "\n\nfoo",
		},
	}
	for i, tc := range cases {
		got := TrimSpaceRight(tc.orig)
		if got != tc.tgt {
			t.Errorf(
				"[%d] FAIL (%s): %#v (len %d):\n"+
					"\t\t\texpected (len %d): %#v\n"+
					"\t\t\tgot (len %d): %#v\n"+
					"\t\t%#v",
				i, tc.name, tc.orig, len(tc.orig),
				len(tc.tgt), tc.tgt,
				len(got), got,
				tc,
			)
			continue
		}
		t.Logf("[%d] OK (%s): %#v: got %#v", i, tc.name, tc.orig, got)
	}
}

4
timex/doc.go Normal file
View File

@@ -0,0 +1,4 @@
/*
Package timex provides some handy [time]-related functions.
*/
package timex

35
timex/funcs.go Normal file
View File

@@ -0,0 +1,35 @@
package timex
import (
`time`
)
/*
F64Seconds renders [time.Time] `t` as a float64 of <seconds>.<nanoseconds>,
where <seconds> is the count of seconds since the UNIX epoch and
<nanoseconds> is the count of nanoseconds elapsed since that second boundary.

This is useful for representing a UNIX Epoch timestamp in seconds while
retaining nanosecond precision.
*/
func F64Seconds(t time.Time) (f64 float64) {
	f64 = F64Nanoseconds(t) / float64(time.Second)
	return
}
/*
F64Milliseconds behaves like [F64Seconds], except the integer portion of the
returned float is milliseconds rather than seconds.
*/
func F64Milliseconds(t time.Time) (f64 float64) {
	f64 = F64Nanoseconds(t) / float64(time.Millisecond)
	return
}
/*
F64Microseconds behaves like [F64Seconds], except the integer portion of the
returned float is microseconds rather than seconds.
*/
func F64Microseconds(t time.Time) (f64 float64) {
	f64 = F64Nanoseconds(t) / float64(time.Microsecond)
	return
}
// F64Nanoseconds returns [time.Time.UnixNano] as a float64.
func F64Nanoseconds(t time.Time) (f64 float64) {
return float64(t.UnixNano())
}

30
timex/funcs_test.go Normal file
View File

@@ -0,0 +1,30 @@
package timex
import (
"testing"
`time`
)
// TestF64 checks all four F64* conversions against hand-computed expectations
// for a timestamp sitting one nanosecond before a second boundary.
func TestF64(t *testing.T) {
	// 2025-12-23 23:42:09.999999999 +0000 UTC
	tm := time.Unix(1766533329, int64(time.Second-1))
	tmSeconds := 1766533329.999999999
	tmMilli := 1766533329999.999999
	tmMicro := 1766533329999999.999
	tmNano := float64(1766533329999999999)
	if F64Seconds(tm) != tmSeconds {
		t.Fatalf("Failed seconds: %f != %f", F64Seconds(tm), tmSeconds)
	}
	if F64Milliseconds(tm) != tmMilli {
		t.Fatalf("Failed milliseconds: %f != %f", F64Milliseconds(tm), tmMilli)
	}
	if F64Microseconds(tm) != tmMicro {
		t.Fatalf("Failed microseconds: %f != %f", F64Microseconds(tm), tmMicro)
	}
	if F64Nanoseconds(tm) != tmNano {
		t.Fatalf("Failed nanoseconds: %f != %f", F64Nanoseconds(tm), tmNano)
	}
}

6
tplx/consts.go Normal file
View File

@@ -0,0 +1,6 @@
package tplx
const (
	// TplTypeText selects [text/template] rendering.
	TplTypeText tplType = 0
	// TplTypeHtml selects [html/template] rendering (with contextual escaping).
	TplTypeHtml tplType = 1
)

4
tplx/doc.go Normal file
View File

@@ -0,0 +1,4 @@
/*
Package tplx provides some "shortcuts" to [text/template] and [html/template] rendering.
*/
package tplx

9
tplx/errs.go Normal file
View File

@@ -0,0 +1,9 @@
package tplx
import (
`errors`
)
// ErrInvalidTplType is returned when a template-type selector is neither
// TplTypeText nor TplTypeHtml.
var ErrInvalidTplType error = errors.New("unknown/invalid template type")

235
tplx/funcs.go Normal file
View File

@@ -0,0 +1,235 @@
package tplx
import (
`bytes`
htmlTpl `html/template`
txtTpl `text/template`
)
// MustTplStrToStr wraps [TplStrToStr], panicking on any error instead of returning it.
func MustTplStrToStr(tplStr string, typ tplType, obj any) (s string) {
	var err error
	s, err = TplStrToStr(tplStr, typ, obj)
	if err != nil {
		panic(err)
	}
	return
}
// MustTplToStr wraps [TplToStr], panicking on any error instead of returning it.
func MustTplToStr[T Template](tpl T, obj any) (s string) {
	var err error
	s, err = TplToStr(tpl, obj)
	if err != nil {
		panic(err)
	}
	return
}
// MustTplToStrWith wraps [TplToStrWith], panicking on any error instead of returning it.
func MustTplToStrWith[T Template](tpl T, tplNm string, obj any) (s string) {
	var err error
	s, err = TplToStrWith(tpl, tplNm, obj)
	if err != nil {
		panic(err)
	}
	return
}
/*
TplStrToStr parses template string `tplStr` as type `typ` (one of [TplTypeText],
[TplTypeHtml]) and executes it against `obj`, returning the rendered result.

This is obviously quite inflexible - there's no way to provide a [text/template.FuncMap]/[html/template.FuncMap],
for instance, but if more advanced template features aren't needed then this might just do the trick.
If you need something more flexible, see [TplToStr] instead.

An unrecognized `typ` yields [ErrInvalidTplType].
*/
func TplStrToStr(tplStr string, typ tplType, obj any) (out string, err error) {
	var buf bytes.Buffer
	switch typ {
	case TplTypeText:
		var tpl *txtTpl.Template
		if tpl, err = txtTpl.New("").Parse(tplStr); err != nil {
			return
		}
		err = tpl.Execute(&buf, obj)
	case TplTypeHtml:
		var tpl *htmlTpl.Template
		if tpl, err = htmlTpl.New("").Parse(tplStr); err != nil {
			return
		}
		err = tpl.Execute(&buf, obj)
	default:
		err = ErrInvalidTplType
	}
	if err != nil {
		return
	}
	out = buf.String()
	return
}
/*
TplToStr takes in an [html/template] or [text/template] and an object and executes it.
PLEASE NOTE that it is expected that `tpl` has already had at least one template string `.Parse()`'d in.
If you haven't used generics in Golang yet, this function would be used via something like the following complete example
for both a [text/template.Template] (import-aliased as `txtT.Template`) and
an [html/template.Template] (import-aliased as `htmlT.Template`).
import (
"fmt"
"log"
txtT "text/template"
htmlT "html/template"
`r00t2.io/goutils/tplx`
)
type (
S struct {
Name string
}
)
var (
tTpl *txtT.Template
hTpl *htmlT.Template
)
const tTplStr string = "Greetings, {{ .Name }}!\n"
const hTplStr string = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Hello, {{ .Name }}!</title>
</head>
<body>
<p>Hello, {{ .Name }}. Good to see you.</p>
</body>
</html>
`
func main() {
var err error
var s string
var o *S
o = &S{
Name: "Bob",
}
// A text template.
if tTpl, err = txtT.
New("my_txt_template").
Parse(tTplStr); err != nil {
log.Panicf("Failed to parse text template string '%s': %v\n", tTplStr, err)
}
if s, err = tplx.TplToStr[*txtT.Template](tTpl, o); err != nil {
log.Panicf("Failed to render text template to string: %v\n", err)
}
fmt.Println(s)
// An HTML template.
if hTpl, err = htmlT.
New("index.html").
Parse(hTplStr); err != nil {
log.Panicf("Failed to parse HTML template string '%s': %v\n", hTplStr, err)
}
if s, err = tplx.TplToStr[*htmlT.Template](hTpl, o); err != nil {
log.Panicf("Failed to render HTML template to string: %v\n", err)
}
fmt.Println(s)
}
Additionally, because this function uses a union type [Template],
you can even leave the type indicator off.
For example:
// ...
if s, err = tplx.TplToStr(tTpl, o); err != nil {
log.Panicf("Failed to render text template to string: %v\n", err)
}
// ...
if s, err = tplx.TplToStr(hTpl, o); err != nil {
log.Panicf("Failed to render HTML template to string: %v\n", err)
}
// ...
However, this is not recommended for readability purposes - including
the type indicator indicates (heh heh) to others reading your code
what type `tTpl` and `hTpl` are without needing to cross-reference
their declaration/assignment/definition.
For more information on generics in Golang, see:
* The introductory [blog post]
* The official [tutorial]
* The syntax [reference doc]
* The (community-maintained/unofficial) [Go by Example: Generics]
[blog post]: https://go.dev/blog/intro-generics
[tutorial]: https://go.dev/doc/tutorial/generics
[reference doc]: https://go.dev/ref/spec#Instantiations
[Go by Example: Generics]: https://gobyexample.com/generics
*/
func TplToStr[T Template](tpl T, obj any) (out string, err error) {
	// Render into a stack-local buffer; only surface a string on success.
	var buf bytes.Buffer
	if err = tpl.Execute(&buf, obj); err != nil {
		return
	}
	out = buf.String()
	return
}
/*
TplToStrWith functions the exact same as [TplToStr] but allows you to specify the
template entry point (template name) named `nm`.
For example (see [TplToStr] for a full example):
// ...
var tplNm string = "index.html"
if s, err = tplx.TplToStrWith(tTpl, tplNm, o); err != nil {
log.Panicf("Failed to render HTML template '%s' to string: %v\n", tplNm, err)
}
// ...
would call the equivalent of:
// ...
if err = tpl.ExecuteTemplate(<internal buffer>, tplNm, o); err != nil {
// ...
}
*/
func TplToStrWith[T Template](tpl T, tplNm string, obj any) (out string, err error) {
	// Identical to TplToStr, but executes the named template `tplNm`
	// rather than the default entry point.
	var buf bytes.Buffer
	if err = tpl.ExecuteTemplate(&buf, tplNm, obj); err != nil {
		return
	}
	out = buf.String()
	return
}

103
tplx/funcs_test.go Normal file
View File

@@ -0,0 +1,103 @@
package tplx
import (
htmlT `html/template`
`log`
"testing"
txtT `text/template`
)
const (
// Template names used as the parse/execute entry points in the tests below.
txtTplNm string = "my_txt_template"
htmlTplNm string = "index.html"
// Expected rendered output for the text and HTML templates, respectively.
tgtTxt string = "Greetings, Bob!\n"
tgtHtml string = "<!DOCTYPE html>\n<html lang=\"en\">\n\t<head>\n\t\t<meta charset=\"utf-8\">\n\t\t<title>Hello, Bob!</title>\n\t</head>\n\t<body>\n\t\t<p>Hello, Bob. Good to see you.</p>\n\t</body>\n</html>\n"
// Template source strings under test. NOTE: hTplStr is a raw string literal,
// so its exact whitespace is significant -- it must render to tgtHtml above.
tTplStr string = "Greetings, {{ .Name }}!\n"
hTplStr string = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Hello, {{ .Name }}!</title>
</head>
<body>
<p>Hello, {{ .Name }}. Good to see you.</p>
</body>
</html>
`
)
var (
// tTpl and hTpl are the pre-parsed text and HTML templates shared by the tests;
// Must panics at init if either template string fails to parse.
tTpl *txtT.Template = txtT.Must(txtT.New(txtTplNm).Parse(tTplStr))
hTpl *htmlT.Template = htmlT.Must(htmlT.New(htmlTplNm).Parse(hTplStr))
// o is the render context object; both templates reference {{ .Name }}.
o struct{ Name string } = struct{ Name string }{
Name: "Bob",
}
)
// TestTpl verifies that TplToStr renders both a pre-parsed text template and a
// pre-parsed HTML template to their exact expected outputs.
func TestTpl(t *testing.T) {
	var err error
	var s string
	// if s, err = TplToStr[*txtT.Template](tTpl, o); err != nil {
	if s, err = TplToStr(tTpl, o); err != nil {
		t.Fatalf("Failed to render text template to string: %v\n", err)
	}
	t.Logf("Text template (%#v): '%s'", s, s)
	if s != tgtTxt {
		t.Fatalf("Mismatch on text template '%s'", s)
	}
	// if s, err = TplToStr[*htmlT.Template](hTpl, o); err != nil {
	if s, err = TplToStr(hTpl, o); err != nil {
		// Fixed: this previously called log.Panicf, which panics the whole
		// process instead of failing this test through the testing framework;
		// t.Fatalf matches the text-template branch above.
		t.Fatalf("Failed to render HTML template to string: %v\n", err)
	}
	t.Logf("HTML template (%#v):\n%s", s, s)
	if s != tgtHtml {
		t.Fatalf("Mismatch on HTML template '%s'", s)
	}
}
// TestTplStr verifies that TplStrToStr parses and renders both the text and
// HTML template *strings* (as opposed to pre-parsed templates) to their
// exact expected outputs.
func TestTplStr(t *testing.T) {
var err error
var s string
if s, err = TplStrToStr(tTplStr, TplTypeText, o); err != nil {
t.Fatalf("Failed to render text template to string: %v\n", err)
}
t.Logf("Text template (%#v): '%s'", s, s)
if s != tgtTxt {
t.Fatalf("Mismatch on text template '%s'", s)
}
if s, err = TplStrToStr(hTplStr, TplTypeHtml, o); err != nil {
// NOTE(review): log.Panicf panics the process instead of failing the test;
// this should probably be t.Fatalf to match the text branch above -- confirm.
log.Panicf("Failed to render HTML template to string: %v\n", err)
}
t.Logf("HTML template (%#v):\n%s", s, s)
if s != tgtHtml {
t.Fatalf("Mismatch on HTML template '%s'", s)
}
}
// TestTplWith verifies that TplToStrWith renders both pre-parsed templates via
// their explicit entry-point names (txtTplNm / htmlTplNm) to the exact
// expected outputs.
func TestTplWith(t *testing.T) {
var err error
var s string
if s, err = TplToStrWith(tTpl, txtTplNm, o); err != nil {
t.Fatalf("Failed to render text template to string: %v\n", err)
}
t.Logf("Text template (%#v): '%s'", s, s)
if s != tgtTxt {
t.Fatalf("Mismatch on text template '%s'", s)
}
if s, err = TplToStrWith(hTpl, htmlTplNm, o); err != nil {
// NOTE(review): log.Panicf panics the process instead of failing the test;
// this should probably be t.Fatalf to match the text branch above -- confirm.
log.Panicf("Failed to render HTML template to string: %v\n", err)
}
t.Logf("HTML template (%#v):\n%s", s, s)
if s != tgtHtml {
t.Fatalf("Mismatch on HTML template '%s'", s)
}
}

655
tplx/sprigx/README.adoc Normal file
View File

@@ -0,0 +1,655 @@
= SprigX
Brent Saner <bts@square-r00t.net>
Last rendered {localdatetime}
:doctype: book
:docinfo: shared
:data-uri:
:imagesdir: images
:sectlinks:
:sectnums:
:sectnumlevels: 7
:toc: preamble
:toc2: left
:idprefix:
:toclevels: 7
:source-highlighter: rouge
:docinfo: shared
[id="wat"]
== What is SprigX?
SprigX are extensions to https://masterminds.github.io/sprig/[the `sprig` library^] (https://pkg.go.dev/github.com/Masterminds/sprig/v3[Go docs^]).
They provide functions that offer more enriched use cases and domain-specific data.
[TIP]
====
If you are reading this README on the Go Module Directory documentation (https://pkg.go.dev/r00t2.io/goutils/tplx/sprigx)
or the directory landing page (https://git.r00t2.io/r00t2/go_goutils/src/branch/master/tplx/sprigx), it may not render correctly.
Be sure to view it properly via https://git.r00t2.io/r00t2/go_goutils/src/branch/master/tplx/sprigx/README.adoc[the AsciiDoc rendering^]
or by downloading and viewing the https://git.r00t2.io/r00t2/go_goutils/raw/branch/master/tplx/sprigx/README.html[HTML version^].
====
[id="use"]
== How do I Use SprigX?
The same way you would `sprig`!
[%collapsible]
.Like this.
====
[source,go]
----
package main
import (
htmlTplLib "html/template"
txtTplLib "text/template"
"r00t2.io/goutils/tplx/sprigx"
)
var (
txtTpl *txtTplLib.Template = txtTplLib.
New("").
Funcs(
sprigx.TxtFuncMap(),
)
htmlTpl *htmlTplLib.Template = htmlTplLib.
New("").
Funcs(
sprigx.HtmlFuncMap(),
)
)
----
====
They can even be combined/used together.
[%collapsible]
.Like this.
====
[source,go]
----
package main
import (
"text/template"
"github.com/Masterminds/sprig/v3"
"r00t2.io/goutils/tplx/sprigx"
)
var txtTpl *template.Template = template.
New("").
Funcs(
sprigx.TxtFuncMap(),
).
Funcs(
sprig.TxtFuncMap(),
)
// Or:
/*
var txtTpl *template.Template = template.
New("").
Funcs(
sprig.TxtFuncMap(),
).
Funcs(
sprigx.TxtFuncMap(),
)
*/
----
====
If a `<template>.FuncMap` is added via `.Funcs()` *after* template parsing, it will override any functions of the same name of a `<template>.FuncMap` *before* parsing.
For example, if both `sprig` and `sprigx` provide a function `foo`:
this will use `foo` from `sprigx`
[%collapsible]
.(show)
====
[source,go]
----
package main
import (
"text/template"
"github.com/Masterminds/sprig/v3"
"r00t2.io/goutils/tplx/sprigx"
)
const (
myTpl string = `{{ "This is an example template string." | foo }}`
)
var (
tpl *template.Template = template.Must(
template.
New("").
Funcs(sprig.TxtFuncMap()).
Parse(myTpl),
).
Funcs(sprigx.TxtFuncMap())
)
----
====
whereas this will use `foo` from `sprig`
[%collapsible]
.(show)
====
[source,go]
----
package main
import (
"text/template"
"github.com/Masterminds/sprig/v3"
"r00t2.io/goutils/tplx/sprigx"
)
const (
myTpl string = `{{ "This is an example template string." | foo }}`
)
var (
tpl *template.Template = template.Must(
template.
New("").
Funcs(sprigx.TxtFuncMap()).
Parse(myTpl),
).
Funcs(sprig.TxtFuncMap())
)
----
====
and a function can even be explicitly overridden.
[%collapsible]
.(show)
====
This would override a function `foo` and `foo2` in `sprigx` from `foo` and `foo2` from `sprig`, but leave all other `sprig` functions untouched.
[source,go]
----
package main
import (
"text/template"
"github.com/Masterminds/sprig/v3"
"r00t2.io/goutils/tplx/sprigx"
)
const (
myTpl string = `{{ "This is an example template string." | foo }}`
)
var (
overrideFuncs template.FuncMap = sprig.TxtFuncMap()
tpl *template.Template = template.Must(
template.
New("").
Funcs(sprigx.TxtFuncMap()).
Parse(myTpl),
).
Funcs(
template.FuncMap(
map[string]any{
"foo": overrideFuncs["foo"],
"foo2": overrideFuncs["foo2"],
},
),
)
)
----
====
[id="fn"]
== Functions
Expect this list to grow over time, and potentially more frequently than the `sprig` functions.
[id="fn_os"]
=== Operating System
[id="fn_os_hstnm"]
==== `osHostname`
`osHostname` simply wraps and returns the result of calling https://pkg.go.dev/os#Hostname[`os.Hostname`^].
As such, it comes with the same caveats - it's possible for it to error, and it isn't guaranteed to be an FQDN -- it will be precisely/exactly whatever the kernel's hostname is set as.
[id="fn_sys"]
=== System/Platform/Architecture
[id="fn_sys_arch"]
==== `sysArch`
Returns the https://pkg.go.dev/runtime#GOARCH[`runtime.GOARCH`^] constant.
[id="fn_sys_numcpu"]
==== `sysNumCpu`
Returns the value from https://pkg.go.dev/runtime#NumCPU[`runtime.NumCPU`^].
[id="fn_sys_os"]
==== `sysOsName`
Returns the https://pkg.go.dev/runtime#GOOS[`runtime.GOOS`^] constant.
[id="fn_sys_rntm"]
==== `sysRuntime`
This function returns a `map[string]string` of various information from the https://pkg.go.dev/runtime[`runtime` stdlib library^].
Specifically, the following are returned.
[TIP]
====
The value type is a direct link to the `runtime` documentation providing more detail about the associated value.
Because all values are mapped as strings, they can be converted back to their native type via e.g. the https://masterminds.github.io/sprig/conversion.html[Sprig conversion functions^] if necessary.
====
.`sysRuntime` Values
[cols="^.^3m,^.^3",options="header"]
|===
| Key | Value Type
| compiler | https://pkg.go.dev/runtime#Compiler[string^]
| arch | https://pkg.go.dev/runtime#GOARCH[string^]
| os | https://pkg.go.dev/runtime#GOOS[string^]
| maxprocs | https://pkg.go.dev/runtime#GOMAXPROCS[int^] footnote:[For safety concerns, `sprigx` does not allow *setting* `GOMAXPROCS`, this value only contains the *current* `GOMAXPROCS` value.]
| cpu_cnt | https://pkg.go.dev/runtime#NumCPU[int^]
| num_cgo | https://pkg.go.dev/runtime#NumCgoCall[int^]
| num_go | https://pkg.go.dev/runtime#NumGoroutine[int^]
| go_ver | https://pkg.go.dev/runtime#Version[string^]
|===
As a convenience, some of these values also have their own dedicated functions as well:
* <<fn_sys_arch>>
* <<fn_sys_numcpu>>
* <<fn_sys_os>>
[id="fn_path"]
=== Paths
[id="fn_path_gnrc"]
==== Generic
These operate similar to https://pkg.go.dev/path[the `path` stdlib library^] and use a fixed `/` path separator.
[id="fn_path_gnrc_pj"]
===== `pathJoin`
`pathJoin` operates *exactly* like https://pkg.go.dev/path#Join[`path.Join`^] in stdlib.
[WARNING]
====
If you are joining paths in a pipeline, you almost assuredly want <<fn_path_gnrc_ppj>> or <<fn_path_gnrc_pspj>> instead unless you are explicitly *appending* a pipeline result to a path.
====
[source,gotemplate]
----
{{- pathJoin "a" "b" "c" }}
{{- pathJoin "/" "a" "b" "c" }}
{{- pathJoin "/a/b" "c" }}
----
renders as:
[source,text]
----
a/b/c
/a/b/c
/a/b/c
----
[id="fn_path_gnrc_ppj"]
===== `pathPipeJoin`
`pathPipeJoin` operates like <<fn_path_gnrc_pj>> with one deviation: the root/base path is expected to be *last* in the arguments.
This makes it much more suitable for use in template pipelines, as the previous value in a pipeline is passed in as the last element to the next pipe function.
[source,gotemplate]
----
{{- $myBase := "/a" -}}
{{- pathPipeJoin "b" "c" "a" }}
{{- pathPipeJoin "a" "b" "c" "/" }}
{{- $myBase | pathPipeJoin "b" "c" }}
----
renders as:
[source,text]
----
a/b/c
/a/b/c
/a/b/c
----
[id="fn_path_gnrc_psj"]
===== `pathSliceJoin`
`pathSliceJoin` joins a slice of path segment strings (`[]string`) instead of a variadic sequence of strings.
[TIP]
====
The `splitList` function shown below is from the https://masterminds.github.io/sprig/string_slice.html[`sprig` string slice functions^].
====
[source,gotemplate]
----
{{- $myList := "a,b,c" | splitList "," -}}
{{- $myList | pathSliceJoin }}
{{- ("a,b,c" | splitList ",") | pathSliceJoin }}
{{- ("/,a,b,c" | splitList ",") | pathSliceJoin }}
----
renders as:
[source,text]
----
a/b/c
a/b/c
/a/b/c
----
[id="fn_path_gnrc_pspj"]
===== `pathSlicePipeJoin`
`pathSlicePipeJoin` operates like <<fn_path_gnrc_ppj>> in that it is suitable for pipeline use in which the root/base path is passed in from the pipeline, but it is like <<fn_path_gnrc_psj>> in that it then also accepts a slice of path segments (`[]string`) to append to that base path.
[TIP]
====
The `splitList` function shown below is from the https://masterminds.github.io/sprig/string_slice.html[`sprig` string slice functions^].
====
[source,gotemplate]
----
{{- $myBase := "/a" -}}
{{- $myList := "b,c,d" | splitList "," -}}
{{- pathSlicePipeJoin $myList $myBase }}
{{- $myBase | pathSlicePipeJoin $myList }}
----
renders as:
[source,text]
----
/a/b/c/d
/a/b/c/d
----
[id="fn_path_gnrc_psubj"]
===== `pathSubJoin`
`pathSubJoin` operates like <<fn_path_gnrc_pj>> but it expects an explicit root/base path.
The pipeline-friendly equivalent of this is <<fn_path_gnrc_ppj>>.
[source,gotemplate]
----
{{- pathSubJoin "/a/b" "c" }}
{{- pathSubJoin "/" "a" "b" "c" }}
{{- "c" | pathSubJoin "/" "a" "b" }}
----
renders as:
[source,text]
----
/a/b/c
/a/b/c
/a/b/c
----
[id="fn_path_os"]
==== OS/Platform-Tailored
These operate similar to https://pkg.go.dev/path/filepath[the `path/filepath` stdlib library^], and use the OS-specific https://pkg.go.dev/os#PathSeparator[`os.PathSeparator`^].
[WARNING]
====
Take special note of the oddness around specifying Windows paths and drive letters in e.g. <<fn_path_os_pj>>!
It is recommended to make use of <<fn_sys_os>> to conditionally format path bases/roots if needed.
====
[id="fn_path_os_pj"]
===== `osPathJoin`
`osPathJoin` operates *exactly* like https://pkg.go.dev/path/filepath#Join[`path/filepath.Join`^] in stdlib.
[WARNING]
====
If you are joining paths in a pipeline, you almost assuredly want <<fn_path_os_ppj>> or <<fn_path_os_pspj>> instead unless you are explicitly *appending* a pipeline result to a path.
====
[source,gotemplate]
----
{{- osPathJoin "a" "b" "c" }}
{{- osPathJoin "/" "a" "b" "c" }}
{{- osPathJoin "C:\\" "a" "b" "c" }}
{{- osPathJoin "C:" "a" "b" "c" }}
----
renders as:
[cols="^.^2,.^4a",options="header"]
|===
| OS ^| Result
| Windows | [source,text]
----
a\b\c
\a\b\c
\a\b\c
C:\a\b\c
C:a\b\c
----
| Others (e.g. Linux, macOS) | [source,text]
----
a/b/c
/a/b/c
C:\/a/b/c
C:/a/b/c
----
|===
[id="fn_path_os_ppj"]
===== `osPathPipeJoin`
`osPathPipeJoin` operates like <<fn_path_gnrc_ppj>> (except using OS-specific path separators).
This makes it much more suitable for use in template pipelines, as the previous value in a pipeline is passed in as the last element to the next pipe function.
[source,gotemplate]
----
{{- $myBase := "/a" -}}
{{- osPathPipeJoin "b" "c" "a" }}
{{- osPathPipeJoin "a" "b" "c" "/" }}
{{- $myBase | osPathPipeJoin "b" "c" }}
----
renders as:
[cols="^.^2,.^4a",options="header"]
|===
| OS ^| Result
| Windows | [source,text]
----
a\b\c
\a\b\c
\a\b\c
----
| Others (e.g. Linux, macOS) | [source,text]
----
a/b/c
/a/b/c
/a/b/c
----
|===
[id="fn_path_ossep"]
===== `osPathSep`
`osPathSep` returns the https://pkg.go.dev/os#PathSeparator[`os.PathSeparator`^] for this OS.
[source,gotemplate]
----
{{- osPathSep }}
----
renders as:
[cols="^.^2,.^4a",options="header"]
|===
| OS ^| Result
| Windows | [source,text]
----
\
----
| Others (e.g. Linux, macOS) | [source,text]
----
/
----
|===
[id="fn_path_os_psj"]
===== `osPathSliceJoin`
`osPathSliceJoin` operates like <<fn_path_gnrc_psj>> but with OS-specific path separators.
[TIP]
====
The `splitList` function shown below is from the https://masterminds.github.io/sprig/string_slice.html[`sprig` string slice functions^].
====
[source,gotemplate]
----
{{- $myList := "a,b,c" | splitList "," -}}
{{- $myList | osPathSliceJoin }}
{{- ("a,b,c" | splitList ",") | osPathSliceJoin }}
{{- ("/,a,b,c" | splitList ",") | osPathSliceJoin }}
----
renders as:
[cols="^.^2,.^4a",options="header"]
|===
| OS ^| Result
| Windows | [source,text]
----
a\b\c
a\b\c
\a\b\c
----
| Others (e.g. Linux, macOS) | [source,text]
----
a/b/c
a/b/c
/a/b/c
----
|===
[id="fn_path_os_pspj"]
===== `osPathSlicePipeJoin`
`osPathSlicePipeJoin` operates like <<fn_path_gnrc_pspj>> but with OS-specific separators.
[TIP]
====
The `splitList` function shown below is from the https://masterminds.github.io/sprig/string_slice.html[`sprig` string slice functions^].
====
[source,gotemplate]
----
{{- $myBase := "/a" -}}
{{- $myList := "b,c,d" | splitList "," -}}
{{- osPathSlicePipeJoin $myList $myBase }}
{{- $myBase | osPathSlicePipeJoin $myList }}
----
renders as:
[cols="^.^2,.^4a",options="header"]
|===
| OS ^| Result
| Windows | [source,text]
----
\a\b\c\d
\a\b\c\d
----
| Others (e.g. Linux, macOS) | [source,text]
----
/a/b/c/d
/a/b/c/d
----
|===
[id="fn_path_os_psubj"]
===== `osPathSubJoin`
`osPathSubJoin` operates like <<fn_path_gnrc_psubj>> but with OS-specific separators.
The pipeline-friendly equivalent of this is <<fn_path_os_ppj>>.
[source,gotemplate]
----
{{- osPathSubJoin "/a/b" "c" }}
{{- osPathSubJoin "/" "a" "b" "c" }}
{{- "c" | osPathSubJoin "/" "a" "b" }}
----
renders as:
[cols="^.^2,.^4a",options="header"]
|===
| OS ^| Result
| Windows | [source,text]
----
\a\b\c
\a\b\c
\a\b\c
----
| Others (e.g. Linux, macOS) | [source,text]
----
/a/b/c
/a/b/c
/a/b/c
----
|===
[id="fn_str"]
=== Strings
[id="fn_str_extindent"]
==== `extIndent`
`extIndent` allows for a MUCH more flexible indenter than the `sprig` `indent` function.
It works with both Windows (`\r\n`) and POSIX (`\n`) linebreaks.
[TIP]
====
If `<indentString>` is set to `\n` and `<levels>` is always set to `1`, this function can even be used to double-space text!
====
It has quite a few arguments, however:
[source,gotemplate]
----
{{ extIndent <levels> <skipFirst> <skipEmpty> <skipWhitespace> <indentString> <input> }}
----
Where:
* `<levels>`: The level of indentation for the text. If less than or equal to `0`, `extIndent` just returns `<input>` as-is and NO-OPs otherwise.
* `<skipFirst>`: If true, skip indenting the first line. This is particularly handy if you like to visually align your function calls in your templates.
* `<skipEmpty>`: If true, do not add an indent to *empty* lines (where an "empty line" means "only has a linebreak").
* `<skipWhitespace>`: If true, do not add an indent to lines that *only* consist of whitespace (spaces, tabs, etc.) and a linebreak.
* `<indentString>`: The string to use as the "indent character". This can be any string, such as `" "`, `"\t"`, `"."`, `"|"`, `"=="` etc.
* `<input>`: The text to be indented. Because it is the last argument, `extIndent` works with pipelined text as well.
[id="fn_dbg"]
=== Debugging
[id="fn_dbg_dump"]
==== `dump`
The `dump` function calls https://pkg.go.dev/github.com/davecgh/go-spew/spew#Sdump[the `Sdump` function^] from https://github.com/davecgh/go-spew[`go-spew`] (https://pkg.go.dev/github.com/davecgh/go-spew/spew[`github.com/davecgh/go-spew/spew`^]) for whatever object(s) is/are passed to it.

1605
tplx/sprigx/README.html Normal file

File diff suppressed because it is too large Load Diff

699
tplx/sprigx/README.md Normal file
View File

@@ -0,0 +1,699 @@
# What is SprigX?
SprigX are extensions to [the `sprig`
library](https://masterminds.github.io/sprig/) ([Go
docs](https://pkg.go.dev/github.com/Masterminds/sprig/v3)).
They provide functions that offer more enriched use cases and
domain-specific data.
If you are reading this README on the Go Module Directory documentation
(<https://pkg.go.dev/r00t2.io/goutils/tplx/sprigx>) or the directory
landing page
(<https://git.r00t2.io/r00t2/go_goutils/src/branch/master/tplx/sprigx>),
it may not render correctly.
Be sure to view it properly via [the AsciiDoc
rendering](https://git.r00t2.io/r00t2/go_goutils/src/branch/master/tplx/sprigx/README.adoc)
or by downloading and viewing the [HTML
version](https://git.r00t2.io/r00t2/go_goutils/raw/branch/master/tplx/sprigx/README.html).
# How do I Use SprigX?
The same way you would `sprig`!
package main
import (
htmlTplLib "html/template"
txtTplLib "text/template"
"r00t2.io/goutils/tplx/sprigx"
)
var (
txtTpl *txtTplLib.Template = txtTplLib.
New("").
Funcs(
sprigx.TxtFuncMap(),
)
htmlTpl *htmlTplLib.Template = htmlTplLib.
New("").
Funcs(
sprigx.HtmlFuncMap(),
)
)
They can even be combined/used together.
package main
import (
"text/template"
"github.com/Masterminds/sprig/v3"
"r00t2.io/goutils/tplx/sprigx"
)
var txtTpl *template.Template = template.
New("").
Funcs(
sprigx.TxtFuncMap(),
).
Funcs(
sprig.TxtFuncMap(),
)
// Or:
/*
var txtTpl *template.Template = template.
New("").
Funcs(
sprig.TxtFuncMap(),
).
Funcs(
sprigx.TxtFuncMap(),
)
*/
If a `<template>.FuncMap` is added via `.Funcs()` **after** template
parsing, it will override any functions of the same name of a
`<template>.FuncMap` **before** parsing.
For example, if both `sprig` and `sprigx` provide a function `foo`:
this will use `foo` from `sprigx`
package main
import (
"text/template"
"github.com/Masterminds/sprig/v3"
"r00t2.io/goutils/tplx/sprigx"
)
const (
myTpl string = `{{ "This is an example template string." | foo }}`
)
var (
tpl *template.Template = template.Must(
template.
New("").
Funcs(sprig.TxtFuncMap()).
Parse(myTpl),
).
Funcs(sprigx.TxtFuncMap())
)
whereas this will use `foo` from `sprig`
package main
import (
"text/template"
"github.com/Masterminds/sprig/v3"
"r00t2.io/goutils/tplx/sprigx"
)
const (
myTpl string = `{{ "This is an example template string." | foo }}`
)
var (
tpl *template.Template = template.Must(
template.
New("").
Funcs(sprigx.TxtFuncMap()).
Parse(myTpl),
).
Funcs(sprig.TxtFuncMap())
)
and a function can even be explicitly overridden.
This would override a function `foo` and `foo2` in `sprigx` from `foo`
and `foo2` from `sprig`, but leave all other `sprig` functions
untouched.
package main
import (
"text/template"
"github.com/Masterminds/sprig/v3"
"r00t2.io/goutils/tplx/sprigx"
)
const (
myTpl string = `{{ "This is an example template string." | foo }}`
)
var (
overrideFuncs template.FuncMap = sprig.TxtFuncMap()
tpl *template.Template = template.Must(
template.
New("").
Funcs(sprigx.TxtFuncMap()).
Parse(myTpl),
).
Funcs(
template.FuncMap(
map[string]any{
"foo": overrideFuncs["foo"],
"foo2": overrideFuncs["foo2"],
},
),
)
)
# Functions
Expect this list to grow over time, and potentially more frequently than
the `sprigx` functions.
## Operating System
### `osHostname`
`osHostname` simply wraps and returns the result of calling
[`os.Hostname`](https://pkg.go.dev/os#Hostname).
As such, it comes with the same caveats - it's possible for it to error,
and it isn't guaranteed to be an FQDN; it will be precisely/exactly
whatever the kernel's hostname is set as.
## System/Platform/Architecture
### `sysArch`
Returns the [`runtime.GOARCH`](https://pkg.go.dev/runtime#GOARCH)
constant.
### `sysNumCpu`
Returns the value from
[`runtime.NumCPU`](https://pkg.go.dev/runtime#NumCPU).
### `sysOsName`
Returns the [`runtime.GOOS`](https://pkg.go.dev/runtime#GOOS) constant.
### `sysRuntime`
This function returns a `map[string]string` of various information from
the [`runtime` stdlib library](https://pkg.go.dev/runtime).
Specifically, the following are returned.
The value type is a direct link to the `runtime` documentation providing
more detail about the associated value.
Because all values are mapped as strings, they can be converted back to
their native type via e.g. the [Sprig conversion
functions](https://masterminds.github.io/sprig/conversion.html) if
necessary.
<table>
<caption><code>sysRuntime</code> Values</caption>
<colgroup>
<col style="width: 50%" />
<col style="width: 50%" />
</colgroup>
<thead>
<tr>
<th style="text-align: center;">Key</th>
<th style="text-align: center;">Value Type</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: center;"><p><code>compiler</code></p></td>
<td style="text-align: center;"><p><a
href="https://pkg.go.dev/runtime#Compiler">string</a></p></td>
</tr>
<tr>
<td style="text-align: center;"><p><code>arch</code></p></td>
<td style="text-align: center;"><p><a
href="https://pkg.go.dev/runtime#GOARCH">string</a></p></td>
</tr>
<tr>
<td style="text-align: center;"><p><code>os</code></p></td>
<td style="text-align: center;"><p><a
href="https://pkg.go.dev/runtime#GOOS">string</a></p></td>
</tr>
<tr>
<td style="text-align: center;"><p><code>maxprocs</code></p></td>
<td style="text-align: center;"><p><a
href="https://pkg.go.dev/runtime#GOMAXPROCS">int</a> <a href="#fn1"
class="footnote-ref" id="fnref1"
role="doc-noteref"><sup>1</sup></a></p></td>
</tr>
<tr>
<td style="text-align: center;"><p><code>cpu_cnt</code></p></td>
<td style="text-align: center;"><p><a
href="https://pkg.go.dev/runtime#NumCPU">int</a></p></td>
</tr>
<tr>
<td style="text-align: center;"><p><code>num_cgo</code></p></td>
<td style="text-align: center;"><p><a
href="https://pkg.go.dev/runtime#NumCgoCall">int</a></p></td>
</tr>
<tr>
<td style="text-align: center;"><p><code>num_go</code></p></td>
<td style="text-align: center;"><p><a
href="https://pkg.go.dev/runtime#NumGoroutine">int</a></p></td>
</tr>
<tr>
<td style="text-align: center;"><p><code>go_ver</code></p></td>
<td style="text-align: center;"><p><a
href="https://pkg.go.dev/runtime#Version">string</a></p></td>
</tr>
</tbody>
</table>
<section id="footnotes" class="footnotes footnotes-end-of-document"
role="doc-endnotes">
<hr />
<ol>
<li id="fn1"><p>For safety concerns, <code>sprigx</code> does not allow
<strong>setting</strong> <code>GOMAXPROCS</code>, this value only
contains the <strong>current</strong> <code>GOMAXPROCS</code> value.<a
href="#fnref1" class="footnote-back" role="doc-backlink">↩︎</a></p></li>
</ol>
</section>
As a convenience, some of these values also have their own dedicated
functions as well:
- [](#fn_sys_arch)
- [](#fn_sys_numcpu)
- [](#fn_sys_os)
## Paths
### Generic
These operate similar to [the `path` stdlib
library](https://pkg.go.dev/path) and use a fixed `/` path separator.
#### `pathJoin`
`pathJoin` operates **exactly** like
[`path.Join`](https://pkg.go.dev/path#Join) in stdlib.
If you are joining paths in a pipeline, you almost assuredly want
[](#fn_path_gnrc_ppj) or [](#fn_path_gnrc_pspj) instead unless you are
explicitly **appending** a pipeline result to a path.
{{- pathJoin "a" "b" "c" }}
{{- pathJoin "/" "a" "b" "c" }}
{{- pathJoin "/a/b" "c" }}
renders as:
a/b/c
/a/b/c
/a/b/c
#### `pathPipeJoin`
`pathPipeJoin` operates like [](#fn_path_gnrc_pj) with one deviation:
the root/base path is expected to be **last** in the arguments.
This makes it much more suitable for use in template pipelines, as the
previous value in a pipeline is passed in as the last element to the
next pipe function.
{{- $myBase := "/a" -}}
{{- pathPipeJoin "b" "c" "a" }}
{{- pathPipeJoin "a" "b" "c" "/" }}
{{- $myBase | pathPipeJoin "b" "c" }}
renders as:
a/b/c
/a/b/c
/a/b/c
#### `pathSliceJoin`
`pathSliceJoin` joins a slice of path segment strings (`[]string`)
instead of a variadic sequence of strings.
The `splitList` function shown below is from the [`sprig` string slice
functions](https://masterminds.github.io/sprig/string_slice.html).
{{- $myList := "a,b,c" | splitList "," -}}
{{- $myList | pathSliceJoin }}
{{- ("a,b,c" | splitList ",") | pathSliceJoin }}
{{- ("/,a,b,c" | splitList ",") | pathSliceJoin }}
renders as:
a/b/c
a/b/c
/a/b/c
#### `pathSlicePipeJoin`
`pathSlicePipeJoin` operates like [](#fn_path_gnrc_ppj) in that it is
suitable for pipeline use in which the root/base path is passed in from
the pipeline, but it is like [](#fn_path_gnrc_psj) in that it then also
accepts a slice of path segments (`[]string`) to append to that base
path.
The `splitList` function shown below is from the [`sprig` string slice
functions](https://masterminds.github.io/sprig/string_slice.html).
{{- $myBase := "/a" -}}
{{- $myList := "b,c,d" | splitList "," -}}
{{- pathSlicePipeJoin $myList $myBase }}
{{- $myBase | pathSlicePipeJoin $myList }}
renders as:
/a/b/c/d
/a/b/c/d
#### `pathSubJoin`
`pathSubJoin` operates like [](#fn_path_gnrc_pj) but it expects an
explicit root/base path.
The pipeline-friendly equivalent of this is [](#fn_path_gnrc_ppj).
{{- pathSubJoin "/a/b" "c" }}
{{- pathSubJoin "/" "a" "b" "c" }}
{{- "c" | pathSubJoin "/" "a" "b" }}
renders as:
/a/b/c
/a/b/c
/a/b/c
### OS/Platform-Tailored
These operate similar to [the `path/filepath` stdlib
library](https://pkg.go.dev/path/filepath), and use the OS-specific
[`os.PathSeparator`](https://pkg.go.dev/os#PathSeparator).
Take special note of the oddness around specifying Windows paths and
drive letters in e.g. [](#fn_path_os_pj)!
It is recommended to make use of [](#fn_sys_os) to conditionally format
path bases/roots if needed.
#### `osPathJoin`
`osPathJoin` operates **exactly** like
[`path/filepath.Join`](https://pkg.go.dev/path/filepath#Join) in stdlib.
If you are joining paths in a pipeline, you almost assuredly want
[](#fn_path_os_ppj) or [](#fn_path_os_pspj) instead unless you are
explicitly **appending** a pipeline result to a path.
{{- osPathJoin "a" "b" "c" }}
{{- osPathJoin "/" "a" "b" "c" }}
{{- osPathJoin "C:\\" "a" "b" "c" }}
{{- osPathJoin "C:" "a" "b" "c" }}
renders as:
<table>
<colgroup>
<col style="width: 33%" />
<col style="width: 66%" />
</colgroup>
<thead>
<tr>
<th style="text-align: center;">OS</th>
<th style="text-align: center;">Result</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: center;"><p>Windows</p></td>
<td style="text-align: left;"><pre class="text"><code>a\b\c
\a\b\c
\a\b\c
C:\a\b\c
C:a\b\c</code></pre></td>
</tr>
<tr>
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
<td style="text-align: left;"><pre class="text"><code>a/b/c
/a/b/c
C:\/a/b/c
C:/a/b/c</code></pre></td>
</tr>
</tbody>
</table>
#### `osPathPipeJoin`
`osPathPipeJoin` operates like [](#fn_path_gnrc_ppj) (except using
OS-specific path separators).
This makes it much more suitable for use in template pipelines, as the
previous value in a pipeline is passed in as the last element to the
next pipe function.
{{- $myBase := "/a" -}}
{{- osPathPipeJoin "b" "c" "a" }}
{{- osPathPipeJoin "a" "b" "c" "/" }}
{{- $myBase | osPathPipeJoin "b" "c" }}
renders as:
<table>
<colgroup>
<col style="width: 33%" />
<col style="width: 66%" />
</colgroup>
<thead>
<tr>
<th style="text-align: center;">OS</th>
<th style="text-align: center;">Result</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: center;"><p>Windows</p></td>
<td style="text-align: left;"><pre class="text"><code>a\b\c
\a\b\c
\a\b\c</code></pre></td>
</tr>
<tr>
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
<td style="text-align: left;"><pre class="text"><code>a/b/c
/a/b/c
/a/b/c</code></pre></td>
</tr>
</tbody>
</table>
#### `osPathSep`
`osPathSep` returns the
[`os.PathSeparator`](https://pkg.go.dev/os#PathSeparator) for this OS.
{{- osPathSep }}
renders as:
<table>
<colgroup>
<col style="width: 33%" />
<col style="width: 66%" />
</colgroup>
<thead>
<tr>
<th style="text-align: center;">OS</th>
<th style="text-align: center;">Result</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: center;"><p>Windows</p></td>
<td style="text-align: left;"><pre class="text"><code>\</code></pre></td>
</tr>
<tr>
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
<td style="text-align: left;"><pre class="text"><code>/</code></pre></td>
</tr>
</tbody>
</table>
#### `osPathSliceJoin`
`osPathSliceJoin` operates like [](#fn_path_gnrc_psj) but with
OS-specific path separators.
The `splitList` function shown below is from the [`sprig` string slice
functions](https://masterminds.github.io/sprig/string_slice.html).
{{- $myList := "a,b,c" | splitList "," -}}
{{- $myList | osPathSliceJoin }}
{{- ("a,b,c" | splitList ",") | osPathSliceJoin }}
{{- ("/,a,b,c" | splitList ",") | osPathSliceJoin }}
renders as:
<table>
<colgroup>
<col style="width: 33%" />
<col style="width: 66%" />
</colgroup>
<thead>
<tr>
<th style="text-align: center;">OS</th>
<th style="text-align: center;">Result</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: center;"><p>Windows</p></td>
<td style="text-align: left;"><pre class="text"><code>a\b\c
a\b\c
\a\b\c</code></pre></td>
</tr>
<tr>
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
<td style="text-align: left;"><pre class="text"><code>a/b/c
a/b/c
/a/b/c</code></pre></td>
</tr>
</tbody>
</table>
#### `osPathSlicePipeJoin`
`osPathSlicePipeJoin` operates like [](#fn_path_gnrc_pspj) but with
OS-specific separators.
The `splitList` function shown below is from the [`sprig` string slice
functions](https://masterminds.github.io/sprig/string_slice.html).
{{- $myBase := "/a" -}}
{{- $myList := "b,c,d" | splitList "," -}}
{{- osPathSlicePipeJoin $myList $myBase }}
{{- $myBase | osPathSlicePipeJoin $myList }}
renders as:
<table>
<colgroup>
<col style="width: 33%" />
<col style="width: 66%" />
</colgroup>
<thead>
<tr>
<th style="text-align: center;">OS</th>
<th style="text-align: center;">Result</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: center;"><p>Windows</p></td>
<td style="text-align: left;"><pre class="text"><code>\a\b\c\d
\a\b\c\d</code></pre></td>
</tr>
<tr>
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
<td style="text-align: left;"><pre class="text"><code>/a/b/c/d
/a/b/c/d</code></pre></td>
</tr>
</tbody>
</table>
#### `osPathSubJoin`
`osPathSubJoin` operates like [](#fn_path_gnrc_psubj) but with
OS-specific separators.
The pipeline-friendly equivalent of this is [](#fn_path_os_ppj).
{{- osPathSubJoin "/a/b" "c" }}
{{- osPathSubJoin "/" "a" "b" "c" }}
{{- "c" | osPathSubJoin "/" "a" "b" }}
renders as:
<table>
<colgroup>
<col style="width: 33%" />
<col style="width: 66%" />
</colgroup>
<thead>
<tr>
<th style="text-align: center;">OS</th>
<th style="text-align: center;">Result</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: center;"><p>Windows</p></td>
<td style="text-align: left;"><pre class="text"><code>\a\b\c
\a\b\c
\a\b\c</code></pre></td>
</tr>
<tr>
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
<td style="text-align: left;"><pre class="text"><code>/a/b/c
/a/b/c
/a/b/c</code></pre></td>
</tr>
</tbody>
</table>
## Strings
### `extIndent`
`extIndent` allows for a MUCH more flexible indenter than the `sprig`
`indent` function.
It works with both Windows (`\r\n`) and POSIX (`\n`) linebreaks.
If `<indentString>` is set to `\n` and `<levels>` is always set to `1`,
this function can even be used to double-space text!
It has quite a few arguments, however:
{{ extIndent <levels> <skipFirst> <skipEmpty> <skipWhitespace> <indentString> <input> }}
Where:
- `<levels>`: The level of indentation for the text. If less than or
equal to `0`, `extIndent` just returns `<input>` as-is and NO-OPs
otherwise.
- `<skipFirst>`: If true, skip indenting the first line. This is
particularly handy if you like to visually align your function calls
in your templates.
- `<skipEmpty>`: If true, do not add an indent to **empty** lines
(where an "empty line" means "only has a linebreak").
- `<skipWhitespace>`: If true, do not add an indent to lines that
**only** consist of whitespace (spaces, tabs, etc.) and a linebreak.
- `<indentString>`: The string to use as the "indent character". This
can be any string, such as `" "`, `"\t"`, `"."`, `"|"`, `"=="` etc.
- `<input>`: The text to be indented. Because it is the last argument,
`extIndent` works with pipelined text as well.
## Debugging
### `dump`
The `dump` function calls [the `Sdump`
function](https://pkg.go.dev/github.com/davecgh/go-spew/spew#Sdump) from
[`go-spew`](https://github.com/davecgh/go-spew)
([`github.com/davecgh/go-spew/spew`](https://pkg.go.dev/github.com/davecgh/go-spew/spew))
for whatever object(s) is/are passed to it.

101
tplx/sprigx/_test.tpl Normal file
View File

@@ -0,0 +1,101 @@
################################################################################
# RUNTIME #
################################################################################
{{- $rntm := sysRuntime }}
Arch: {{ sysArch }}
CPUs: {{ sysNumCpu }}
OS: {{ sysOsName }}
RUNTIME: {{ $rntm }}
{{ range $rntmk, $rntmv := $rntm }}
{{ $rntmk }}:
{{ $rntmv }}
{{- end }}
{{ dump $rntm }}
################################################################################
# PATHS #
################################################################################
###########
# Generic #
###########
pathJoin "a" "b" "c"
{{ pathJoin "a" "b" "c" }}
pathJoin "/" "a" "b" "c"
{{ pathJoin "/" "a" "b" "c" }}
pathJoin "/a" "b" "c"
{{ pathJoin "/a" "b" "c" }}
#
pathPipeJoin "b" "c" "d" "a"
{{ pathPipeJoin "b" "c" "d" "a" }}
"a" | pathPipeJoin "b" "c" "d"
{{ "a" | pathPipeJoin "b" "c" "d"}}
#
$base := "/"
$myPsjSlice := "a,b,c" | splitList ","
pathSliceJoin $myPsjSlice
{{- $base := "/" }}
{{- $myPsjSlice := "a,b,c" | splitList "," }}
{{ pathSliceJoin $myPsjSlice }}
#
$base | pathSlicePipeJoin $myPsjSlice
{{ $base | pathSlicePipeJoin $myPsjSlice }}
#
pathSubJoin $base "a" "b" "c"
{{ pathSubJoin $base "a" "b" "c" }}
######################
# OS/System/Platform #
######################
osPathJoin "a" "b" "c"
{{ osPathJoin "a" "b" "c" }}
osPathJoin "/" "a" "b" "c"
{{ osPathJoin "/" "a" "b" "c" }}
osPathJoin "/a" "b" "c"
{{ osPathJoin "/a" "b" "c" }}
#
osPathPipeJoin "b" "c" "d" "a"
{{ osPathPipeJoin "b" "c" "d" "a" }}
"a" | osPathPipeJoin "b" "c" "d"
{{ "a" | osPathPipeJoin "b" "c" "d" }}
#
$osBase := "/"
$myOsPsjSlice := "a,b,c" | splitList ","
osPathSliceJoin $myOsPsjSlice
{{- $osBase := "/" }}
{{- $myOsPsjSlice := "a,b,c" | splitList "," }}
{{ osPathSliceJoin $myOsPsjSlice }}
#
$osBase | osPathSlicePipeJoin $myOsPsjSlice
{{ $osBase | osPathSlicePipeJoin $myOsPsjSlice }}
#
osPathSubJoin $osBase "a" "b" "c"
{{ osPathSubJoin $osBase "a" "b" "c" }}

40
tplx/sprigx/consts.go Normal file
View File

@@ -0,0 +1,40 @@
package sprigx
import (
"path"
"path/filepath"
)
var (
	// genericMap holds functions usable/intended for use in either an [html/template.FuncMap] or [text/template.FuncMap].
	genericMap map[string]any = map[string]any{
		// Debugging
		"dump": dump,

		// Strings
		"extIndent": extIndent, // PR in: https://github.com/Masterminds/sprig/pull/468

		// OS/System
		// osHostname is documented in the README ("Operating System" section) and
		// must be registered here to be reachable from templates.
		"osHostname": osHostname,
		"sysArch":    sysArch,
		"sysNumCpu":  sysNumCpu,
		"sysOsName":  sysOsNm,
		"sysRuntime": sysRuntime,

		// Paths: Generic
		"pathJoin":          path.Join,
		"pathPipeJoin":      pathPipeJoin,
		"pathSliceJoin":     pathSliceJoin,
		"pathSlicePipeJoin": pathSlicePipeJoin,
		"pathSubJoin":       pathSubJoin,

		// Paths: OS/Platform
		"osPathJoin":          filepath.Join,
		"osPathPipeJoin":      osPathPipeJoin,
		"osPathSep":           osPathSep,
		"osPathSliceJoin":     osPathSliceJoin,
		"osPathSlicePipeJoin": osPathSlicePipeJoin,
		"osPathSubJoin":       osPathSubJoin,
	}

	// htmlMap holds functions usable/intended for use in only an [html/template.FuncMap].
	htmlMap map[string]any = map[string]any{}

	// txtMap holds functions usable/intended for use in only a [text/template.FuncMap].
	txtMap map[string]any = map[string]any{}
)

16
tplx/sprigx/doc.go Normal file
View File

@@ -0,0 +1,16 @@
/*
Package sprigx aims to provide additional functions that the author believes are missing from [sprig] ([Go docs]).
It's a decent enough "basics" library, but I frequently find it falls short once you start needing domain-specific data.
These may get merged into sprig, they may not. It all depends on how responsive they are to PRs.
Given that they only update it every 6 months or so, however...
See the [full documentation] on the [repo].
[sprig]: https://masterminds.github.io/sprig/
[Go docs]: https://pkg.go.dev/github.com/Masterminds/sprig/v3
[full documentation]: https://git.r00t2.io/r00t2/go_goutils/src/branch/master/tplx/sprigx/README.adoc
[repo]: https://git.r00t2.io/r00t2/go_goutils
*/
package sprigx

64
tplx/sprigx/funcs.go Normal file
View File

@@ -0,0 +1,64 @@
package sprigx
import (
htpl "html/template"
ttpl "text/template"
)
/*
Many of these functions are modeled after sprig's.
*/
/*
FuncMap returns a generic function map.

You probably want [HtmlFuncMap] or [TxtFuncMap] instead,
as they wrap this with the appropriate type.
*/
func FuncMap() (fmap map[string]any) {

	// Return a shallow copy so callers can mutate their map
	// without touching the package-level genericMap.
	fmap = make(map[string]any, len(genericMap))
	for name, fn := range genericMap {
		fmap[name] = fn
	}

	return
}
// HtmlFuncMap returns an [html/template.FuncMap] containing the generic
// functions plus any HTML-only functions.
func HtmlFuncMap() (fmap htpl.FuncMap) {

	fmap = htpl.FuncMap(FuncMap())

	// Ranging over a nil or empty map is a no-op,
	// so no nil/len guard is needed (staticcheck S1009).
	for fn, f := range htmlMap {
		fmap[fn] = f
	}

	return
}
// TxtFuncMap returns a [text/template.FuncMap] containing the generic
// functions plus any text-only functions.
func TxtFuncMap() (fmap ttpl.FuncMap) {

	fmap = ttpl.FuncMap(FuncMap())

	// Ranging over a nil or empty map is a no-op,
	// so no nil/len guard is needed (staticcheck S1009).
	for fn, f := range txtMap {
		fmap[fn] = f
	}

	return
}

33
tplx/sprigx/funcs_test.go Normal file
View File

@@ -0,0 +1,33 @@
package sprigx
import (
`bytes`
_ "embed"
"testing"
`text/template`
"github.com/Masterminds/sprig/v3"
)
var (
	// testTplBytes holds the raw contents of the embedded test template.
	//go:embed "_test.tpl"
	testTplBytes []byte

	// testTpl is the parsed test template, with both sprig's and this
	// package's text-template function maps attached before parsing so
	// the fixture can exercise functions from either set.
	testTpl *template.Template = template.Must(
		template.
			New("").
			Funcs(sprig.TxtFuncMap()).
			Funcs(TxtFuncMap()).
			Parse(string(testTplBytes)),
	)
)
// TestFuncs renders the embedded test template and logs the output,
// failing if execution errors.
func TestFuncs(t *testing.T) {

	var out bytes.Buffer

	if err := testTpl.Execute(&out, nil); err != nil {
		t.Fatal(err)
	}

	t.Log(out.String())
}

View File

@@ -0,0 +1,17 @@
package sprigx
import (
`github.com/davecgh/go-spew/spew`
)
/*
dump renders obj via [spew.Sdump] for template-side debugging.

[spew.Sdump]: https://pkg.go.dev/github.com/davecgh/go-spew/spew
*/
func dump(obj any) string {
	return spew.Sdump(obj)
}

View File

@@ -0,0 +1,13 @@
package sprigx
import (
"os"
)
// osHostname returns the result of [os.Hostname], including its error.
// The value is whatever the kernel reports; it is not guaranteed to be an FQDN.
func osHostname() (string, error) {
	return os.Hostname()
}

View File

@@ -0,0 +1,155 @@
package sprigx
import (
`os`
`path`
`path/filepath`
)
/*
//
// GENERIC
//
*/
/*
pathPipeJoin wraps path.Join with the root element at the *end* instead of the beginning.

	{{ pathPipeJoin "b" "c" "a" }}

is equivalent to

	path.Join("a", "b", "c")

This order variation is better suited for pipelines that pass the root path,
since a pipeline's previous value arrives as the last argument.
*/
func pathPipeJoin(elems ...string) (out string) {

	// len() of a nil slice is 0; no separate nil check is needed (staticcheck S1009).
	if len(elems) == 0 {
		return
	}

	// The last element is the root; everything before it is joined underneath it.
	rootIdx := len(elems) - 1
	out = elems[rootIdx]
	if rootIdx == 0 {
		// Only the root was given; nothing to append.
		return
	}

	out = pathSubJoin(out, elems[:rootIdx]...)

	return
}
// pathSliceJoin joins a slice of path segments via path.Join
// (so the result is also cleaned).
func pathSliceJoin(sl []string) string {
	return path.Join(sl...)
}
/*
pathSlicePipeJoin behaves like a mix of pathPipeJoin (in that it accepts the root element last)
and pathSliceJoin (in that it accepts a slice of subpath segments).

It's essentially like pathSubJoin in reverse, and with an explicit slice.
*/
func pathSlicePipeJoin(sl []string, root string) string {
	return pathSubJoin(root, sl...)
}
/*
pathSubJoin is like path.Join except it takes an explicit root
and additional slice of subpaths to sequentially join to it.
*/
func pathSubJoin(root string, elems ...string) (out string) {

	// len() of a nil slice is 0; no separate nil check is needed (staticcheck S1009).
	if len(elems) == 0 {
		out = root
		return
	}

	// path.Join cleans its result, so joining the segments first and then the
	// root is equivalent to one flat join.
	out = path.Join(root, path.Join(elems...))

	return
}
/*
//
// OS/PLATFORM
//
*/
/*
osPathPipeJoin is like pathPipeJoin but uses the rendering OS' path separator (os.PathSeparator).
*/
func osPathPipeJoin(elems ...string) (out string) {

	// len() of a nil slice is 0; no separate nil check is needed (staticcheck S1009).
	if len(elems) == 0 {
		return
	}

	// The last element is the root; everything before it is joined underneath it.
	rootIdx := len(elems) - 1
	out = elems[rootIdx]
	if rootIdx == 0 {
		// Only the root was given; nothing to append.
		return
	}

	out = osPathSubJoin(out, elems[:rootIdx]...)

	return
}
// osPathSep returns os.PathSeparator as a one-character string.
func osPathSep() string {
	return string(os.PathSeparator)
}
// osPathSliceJoin is the OS-specific implementation of pathSliceJoin,
// delegating to filepath.Join.
func osPathSliceJoin(sl []string) string {
	return filepath.Join(sl...)
}
// osPathSlicePipeJoin is the OS-specific implementation of pathSlicePipeJoin:
// the root arrives last (pipeline-friendly) and the segments as a slice.
func osPathSlicePipeJoin(sl []string, root string) string {
	return osPathSubJoin(root, sl...)
}
// osPathSubJoin is the OS-specific implementation of pathSubJoin,
// using filepath.Join (and thus os.PathSeparator).
func osPathSubJoin(root string, elems ...string) (out string) {

	// len() of a nil slice is 0; no separate nil check is needed (staticcheck S1009).
	if len(elems) == 0 {
		out = root
		return
	}

	// filepath.Join cleans its result, so joining the segments first and then
	// the root is equivalent to one flat join.
	out = filepath.Join(root, filepath.Join(elems...))

	return
}

View File

@@ -0,0 +1,52 @@
package sprigx
import (
`strings`
)
/*
extIndent serves as a much more flexible alternative to the Sprig `indent`.

It has 6 arguments (the last of which may be passed in via pipeline):

  - levels: The level of indentation for the text. If less than or equal to `0`, `extIndent` just returns `<input>` as-is and NO-OPs otherwise.
  - skipFirst: If true, skip indenting the first line. This is particularly handy if you like to visually align your function calls in your templates.
  - skipEmpty: If true, do not add an indent to *empty* lines (where an "empty line" means "only has a linebreak").
  - skipWhitespace: If true, do not add an indent to lines that *only* consist of whitespace (spaces, tabs, etc.) and a linebreak.
  - indentString: The string to use as the "indent character". This can be any string, such as `" "`, `"\t"`, `"."`, `"|"`, `"=="` etc.
    (In fact, if indentString is set to "\n" and levels is always set to 1, this function can even be used to double-space text!)
  - input: The text to be indented. Because it is the last argument, `extIndent` works with pipelined text as well.

Both POSIX ("\n") and Windows ("\r\n") linebreaks are handled: splitting on "\n"
leaves a trailing "\r" on Windows lines, which the skip checks below account for.
*/
func extIndent(levels int, skipFirst, skipEmpty, skipWhitespace bool, indentString, input string) (out string) {

	var idx int
	var pad string
	var line string
	var lines []string

	// levels <= 0 is an explicit NO-OP: return the input untouched.
	if levels <= 0 {
		out = input
		return
	}

	// pad is the full indent prefix: indentString repeated <levels> times.
	pad = strings.Repeat(indentString, levels)
	lines = strings.Split(input, "\n")

	for idx, line = range lines {
		// Optionally leave the first line as-is (e.g. for visually aligned template calls).
		if idx == 0 && skipFirst {
			continue
		}
		// Whitespace-only lines (including a bare "\r" left over from a Windows
		// linebreak) are skipped here; the `line != ""` guard leaves fully empty
		// lines to the skipEmpty check below.
		if skipWhitespace && strings.TrimSpace(line) == "" && line != "" {
			continue
		}
		// Empty lines: "" (POSIX) or "\r" (Windows, after splitting on "\n").
		if skipEmpty && (line == "" || line == "\r") {
			continue
		}
		lines[idx] = pad + line
	}

	// Rejoin with "\n"; any "\r" retained per-line restores Windows linebreaks.
	out = strings.Join(lines, "\n")

	return
}

View File

@@ -0,0 +1,47 @@
package sprigx
import (
`fmt`
`runtime`
)
// sysRuntime returns various information from [runtime],
// with every value rendered as a string for template friendliness.
func sysRuntime() (out map[string]string) {

	out = map[string]string{
		"arch":     runtime.GOARCH,
		"compiler": runtime.Compiler,
		"cpu_cnt":  fmt.Sprintf("%d", runtime.NumCPU()),
		"go_ver":   runtime.Version(),
		// GOMAXPROCS(-1) only *reads* the current value; it never sets it.
		"maxprocs": fmt.Sprintf("%d", runtime.GOMAXPROCS(-1)),
		"num_cgo":  fmt.Sprintf("%d", runtime.NumCgoCall()),
		"num_go":   fmt.Sprintf("%d", runtime.NumGoroutine()),
		"os":       runtime.GOOS,
	}

	return
}
// sysArch returns [runtime.GOARCH].
func sysArch() string {
	return runtime.GOARCH
}
// sysNumCpu returns the result from [runtime.NumCPU], rendered as a string.
func sysNumCpu() (out string) {

	var cnt int = runtime.NumCPU()

	out = fmt.Sprintf("%d", cnt)

	return
}
// sysOsNm returns [runtime.GOOS].
func sysOsNm() string {
	return runtime.GOOS
}

19
tplx/types.go Normal file
View File

@@ -0,0 +1,19 @@
package tplx
import (
htmlTpl `html/template`
`io`
txtTpl `text/template`
)
type (
	// tplType discriminates template flavors internally (e.g. text vs. HTML).
	// NOTE(review): its concrete values are declared elsewhere in the package —
	// confirm against those declarations.
	tplType uint8
)
type (
	// Template is a generic constraint satisfied by both [text/template.Template]
	// and [html/template.Template] pointers, exposing the execution methods the
	// two types share so generic code can render either flavor.
	Template interface {
		*txtTpl.Template | *htmlTpl.Template

		// Execute renders the template to w with obj as the data context.
		Execute(w io.Writer, obj any) (err error)
		// ExecuteTemplate renders the named associated template tplNm to w.
		ExecuteTemplate(w io.Writer, tplNm string, obj any) (err error)
	}
)