Compare commits
10 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
927ad08057
|
||
|
|
2edbc9306d
|
||
|
|
bb71be187f
|
||
|
|
834395c050
|
||
|
|
ef56898d6b
|
||
|
|
006cf39fa1
|
||
|
|
145c32268e
|
||
|
|
6ddfcdb416
|
||
|
|
79f10b7611
|
||
|
|
01adbfc605
|
31
.githooks/pre-commit/01-docgen
Executable file
31
.githooks/pre-commit/01-docgen
Executable file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
orig="${PWD}"
|
||||||
|
|
||||||
|
if ! command -v asciidoctor &> /dev/null;
|
||||||
|
then
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
for f in $(find . -type f -iname "README.adoc"); do
|
||||||
|
filename=$(basename -- "${f}")
|
||||||
|
docsdir=$(dirname -- "${f}")
|
||||||
|
nosuffix="${filename%.*}"
|
||||||
|
pfx="${docsdir}/${nosuffix}"
|
||||||
|
|
||||||
|
newf="${pfx}.html"
|
||||||
|
asciidoctor -a ROOTDIR="${orig}/" -o "${newf}" "${f}"
|
||||||
|
echo "Generated ${newf} from ${f}"
|
||||||
|
git add "${newf}"
|
||||||
|
if command -v pandoc &> /dev/null;
|
||||||
|
then
|
||||||
|
newf="${pfx}.md"
|
||||||
|
asciidoctor -a ROOTDIR="${orig}/" -b docbook -o - "${f}" | pandoc -f docbook -t markdown_strict -o "${newf}"
|
||||||
|
echo "Generated ${newf} from ${f}"
|
||||||
|
git add "${newf}"
|
||||||
|
fi
|
||||||
|
cd ${orig}
|
||||||
|
done
|
||||||
|
echo "Regenerated docs"
|
||||||
21
go.mod
21
go.mod
@@ -1,16 +1,27 @@
|
|||||||
module r00t2.io/goutils
|
module r00t2.io/goutils
|
||||||
|
|
||||||
go 1.24.5
|
go 1.25
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/coreos/go-systemd/v22 v22.5.0
|
github.com/Masterminds/sprig/v3 v3.3.0
|
||||||
|
github.com/coreos/go-systemd/v22 v22.6.0
|
||||||
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
||||||
golang.org/x/sys v0.34.0
|
golang.org/x/sys v0.39.0
|
||||||
r00t2.io/sysutils v1.14.0
|
r00t2.io/sysutils v1.15.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
dario.cat/mergo v1.0.1 // indirect
|
||||||
|
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||||
|
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||||
github.com/djherbis/times v1.6.0 // indirect
|
github.com/djherbis/times v1.6.0 // indirect
|
||||||
golang.org/x/sync v0.16.0 // indirect
|
github.com/huandu/xstrings v1.5.0 // indirect
|
||||||
|
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||||
|
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||||
|
github.com/shopspring/decimal v1.4.0 // indirect
|
||||||
|
github.com/spf13/cast v1.7.0 // indirect
|
||||||
|
golang.org/x/crypto v0.26.0 // indirect
|
||||||
|
golang.org/x/sync v0.19.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
58
go.sum
58
go.sum
@@ -1,16 +1,56 @@
|
|||||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||||
|
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||||
|
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
||||||
|
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
|
||||||
|
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||||
|
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
|
||||||
|
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||||
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
|
||||||
|
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
|
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||||
|
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||||
|
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||||
|
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
|
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
|
||||||
|
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
|
||||||
|
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
|
||||||
|
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||||
|
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||||
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
|
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
|
||||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
|
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
|
||||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
|
||||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||||
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
r00t2.io/sysutils v1.14.0 h1:Lrio3uPi9CuUdg+sg3WkVV1CK/qcOpV9GdFCGFG1KJs=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
r00t2.io/sysutils v1.14.0/go.mod h1:ZJ7gZxFVQ7QIokQ5fPZr7wl0XO5Iu+LqtE8j3ciRINw=
|
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||||
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
r00t2.io/sysutils v1.15.1 h1:0EVZZAxTFqQN6jjfjqUKkXye0LMshUA5MO7l3Wd6wH8=
|
||||||
|
r00t2.io/sysutils v1.15.1/go.mod h1:T0iOnaZaSG5NE1hbXTqojRZc0ia/u8TB73lV7zhMz58=
|
||||||
|
|||||||
@@ -1,4 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
Package iox includes extensions to the stdlib `io` module.
|
Package iox includes extensions to the stdlib `io` module.
|
||||||
|
|
||||||
|
Not everything in here is considered fully stabilized yet,
|
||||||
|
but it should be usable.
|
||||||
*/
|
*/
|
||||||
package iox
|
package iox
|
||||||
|
|||||||
@@ -6,4 +6,12 @@ import (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
ErrBufTooSmall error = errors.New("buffer too small; buffer size must be > 0")
|
ErrBufTooSmall error = errors.New("buffer too small; buffer size must be > 0")
|
||||||
|
ErrChunkTooBig error = errors.New("chunk too big for method")
|
||||||
|
ErrChunkTooSmall error = errors.New("chunk too small for buffer")
|
||||||
|
ErrInvalidChunkSize error = errors.New("an invalid chunk size was passed")
|
||||||
|
ErrNilCtx error = errors.New("a nil context was passed")
|
||||||
|
ErrNilReader error = errors.New("a nil reader was passed")
|
||||||
|
ErrNilWriter error = errors.New("a nil writer was passed")
|
||||||
|
ErrShortRead error = errors.New("a read was cut short with no EOF")
|
||||||
|
ErrShortWrite error = errors.New("a write was cut short with no error")
|
||||||
)
|
)
|
||||||
|
|||||||
222
iox/funcs.go
222
iox/funcs.go
@@ -1,20 +1,21 @@
|
|||||||
package iox
|
package iox
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
`context`
|
||||||
`io`
|
`io`
|
||||||
)
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
CopyBufN is a mix between io.CopyN and io.CopyBuffer.
|
CopyBufN is a mix between [io.CopyN] and [io.CopyBuffer].
|
||||||
|
|
||||||
Despite what the docs may suggest, io.CopyN does NOT *read* n bytes from src AND write n bytes to dst.
|
Despite what the docs may suggest, [io.CopyN] does NOT *read* n bytes from src AND write n bytes to dst.
|
||||||
Instead, it always reads 32 KiB from src, and writes n bytes to dst.
|
Instead, it always reads 32 KiB from src, and writes n bytes to dst.
|
||||||
|
|
||||||
There are, of course, cases where this is deadfully undesired.
|
There are cases where this is dreadfully undesired.
|
||||||
|
|
||||||
One can, of course, use io.CopyBuffer, but this is a bit annoying since you then have to provide a buffer yourself.
|
One can, of course, use [io.CopyBuffer], but this is a bit annoying since you then have to provide a buffer yourself.
|
||||||
|
|
||||||
This convenience-wraps io.CopyBuffer to have a similar signature to io.CopyN but properly uses n for both reading and writing.
|
This convenience-wraps [io.CopyBuffer] to have a similar signature to [io.CopyN] but properly uses n for both reading and writing.
|
||||||
*/
|
*/
|
||||||
func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
|
func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
|
||||||
|
|
||||||
@@ -32,10 +33,215 @@ func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyBufWith allows for specifying a buffer allocator function, otherwise acts as CopyBufN.
|
// CopyCtxBufN copies from `src` to `dst`, `n` bytes at a time, interruptible by `ctx`.
|
||||||
func CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
|
func CopyCtxBufN(ctx context.Context, dst io.Writer, src io.Reader, n int64) (written int64, err error) {
|
||||||
|
|
||||||
written, err = io.CopyBuffer(dst, src, bufFunc())
|
var nr int
|
||||||
|
var nw int
|
||||||
|
var end bool
|
||||||
|
var buf []byte
|
||||||
|
|
||||||
|
if ctx == nil {
|
||||||
|
err = ErrNilCtx
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if n <= 0 {
|
||||||
|
err = ErrBufTooSmall
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
endCopy:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
err = ctx.Err()
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
buf = make([]byte, n)
|
||||||
|
nr, err = src.Read(buf)
|
||||||
|
if err == io.EOF {
|
||||||
|
err = nil
|
||||||
|
end = true
|
||||||
|
} else if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
buf = buf[:nr]
|
||||||
|
|
||||||
|
if nw, err = dst.Write(buf); err != nil {
|
||||||
|
written += int64(nw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
written += int64(nw)
|
||||||
|
if len(buf) != nw {
|
||||||
|
err = io.ErrShortWrite
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if end {
|
||||||
|
break endCopy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
CopyBufWith allows for specifying a buffer allocator function, otherwise acts as [CopyBufN].
|
||||||
|
|
||||||
|
bufFunc *MUST NOT* return a nil or len == 0 buffer. [ErrBufTooSmall] will be returned if it does.
|
||||||
|
|
||||||
|
This uses a fixed buffer size from a single call to `bufFunc`.
|
||||||
|
If you need something with dynamic buffer sizing according to some state, use [CopyBufWithDynamic] instead.
|
||||||
|
(Note that CopyBufWithDynamic is generally a little slower, but it should only be noticeable on very large amounts of data.)
|
||||||
|
*/
|
||||||
|
func CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
|
||||||
|
|
||||||
|
var buf []byte = bufFunc()
|
||||||
|
|
||||||
|
if buf == nil || len(buf) == 0 {
|
||||||
|
err = ErrBufTooSmall
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
written, err = io.CopyBuffer(dst, src, buf)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
CopyBufWithDynamic is like [CopyBufWith] except it will call bufFunc after each previous buffer is written.
|
||||||
|
|
||||||
|
That is to say (using a particularly contrived example):
|
||||||
|
|
||||||
|
import time
|
||||||
|
|
||||||
|
func dynBuf() (b []byte) {
|
||||||
|
|
||||||
|
var t time.Time = time.Now()
|
||||||
|
|
||||||
|
b = make([]byte, t.Seconds())
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
Then:
|
||||||
|
|
||||||
|
CopyBufWithDynamic(w, r, dynBuf)
|
||||||
|
|
||||||
|
will use a buffer sized to the seconds of the time it reads in/writes out the next buffer, whereas with [CopyBufWith]:
|
||||||
|
|
||||||
|
CopyBufWith(w, r, dynBuf)
|
||||||
|
|
||||||
|
would use a *fixed* buffer size of whatever the seconds was equal to at the time of the *first call* to dynBuf.
|
||||||
|
|
||||||
|
`src` MUST return an [io.EOF] when its end is reached, but (as per e.g. [io.CopyBuffer]) the io.EOF error will not
|
||||||
|
be returned from CopyBufWithDynamic. (Any/all other errors encountered will be returned, however, and copying will
|
||||||
|
immediately cease.)
|
||||||
|
*/
|
||||||
|
func CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
|
||||||
|
|
||||||
|
var nr int
|
||||||
|
var nw int
|
||||||
|
var end bool
|
||||||
|
var buf []byte
|
||||||
|
|
||||||
|
for {
|
||||||
|
buf = bufFunc()
|
||||||
|
if buf == nil || len(buf) == 0 {
|
||||||
|
err = ErrBufTooSmall
|
||||||
|
return
|
||||||
|
}
|
||||||
|
nr, err = src.Read(buf)
|
||||||
|
if err == io.EOF {
|
||||||
|
err = nil
|
||||||
|
end = true
|
||||||
|
} else if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
buf = buf[:nr]
|
||||||
|
|
||||||
|
if nw, err = dst.Write(buf); err != nil {
|
||||||
|
written += int64(nw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
written += int64(nw)
|
||||||
|
if len(buf) != nw {
|
||||||
|
err = ErrShortWrite
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if end {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewChunker returns a [ChunkLocker] ready to use.
|
||||||
|
func NewChunker(chunkSize uint) (c *ChunkLocker, err error) {
|
||||||
|
|
||||||
|
c = &ChunkLocker{}
|
||||||
|
err = c.SetChunkLen(chunkSize)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCtxIO returns a [CtxIO].
|
||||||
|
func NewCtxIO(ctx context.Context, r io.Reader, w io.Writer, chunkSize uint) (c *CtxIO, err error) {
|
||||||
|
|
||||||
|
if r == nil {
|
||||||
|
err = ErrNilReader
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if w == nil {
|
||||||
|
err = ErrNilWriter
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if chunkSize == 0 {
|
||||||
|
err = ErrInvalidChunkSize
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx == nil {
|
||||||
|
err = ErrNilCtx
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c = &CtxIO{
|
||||||
|
r: r,
|
||||||
|
w: w,
|
||||||
|
l: ChunkLocker{
|
||||||
|
chunkLen: chunkSize,
|
||||||
|
},
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
NewXIO returns a nil [XIO].
|
||||||
|
|
||||||
|
A weird "feature" of Golang is that a nil XIO is perfectly fine to use;
|
||||||
|
it's completely stateless and only has pointer receivers that only work with passed in
|
||||||
|
values so `new(XIO)` is completely unnecessary (as is NewXCopier).
|
||||||
|
In other words, this works fine:
|
||||||
|
|
||||||
|
var xc *iox.XIO
|
||||||
|
|
||||||
|
if n, err = xc.Copy(w, r); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
This function is just to maintain cleaner-looking code if you should so need it,
|
||||||
|
or want an XIO without declaring one:
|
||||||
|
|
||||||
|
if n, err = iox.NewXCopier().Copy(w, r); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
func NewXIO() (x *XIO) {
|
||||||
|
// No-op lel
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|||||||
28
iox/funcs_chunklocker.go
Normal file
28
iox/funcs_chunklocker.go
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
package iox
|
||||||
|
|
||||||
|
// GetChunkLen returns the current chunk size/length in bytes.
|
||||||
|
func (c *ChunkLocker) GetChunkLen() (size uint) {
|
||||||
|
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
|
||||||
|
size = c.chunkLen
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChunkLen sets the current chunk size/length in bytes.
|
||||||
|
func (c *ChunkLocker) SetChunkLen(size uint) (err error) {
|
||||||
|
|
||||||
|
if size == 0 {
|
||||||
|
err = ErrInvalidChunkSize
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
c.chunkLen = size
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
173
iox/funcs_ctxio.go
Normal file
173
iox/funcs_ctxio.go
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
package iox
|
||||||
|
|
||||||
|
import (
|
||||||
|
`bytes`
|
||||||
|
`context`
|
||||||
|
`io`
|
||||||
|
`math`
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c *CtxIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) {
|
||||||
|
if c.l.chunkLen > math.MaxInt64 {
|
||||||
|
err = ErrChunkTooBig
|
||||||
|
}
|
||||||
|
return CopyCtxBufN(c.ctx, dst, src, int64(c.l.chunkLen))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
|
||||||
|
if n <= 0 {
|
||||||
|
err = ErrBufTooSmall
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return CopyCtxBufN(c.ctx, dst, src, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) GetChunkLen() (size uint) {
|
||||||
|
return c.l.GetChunkLen()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) Read(p []byte) (n int, err error) {
|
||||||
|
|
||||||
|
var nr int64
|
||||||
|
|
||||||
|
if nr, err = c.ReadWithContext(c.ctx, p); err != nil {
|
||||||
|
if nr > math.MaxInt {
|
||||||
|
n = math.MaxInt
|
||||||
|
} else {
|
||||||
|
n = int(nr)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if nr > math.MaxInt {
|
||||||
|
n = math.MaxInt
|
||||||
|
} else {
|
||||||
|
n = int(nr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) ReadWithContext(ctx context.Context, p []byte) (n int64, err error) {
|
||||||
|
|
||||||
|
var nr int
|
||||||
|
var off int
|
||||||
|
var buf []byte
|
||||||
|
|
||||||
|
if p == nil || len(p) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if c.buf.Len() == 0 {
|
||||||
|
err = io.EOF
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.l.chunkLen > uint(len(p)) {
|
||||||
|
// Would normally be a single chunk, so one-shot it.
|
||||||
|
nr, err = c.buf.Read(p)
|
||||||
|
n = int64(nr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chunk over it.
|
||||||
|
endRead:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
err = ctx.Err()
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
/*
|
||||||
|
off(set) is the index of the *next position* to write to.
|
||||||
|
Therefore the last offset == len(p),
|
||||||
|
therefore:
|
||||||
|
|
||||||
|
* if off == len(p), "done" (return no error, do *not* read from buf)
|
||||||
|
* if off + c.l.chunkLen > len(p), buf should be len(p) - off instead
|
||||||
|
*/
|
||||||
|
if off == len(p) {
|
||||||
|
break endRead
|
||||||
|
}
|
||||||
|
if uint(off)+c.l.chunkLen > uint(len(p)) {
|
||||||
|
buf = make([]byte, len(p)-off)
|
||||||
|
} else {
|
||||||
|
buf = make([]byte, c.l.chunkLen)
|
||||||
|
}
|
||||||
|
nr, err = c.buf.Read(buf)
|
||||||
|
n += int64(nr)
|
||||||
|
if nr > 0 {
|
||||||
|
off += nr
|
||||||
|
copy(p[off:], buf[:nr])
|
||||||
|
}
|
||||||
|
if err == io.EOF {
|
||||||
|
break endRead
|
||||||
|
} else if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) SetChunkLen(size uint) (err error) {
|
||||||
|
return c.l.SetChunkLen(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) SetContext(ctx context.Context) (err error) {
|
||||||
|
|
||||||
|
if ctx == nil {
|
||||||
|
err = ErrNilCtx
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.ctx = ctx
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) Write(p []byte) (n int, err error) {
|
||||||
|
|
||||||
|
var nw int64
|
||||||
|
|
||||||
|
if c.l.chunkLen > math.MaxInt64 {
|
||||||
|
err = ErrChunkTooBig
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if nw, err = c.WriteNWithContext(c.ctx, p, int64(c.l.chunkLen)); err != nil {
|
||||||
|
if nw > math.MaxInt {
|
||||||
|
n = math.MaxInt
|
||||||
|
} else {
|
||||||
|
n = int(nw)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if nw > math.MaxInt {
|
||||||
|
n = math.MaxInt
|
||||||
|
} else {
|
||||||
|
n = int(nw)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) WriteNWithContext(ctx context.Context, p []byte, n int64) (written int64, err error) {
|
||||||
|
return CopyCtxBufN(ctx, &c.buf, bytes.NewReader(p), n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) WriteRune(r rune) (n int, err error) {
|
||||||
|
|
||||||
|
// We don't even bother listening for the ctx.Done because it's a single rune.
|
||||||
|
n, err = c.buf.WriteRune(r)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CtxIO) WriteWithContext(ctx context.Context, p []byte) (n int64, err error) {
|
||||||
|
if c.l.chunkLen > math.MaxInt64 {
|
||||||
|
err = ErrChunkTooBig
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return CopyCtxBufN(ctx, &c.buf, bytes.NewReader(p), int64(c.l.chunkLen))
|
||||||
|
}
|
||||||
40
iox/funcs_xio.go
Normal file
40
iox/funcs_xio.go
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
package iox
|
||||||
|
|
||||||
|
import (
|
||||||
|
`io`
|
||||||
|
)
|
||||||
|
|
||||||
|
// Copy copies [io.Reader] `src` to [io.Writer] `dst`. It implements [Copier].
|
||||||
|
func (x *XIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) {
|
||||||
|
return io.Copy(dst, src)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyBuffer copies [io.Reader] `src` to [io.Writer] `dst` using buffer `buf`. It implements [CopyBufferer].
|
||||||
|
func (x *XIO) CopyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
|
||||||
|
return io.CopyBuffer(dst, src, buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyBufWith copies [io.Reader] `src` to [io.Writer] `dst` using buffer returner `bufFunc`. It implements [SizedCopyBufferInvoker].
|
||||||
|
func (x *XIO) CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
|
||||||
|
return CopyBufWith(dst, src, bufFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyBufWithDynamic copies [io.Reader] `src` to [io.Writer] `dst` using buffer returner `bufFunc` for each chunk. It implements [DynamicSizedCopyBufferInvoker].
|
||||||
|
func (x *XIO) CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
|
||||||
|
return CopyBufWithDynamic(dst, src, bufFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
CopyBufN reads buffered bytes from [io.Reader] `src` and copies to [io.Writer] `dst`
|
||||||
|
using the synchronous buffer size `n`.
|
||||||
|
|
||||||
|
It implements [SizedCopyBufferer].
|
||||||
|
*/
|
||||||
|
func (x *XIO) CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
|
||||||
|
return CopyBufN(dst, src, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyN copies from [io.Reader] `src` to [io.Writer] `w`, `n` bytes at a time. It implements [SizedCopier].
|
||||||
|
func (x *XIO) CopyN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
|
||||||
|
return io.CopyN(dst, src, n)
|
||||||
|
}
|
||||||
203
iox/types.go
203
iox/types.go
@@ -1,8 +1,209 @@
|
|||||||
package iox
|
package iox
|
||||||
|
|
||||||
|
import (
|
||||||
|
`bytes`
|
||||||
|
`context`
|
||||||
|
`io`
|
||||||
|
`sync`
|
||||||
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// RuneWriter matches the behavior of *(bytes.Buffer).WriteRune and *(bufio.Writer).WriteRune
|
/*
|
||||||
|
RuneWriter matches the behavior of [bytes.Buffer.WriteRune] and [bufio.Writer.WriteRune].
|
||||||
|
|
||||||
|
(Note that this package does not have a "RuneReader"; see [io.RuneReader] instead.)
|
||||||
|
*/
|
||||||
RuneWriter interface {
|
RuneWriter interface {
|
||||||
WriteRune(r rune) (n int, err error)
|
WriteRune(r rune) (n int, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Copier matches the signature/behavior of [io.Copy]. Implemented by [XIO].
|
||||||
|
Copier interface {
|
||||||
|
Copy(dst io.Writer, src io.Reader) (written int64, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyBufferer matches the signature/behavior of [io.CopyBuffer]. Implemented by [XIO].
|
||||||
|
CopyBufferer interface {
|
||||||
|
CopyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SizedCopier matches the signature/behavior of [io.CopyN]. Implemented by [XIO].
|
||||||
|
SizedCopier interface {
|
||||||
|
CopyN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SizedCopyBufferer matches the signature/behavior of [CopyBufN]. Implemented by [XIO].
|
||||||
|
SizedCopyBufferer interface {
|
||||||
|
CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SizedCopyBufferInvoker matches the signature/behavior of [CopyBufWith]. Implemented by [XIO].
|
||||||
|
SizedCopyBufferInvoker interface {
|
||||||
|
CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DynamicSizedCopyBufferInvoker matches the signature/behavior of [CopyBufWithDynamic]. Implemented by [XIO].
|
||||||
|
DynamicSizedCopyBufferInvoker interface {
|
||||||
|
CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Chunker is used by both [ContextReader] and [ContextWriter] to set/get the current chunk size.
|
||||||
|
Chunking is inherently required to be specified in order to interrupt reads/writes/copies with a [context.Context].
|
||||||
|
|
||||||
|
Implementations *must* use a [sync.RWMutex] to get (RLock) and set (Lock) the chunk size.
|
||||||
|
The chunk size *must not* be directly accessible to maintain concurrency safety assumptions.
|
||||||
|
*/
|
||||||
|
Chunker interface {
|
||||||
|
// GetChunkLen returns the current chunk size/length in bytes.
|
||||||
|
GetChunkLen() (size uint)
|
||||||
|
// SetChunkLen sets the current chunk size/length in bytes.
|
||||||
|
SetChunkLen(size uint) (err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ChunkReader implements a chunking reader.
|
||||||
|
Third-party implementations *must* respect the chunk size locking (see [Chunker]).
|
||||||
|
|
||||||
|
The Read method should read in chunks of the internal chunk size.
|
||||||
|
*/
|
||||||
|
ChunkReader interface {
|
||||||
|
io.Reader
|
||||||
|
Chunker
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ChunkWriter implements a chunking writer.
|
||||||
|
Third-party implementations *must* respect the chunk size locking (see [Chunker]).
|
||||||
|
|
||||||
|
The Write method should write out in chunks of the internal chunk size.
|
||||||
|
*/
|
||||||
|
ChunkWriter interface {
|
||||||
|
io.Writer
|
||||||
|
Chunker
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChunkReadWriter implements a chunking reader/writer.
|
||||||
|
ChunkReadWriter interface {
|
||||||
|
ChunkReader
|
||||||
|
ChunkWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ContextSetter allows one to set an internal context.
|
||||||
|
|
||||||
|
A nil context should return an error.
|
||||||
|
*/
|
||||||
|
ContextSetter interface {
|
||||||
|
SetContext(context context.Context) (err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ContextCopier is defined to allow for consumer-provided types. See [CtxIO] for a package-provided type.
|
||||||
|
|
||||||
|
The Copy method should use an internal context and chunk size
|
||||||
|
(and thus wrap [CopyCtxBufN] internally on an external call to Copy, etc.).
|
||||||
|
*/
|
||||||
|
ContextCopier interface {
|
||||||
|
Copier
|
||||||
|
Chunker
|
||||||
|
ContextSetter
|
||||||
|
SizedCopyBufferer
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ContextReader is primarily here to allow for consumer-provided types. See [CtxIO] for a package-provided type.
|
||||||
|
|
||||||
|
The Read method should use an internal context and chunk size.
|
||||||
|
|
||||||
|
The ReadWithContext method should use an internal chunk size.
|
||||||
|
*/
|
||||||
|
ContextReader interface {
|
||||||
|
ChunkReader
|
||||||
|
ContextSetter
|
||||||
|
ReadWithContext(ctx context.Context, p []byte) (n int64, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ContextWriter is primarily here to allow for consumer-provided types. See [CtxIO] for a package-provided type.
|
||||||
|
|
||||||
|
The Write method should use an internal context.
|
||||||
|
|
||||||
|
The WriteWithContext should use an internal chunk size.
|
||||||
|
*/
|
||||||
|
ContextWriter interface {
|
||||||
|
ChunkWriter
|
||||||
|
ContextSetter
|
||||||
|
WriteWithContext(ctx context.Context, p []byte) (n int64, err error)
|
||||||
|
WriteNWithContext(ctx context.Context, p []byte, n int64) (written int64, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ContextReadWriter is primarily here to allow for consumer-provided types.
|
||||||
|
|
||||||
|
See [CtxIO] for a package-provided type.
|
||||||
|
*/
|
||||||
|
ContextReadWriter interface {
|
||||||
|
ContextReader
|
||||||
|
ContextWriter
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// ChunkLocker implements [Chunker].
|
||||||
|
ChunkLocker struct {
|
||||||
|
lock sync.RWMutex
|
||||||
|
chunkLen uint
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
CtxIO is a type used to demonstrate "stateful" I/O introduced by this package.
|
||||||
|
It implements:
|
||||||
|
|
||||||
|
* [Copier]
|
||||||
|
* [Chunker]
|
||||||
|
* [RuneWriter]
|
||||||
|
* [ChunkReader]
|
||||||
|
* [ChunkWriter]
|
||||||
|
* [ContextCopier]
|
||||||
|
* [ContextSetter]
|
||||||
|
* [ContextReader]
|
||||||
|
* [ContextWriter]
|
||||||
|
* [ChunkReadWriter]
|
||||||
|
* [ContextReadWriter]
|
||||||
|
* [SizedCopyBufferer]
|
||||||
|
|
||||||
|
Unlike [XIO], it must be non-nil (see [NewCtxIO]) since it maintains state
|
||||||
|
(though technically, one does not need to call [NewCtxIO] if they call
|
||||||
|
[CtxIO.SetChunkLen] and [CtxIO.SetContext] before any other methods).
|
||||||
|
|
||||||
|
[CtxIO.Read] and other Read methods writes to an internal buffer,
|
||||||
|
and [CtxIO.Write] and other Write methods writes out from it.
|
||||||
|
*/
|
||||||
|
CtxIO struct {
|
||||||
|
r io.Reader
|
||||||
|
w io.Writer
|
||||||
|
l ChunkLocker
|
||||||
|
buf bytes.Buffer
|
||||||
|
ctx context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
XIO is a type used to demonstrate "stateless" I/O introduced by this package.
|
||||||
|
It implements:
|
||||||
|
|
||||||
|
* [Copier]
|
||||||
|
* [CopyBufferer]
|
||||||
|
* [SizedCopier]
|
||||||
|
* [SizedCopyBufferer]
|
||||||
|
* [SizedCopyBufferInvoker]
|
||||||
|
* [DynamicSizedCopyBufferInvoker]
|
||||||
|
|
||||||
|
Unlike [CtxIO], the zero-value is ready to use since it holds no state
|
||||||
|
or configuration whatsoever.
|
||||||
|
|
||||||
|
A nil XIO is perfectly usable but if you want something more idiomatic,
|
||||||
|
see [NewXIO].
|
||||||
|
*/
|
||||||
|
XIO struct{}
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
- logging probably needs mutexes
|
||||||
|
|
||||||
- macOS support beyond the legacy NIX stuff. it apparently uses something called "ULS", "Unified Logging System".
|
- macOS support beyond the legacy NIX stuff. it apparently uses something called "ULS", "Unified Logging System".
|
||||||
-- https://developer.apple.com/documentation/os/logging
|
-- https://developer.apple.com/documentation/os/logging
|
||||||
-- https://developer.apple.com/documentation/os/generating-log-messages-from-your-code
|
-- https://developer.apple.com/documentation/os/generating-log-messages-from-your-code
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ import (
|
|||||||
Only the first logPaths entry that "works" will be used, later entries will be ignored.
|
Only the first logPaths entry that "works" will be used, later entries will be ignored.
|
||||||
Currently this will almost always return a WinLogger.
|
Currently this will almost always return a WinLogger.
|
||||||
*/
|
*/
|
||||||
func (m *MultiLogger) AddDefaultLogger(identifier string, eventIDs *WinEventID, logFlags int, logPaths ...string) (err error) {
|
func (m *MultiLogger) AddDefaultLogger(identifier string, logFlags int, logPaths ...string) (err error) {
|
||||||
|
|
||||||
var l Logger
|
var l Logger
|
||||||
var exists bool
|
var exists bool
|
||||||
@@ -36,9 +36,9 @@ func (m *MultiLogger) AddDefaultLogger(identifier string, eventIDs *WinEventID,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if logPaths != nil {
|
if logPaths != nil {
|
||||||
l, err = GetLogger(m.EnableDebug, m.Prefix, eventIDs, logFlags, logPaths...)
|
l, err = GetLogger(m.EnableDebug, m.Prefix, logFlags, logPaths...)
|
||||||
} else {
|
} else {
|
||||||
l, err = GetLogger(m.EnableDebug, m.Prefix, eventIDs, logFlags)
|
l, err = GetLogger(m.EnableDebug, m.Prefix, logFlags)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -10,32 +10,63 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GetLogger returns an instance of Logger that best suits your system's capabilities. Note that this is a VERY generalized interface to the Windows Event Log.
|
GetLogger returns an instance of Logger that best suits your system's capabilities.
|
||||||
|
Note that this is a VERY generalized interface to the Windows Event Log to conform with multiplatform compat.
|
||||||
|
You'd have a little more flexibility with [GetLoggerWindows] (this function wraps that one).
|
||||||
|
If you need more custom behavior than that, I recommend using [golang.org/x/sys/windows/svc/eventlog] directly
|
||||||
|
(or using another logging module).
|
||||||
|
|
||||||
|
If `enableDebug` is true, debug messages (which according to your program may or may not contain sensitive data) are rendered and written (otherwise they are ignored).
|
||||||
|
|
||||||
|
The `prefix` correlates to the `source` parameter in [GetLoggerWindows], and this function inherently uses [DefaultEventID],
|
||||||
|
but otherwise it remains the same as [GetLoggerWindows] - refer to it for documentation on the other parameters.
|
||||||
|
|
||||||
|
If you call [GetLogger], you will only get a single ("best") logger your system supports.
|
||||||
|
If you want to log to multiple [Logger] destinations at once (or want to log to an explicit [Logger] type),
|
||||||
|
use [GetMultiLogger].
|
||||||
|
*/
|
||||||
|
func GetLogger(enableDebug bool, prefix string, logConfigFlags int, logPaths ...string) (logger Logger, err error) {
|
||||||
|
|
||||||
|
if logger, err = GetLoggerWindows(enableDebug, prefix, DefaultEventID, logConfigFlags, logPaths...); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GetLoggerWindows returns an instance of Logger that best suits your system's capabilities.
|
||||||
|
This is a slightly less (but still quite) generalized interface to the Windows Event Log than [GetLogger].
|
||||||
|
|
||||||
If you require more robust logging capabilities (e.g. custom event IDs per uniquely identifiable event),
|
If you require more robust logging capabilities (e.g. custom event IDs per uniquely identifiable event),
|
||||||
you will want to set up your own logger (golang.org/x/sys/windows/svc/eventlog).
|
you will want to set up your own logger via [golang.org/x/sys/windows/svc/eventlog].
|
||||||
|
|
||||||
If enableDebug is true, debug messages (which according to your program may or may not contain sensitive data) are rendered and written (otherwise they are ignored).
|
If `enableDebug` is true, debug messages (which according to your program may or may not contain sensitive data)
|
||||||
|
are rendered and written (otherwise they are ignored).
|
||||||
|
|
||||||
A blank source will return an error as it's used as the source name. Other functions, struct fields, etc. will refer to this as the "prefix".
|
A blank `source` will return an error as it's used as the source name.
|
||||||
|
Throughout the rest of this documentation you will see this referred to as the `prefix` to remain platform-agnostic.
|
||||||
|
|
||||||
A pointer to a WinEventID struct may be specified for eventIDs to map extended logging levels (as Windows only supports three levels natively).
|
A pointer to a [WinEventID] struct may be specified for `eventIDs` to map extended logging levels
|
||||||
|
(as Windows only supports three levels natively).
|
||||||
If it is nil, a default one (DefaultEventID) will be used.
|
If it is nil, a default one (DefaultEventID) will be used.
|
||||||
|
|
||||||
logConfigFlags is the corresponding flag(s) OR'd for StdLogger.LogFlags / FileLogger.StdLogger.LogFlags if either is selected. See StdLogger.LogFlags and
|
`logConfigFlags` is the corresponding flag(s) OR'd for [StdLogger.LogFlags] (and/or the [StdLogger.LogFlags] for [FileLogger])
|
||||||
https://pkg.go.dev/log#pkg-constants for details.
|
if either is selected. See [StdLogger.LogFlags] and [stdlib log's constants] for details.
|
||||||
|
|
||||||
logPaths is an (optional) list of strings to use as paths to test for writing. If the file can be created/written to,
|
`logPaths` is an (optional) list of strings to use as paths to test for writing.
|
||||||
it will be used (assuming you have no higher-level loggers available).
|
If the file can be created/written to, it will be used (assuming you have no higher-level loggers available).
|
||||||
|
|
||||||
Only the first logPaths entry that "works" will be used, later entries will be ignored.
|
Only the first `logPaths` entry that "works" will be used, later entries will be ignored.
|
||||||
Currently this will almost always return a WinLogger.
|
Currently this will almost always return a [WinLogger].
|
||||||
|
|
||||||
If you call GetLogger, you will only get a single ("best") logger your system supports.
|
If you call [GetLoggerWindows], you will only get a single ("best") logger your system supports.
|
||||||
If you want to log to multiple Logger destinations at once (or want to log to an explicit Logger type),
|
If you want to log to multiple [Logger] destinations at once (or want to log to an explicit [Logger] type),
|
||||||
use GetMultiLogger.
|
use [GetMultiLogger].
|
||||||
|
|
||||||
|
[stdlib log's constants]: https://pkg.go.dev/log#pkg-constants
|
||||||
*/
|
*/
|
||||||
func GetLogger(enableDebug bool, source string, eventIDs *WinEventID, logConfigFlags int, logPaths ...string) (logger Logger, err error) {
|
func GetLoggerWindows(enableDebug bool, source string, eventIDs *WinEventID, logConfigFlags int, logPaths ...string) (logger Logger, err error) {
|
||||||
|
|
||||||
var logPath string
|
var logPath string
|
||||||
var logFlags bitmask.MaskBit
|
var logFlags bitmask.MaskBit
|
||||||
|
|||||||
@@ -124,7 +124,7 @@ func TestDefaultLogger(t *testing.T) {
|
|||||||
t.Fatalf("error when closing handler for temporary log file '%v': %v", tempfile.Name(), err.Error())
|
t.Fatalf("error when closing handler for temporary log file '%v': %v", tempfile.Name(), err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if l, err = GetLogger(true, TestLogPrefix, DefaultEventID, logFlags, tempfilePath); err != nil {
|
if l, err = GetLoggerWindows(true, TestLogPrefix, DefaultEventID, logFlags, tempfilePath); err != nil {
|
||||||
t.Fatalf("error when spawning default Windows logger via GetLogger: %v", err.Error())
|
t.Fatalf("error when spawning default Windows logger via GetLogger: %v", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -35,7 +35,7 @@ func TestMultiLogger(t *testing.T) {
|
|||||||
t.Fatalf("error when adding FileLogger to MultiLogger: %v", err.Error())
|
t.Fatalf("error when adding FileLogger to MultiLogger: %v", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = l.AddDefaultLogger("DefaultLogger", DefaultEventID, logFlags, tempfilePath); err != nil {
|
if err = l.AddDefaultLogger("DefaultLogger", logFlags, tempfilePath); err != nil {
|
||||||
t.Fatalf("error when adding default logger to MultiLogger: %v", err.Error())
|
t.Fatalf("error when adding default logger to MultiLogger: %v", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
4
mapsx/doc.go
Normal file
4
mapsx/doc.go
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
/*
|
||||||
|
Package mapsx includes functions that probably should have been in [maps] but aren't.
|
||||||
|
*/
|
||||||
|
package mapsx
|
||||||
9
mapsx/errs.go
Normal file
9
mapsx/errs.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
package mapsx
|
||||||
|
|
||||||
|
import (
|
||||||
|
`errors`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrNotFound = errors.New("key not found")
|
||||||
|
)
|
||||||
43
mapsx/funcs.go
Normal file
43
mapsx/funcs.go
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
package mapsx
|
||||||
|
|
||||||
|
/*
|
||||||
|
Get mimics Python's [dict.get()] behavior, returning value `v` if key `k`
|
||||||
|
is not found in map `m`.
|
||||||
|
|
||||||
|
See also [GetOk], [Must].
|
||||||
|
|
||||||
|
[dict.get()]: https://docs.python.org/3/library/stdtypes.html#dict.get
|
||||||
|
*/
|
||||||
|
func Get[Map ~map[K]V, K comparable, V any](m Map, k K, v V) (val V) {
|
||||||
|
|
||||||
|
val, _ = GetOk(m, k, v)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOk is like [Get] but also explicitly indicates whether `k` was found or not. See also [Must].
|
||||||
|
func GetOk[Map ~map[K]V, K comparable, V any](m Map, k K, v V) (val V, found bool) {
|
||||||
|
|
||||||
|
if val, found = m[k]; !found {
|
||||||
|
val = v
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Must, unlike [Get] or [GetOk], requires that `k` be in map `m`.
|
||||||
|
|
||||||
|
A panic with error [ErrNotFound] will be raised if `k` is not present.
|
||||||
|
Otherwise the found value will be returned.
|
||||||
|
*/
|
||||||
|
func Must[Map ~map[K]V, K comparable, V any](m Map, k K) (val V) {
|
||||||
|
|
||||||
|
var ok bool
|
||||||
|
|
||||||
|
if val, ok = m[k]; !ok {
|
||||||
|
panic(ErrNotFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
21
multierr/TODO
Normal file
21
multierr/TODO
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
- add unwrapping
|
||||||
|
https://go.dev/blog/go1.13-errors#the-unwrap-method
|
||||||
|
- add As method, takes a ptr to a slice of []error to return the first matching error type (errors.As) for each?
|
||||||
|
- add AsAll [][]error ptr param for multiple errors per type?
|
||||||
|
- add Map, returns map[string][]error, where key is k:
|
||||||
|
var sb strings.Builder
|
||||||
|
t = reflect.TypeOf(err)
|
||||||
|
if t.PkgPath() != "" {
|
||||||
|
sb.WriteString(t.PkgPath())
|
||||||
|
} else {
|
||||||
|
sb.WriteString("<UNKNOWN>")
|
||||||
|
}
|
||||||
|
sb.WriteString(".")
|
||||||
|
if t.Name() != "" {
|
||||||
|
sb.WriteString(t.Name())
|
||||||
|
} else {
|
||||||
|
sb.WriteString("<UNKNOWN>")
|
||||||
|
}
|
||||||
|
k = sb.String()
|
||||||
|
- support generics for similar to above?
|
||||||
|
- this might allow for "error filtering"
|
||||||
@@ -13,7 +13,7 @@ import (
|
|||||||
/*
|
/*
|
||||||
AddrRfc returns an RFC-friendly string from an IP address ([net/netip.Addr]).
|
AddrRfc returns an RFC-friendly string from an IP address ([net/netip.Addr]).
|
||||||
|
|
||||||
If addr is an IPv4 address, it will simmply be the string representation (e.g. "203.0.113.1").
|
If addr is an IPv4 address, it will simply be the string representation (e.g. "203.0.113.1").
|
||||||
|
|
||||||
If addr is an IPv6 address, it will be enclosed in brackets (e.g. "[2001:db8::1]").
|
If addr is an IPv6 address, it will be enclosed in brackets (e.g. "[2001:db8::1]").
|
||||||
|
|
||||||
|
|||||||
@@ -10,11 +10,20 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// cksumMask is AND'd with a checksum to get the "carried ones".
|
/*
|
||||||
|
cksumMask is AND'd with a checksum to get the "carried ones"
|
||||||
|
(the lower 16 bits before folding carries).
|
||||||
|
*/
|
||||||
cksumMask uint32 = 0x0000ffff
|
cksumMask uint32 = 0x0000ffff
|
||||||
// cksumShift is used in the "carried-ones folding".
|
/*
|
||||||
|
cksumShift is used in the "carried-ones folding";
|
||||||
|
it's the number of bits to right-shift the carry-over.
|
||||||
|
*/
|
||||||
cksumShift uint32 = 0x00000010
|
cksumShift uint32 = 0x00000010
|
||||||
// padShift is used to "pad out" a checksum for odd-length buffers by left-shifting.
|
/*
|
||||||
|
padShift is used to "pad out" a checksum for odd-length buffers by left-shifting.
|
||||||
|
It positions the high-byte of a 16-byte "word" (big-endian, as per ord below).
|
||||||
|
*/
|
||||||
padShift uint32 = 0x00000008
|
padShift uint32 = 0x00000008
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -25,6 +25,9 @@ safety and no data retention, which can be used as a:
|
|||||||
* [io.StringWriter]
|
* [io.StringWriter]
|
||||||
* [io.Writer]
|
* [io.Writer]
|
||||||
|
|
||||||
|
If you don't need all these interfaces, a reasonable alternative may be
|
||||||
|
to use gVisor's [gvisor.dev/gvisor/pkg/tcpip/checksum] instead.
|
||||||
|
|
||||||
[RFC 1071]: https://datatracker.ietf.org/doc/html/rfc1071
|
[RFC 1071]: https://datatracker.ietf.org/doc/html/rfc1071
|
||||||
[RFC 1141]: https://datatracker.ietf.org/doc/html/rfc1141
|
[RFC 1141]: https://datatracker.ietf.org/doc/html/rfc1141
|
||||||
[RFC 1624]: https://datatracker.ietf.org/doc/html/rfc1624
|
[RFC 1624]: https://datatracker.ietf.org/doc/html/rfc1624
|
||||||
|
|||||||
@@ -7,8 +7,9 @@ import (
|
|||||||
// New returns a new initialized [InetChecksum]. It will never panic.
|
// New returns a new initialized [InetChecksum]. It will never panic.
|
||||||
func New() (i *InetChecksum) {
|
func New() (i *InetChecksum) {
|
||||||
|
|
||||||
i = &InetChecksum{}
|
i = &InetChecksum{
|
||||||
_ = i.Aligned()
|
aligned: true,
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -21,15 +22,14 @@ b may be nil or 0-length; this will not cause an error.
|
|||||||
func NewFromBytes(b []byte) (i *InetChecksum, copied int, err error) {
|
func NewFromBytes(b []byte) (i *InetChecksum, copied int, err error) {
|
||||||
|
|
||||||
var cksum InetChecksum
|
var cksum InetChecksum
|
||||||
|
var cptr *InetChecksum = &cksum
|
||||||
|
|
||||||
|
cksum.aligned = true
|
||||||
|
|
||||||
if b != nil && len(b) > 0 {
|
if b != nil && len(b) > 0 {
|
||||||
if copied, err = cksum.Write(b); err != nil {
|
if copied, err = cptr.Write(b); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
_ = i.Aligned()
|
|
||||||
} else {
|
|
||||||
i = New()
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
i = &cksum
|
i = &cksum
|
||||||
@@ -48,7 +48,64 @@ func NewFromBuf(buf io.Reader) (i *InetChecksum, copied int64, err error) {
|
|||||||
|
|
||||||
var cksum InetChecksum
|
var cksum InetChecksum
|
||||||
|
|
||||||
_ = i.Aligned()
|
cksum.aligned = true
|
||||||
|
|
||||||
|
if buf != nil {
|
||||||
|
if copied, err = io.Copy(&cksum, buf); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
i = &cksum
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSimple returns a new initialized [InetChecksumSimple]. It will never panic.
|
||||||
|
func NewSimple() (i *InetChecksumSimple) {
|
||||||
|
|
||||||
|
i = &InetChecksumSimple{
|
||||||
|
aligned: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
NewSimpleFromBytes returns a new [InetChecksumSimple] initialized with explicit bytes.
|
||||||
|
|
||||||
|
b may be nil or 0-length; this will not cause an error.
|
||||||
|
*/
|
||||||
|
func NewSimpleFromBytes(b []byte) (i *InetChecksumSimple, copied int, err error) {
|
||||||
|
|
||||||
|
var cksum InetChecksumSimple
|
||||||
|
var cptr *InetChecksumSimple = &cksum
|
||||||
|
|
||||||
|
cksum.aligned = true
|
||||||
|
|
||||||
|
if b != nil && len(b) > 0 {
|
||||||
|
if copied, err = cptr.Write(b); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
i = &cksum
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
NewSimpleFromBuf returns an [InetChecksumSimple] from a specified [io.Reader].
|
||||||
|
|
||||||
|
buf may be nil. If it isn't, NewSimpleFromBuf will call [io.Copy] on buf.
|
||||||
|
Note that this may exhaust your passed buf or advance its current seek position/offset,
|
||||||
|
depending on its type.
|
||||||
|
*/
|
||||||
|
func NewSimpleFromBuf(buf io.Reader) (i *InetChecksumSimple, copied int64, err error) {
|
||||||
|
|
||||||
|
var cksum InetChecksumSimple
|
||||||
|
|
||||||
|
cksum.aligned = true
|
||||||
|
|
||||||
if buf != nil {
|
if buf != nil {
|
||||||
if copied, err = io.Copy(&cksum, buf); err != nil {
|
if copied, err = io.Copy(&cksum, buf); err != nil {
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ func (i *InetChecksum) Aligned() (aligned bool) {
|
|||||||
defer i.alignLock.Unlock()
|
defer i.alignLock.Unlock()
|
||||||
|
|
||||||
i.bufLock.RLock()
|
i.bufLock.RLock()
|
||||||
aligned = i.buf.Len()&2 == 0
|
aligned = i.buf.Len()%2 == 0
|
||||||
i.bufLock.RUnlock()
|
i.bufLock.RUnlock()
|
||||||
|
|
||||||
i.aligned = aligned
|
i.aligned = aligned
|
||||||
@@ -113,7 +113,7 @@ func (i *InetChecksum) Reset() {
|
|||||||
i.sumLock.Lock()
|
i.sumLock.Lock()
|
||||||
i.lastLock.Lock()
|
i.lastLock.Lock()
|
||||||
|
|
||||||
i.aligned = false
|
i.aligned = true
|
||||||
i.alignLock.Unlock()
|
i.alignLock.Unlock()
|
||||||
|
|
||||||
i.buf.Reset()
|
i.buf.Reset()
|
||||||
@@ -308,7 +308,7 @@ func (i *InetChecksum) WriteByte(c byte) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !i.disabledBuf {
|
if !i.disabledBuf {
|
||||||
if err = i.WriteByte(c); err != nil {
|
if err = i.buf.WriteByte(c); err != nil {
|
||||||
i.sum = origSum
|
i.sum = origSum
|
||||||
i.aligned = origAligned
|
i.aligned = origAligned
|
||||||
i.last = origLast
|
i.last = origLast
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ func (i *InetChecksumSimple) Reset() {
|
|||||||
|
|
||||||
i.last = 0x00
|
i.last = 0x00
|
||||||
i.sum = 0
|
i.sum = 0
|
||||||
i.last = 0x00
|
i.aligned = true
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -17,8 +17,8 @@ type (
|
|||||||
If [InetChecksum.Aligned] returns false, the checksum result of an
|
If [InetChecksum.Aligned] returns false, the checksum result of an
|
||||||
[InetChecksum.Sum] or [InetChecksum.Sum16] (or any other operation
|
[InetChecksum.Sum] or [InetChecksum.Sum16] (or any other operation
|
||||||
returning a sum) will INCLUDE THE PAD NULL BYTE (which is only
|
returning a sum) will INCLUDE THE PAD NULL BYTE (which is only
|
||||||
applied *at the time of the Sum/Sum32 call) and is NOT applied to
|
applied *at the time of the Sum/Sum32 call* and is NOT applied to
|
||||||
the persistent underlying storage.
|
the persistent underlying storage).
|
||||||
|
|
||||||
InetChecksum differs from [InetChecksumSimple] in that it:
|
InetChecksum differs from [InetChecksumSimple] in that it:
|
||||||
|
|
||||||
|
|||||||
10
remap/doc.go
10
remap/doc.go
@@ -1,4 +1,12 @@
|
|||||||
/*
|
/*
|
||||||
Package remap provides convenience functions around regular expressions, primarily offering maps for named capture groups.
|
Package remap provides convenience functions around regular expressions,
|
||||||
|
primarily offering maps for named capture groups.
|
||||||
|
|
||||||
|
It offers convenience equivalents of the following:
|
||||||
|
|
||||||
|
* [regexp.Compile] ([Compile])
|
||||||
|
* [regexp.CompilePOSIX] ([CompilePOSIX])
|
||||||
|
* [regexp.MustCompile] ([MustCompile])
|
||||||
|
* [regexp.MustCompilePOSIX] ([MustCompilePOSIX])
|
||||||
*/
|
*/
|
||||||
package remap
|
package remap
|
||||||
|
|||||||
11
remap/errs.go
Normal file
11
remap/errs.go
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
package remap
|
||||||
|
|
||||||
|
import (
|
||||||
|
`errors`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidIdxPair error = errors.New("invalid index pair; [1] must be >= [0]")
|
||||||
|
ErrNoStr error = errors.New("no string to slice/reslice/subslice")
|
||||||
|
ErrShortStr error = errors.New("string too short to slice/reslice/subslice")
|
||||||
|
)
|
||||||
170
remap/funcs.go
Normal file
170
remap/funcs.go
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
package remap
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Compile is a convenience shorthand for:
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var r *remap.ReMap = new(remap.ReMap)
|
||||||
|
|
||||||
|
if r.Regexp, err = regexp.Compile(expr); err != nil {
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
|
||||||
|
It corresponds to [regexp.Compile].
|
||||||
|
*/
|
||||||
|
func Compile(expr string) (r *ReMap, err error) {
|
||||||
|
|
||||||
|
var p *regexp.Regexp
|
||||||
|
|
||||||
|
if p, err = regexp.Compile(expr); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
r = &ReMap{
|
||||||
|
Regexp: p,
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
CompilePOSIX is a convenience shorthand for:
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var r *remap.ReMap = new(remap.ReMap)
|
||||||
|
|
||||||
|
if r.Regexp, err = regexp.CompilePOSIX(expr); err != nil {
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
|
||||||
|
It corresponds to [regexp.CompilePOSIX].
|
||||||
|
*/
|
||||||
|
func CompilePOSIX(expr string) (r *ReMap, err error) {
|
||||||
|
|
||||||
|
var p *regexp.Regexp
|
||||||
|
|
||||||
|
if p, err = regexp.CompilePOSIX(expr); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
r = &ReMap{
|
||||||
|
Regexp: p,
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
MustCompile is a convenience shorthand for:
|
||||||
|
|
||||||
|
var r *remap.ReMap = &remap.ReMap{
|
||||||
|
Regexp: regexp.MustCompile(expr),
|
||||||
|
}
|
||||||
|
|
||||||
|
It corresponds to [regexp.MustCompile].
|
||||||
|
*/
|
||||||
|
func MustCompile(expr string) (r *ReMap) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var p *regexp.Regexp
|
||||||
|
|
||||||
|
// We panic ourselves instead of wrapping regexp.MustCompile.
|
||||||
|
// Makes debuggers a little more explicit.
|
||||||
|
if p, err = regexp.Compile(expr); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r = &ReMap{
|
||||||
|
Regexp: p,
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
MustCompilePOSIX is a convenience shorthand for:
|
||||||
|
|
||||||
|
var r *remap.ReMap = &remap.ReMap{
|
||||||
|
Regexp: regexp.MustCompilePOSIX(expr),
|
||||||
|
}
|
||||||
|
|
||||||
|
It corresponds to [regexp.MustCompilePOSIX].
|
||||||
|
*/
|
||||||
|
func MustCompilePOSIX(expr string) (r *ReMap) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var p *regexp.Regexp
|
||||||
|
|
||||||
|
// We panic ourselves instead of wrapping regexp.MustCompilePOSIX.
|
||||||
|
// Makes debuggers a little more explicit.
|
||||||
|
if p, err = regexp.CompilePOSIX(expr); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r = &ReMap{
|
||||||
|
Regexp: p,
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
strIdxSlicer takes string s, and returns the substring marked by idxPair,
|
||||||
|
where:
|
||||||
|
|
||||||
|
idxPair = [2]int{
|
||||||
|
<substring START POSITION>,
|
||||||
|
<substring END BOUNDARY>,
|
||||||
|
}
|
||||||
|
|
||||||
|
That is, to get `oo` from `foobar`,
|
||||||
|
|
||||||
|
idxPair = [2]int{1, 3}
|
||||||
|
# NOT:
|
||||||
|
#idxPair = [2]int{1, 2}
|
||||||
|
|
||||||
|
subStr will be empty and matched will be false if:
|
||||||
|
|
||||||
|
* idxPair[0] < 0
|
||||||
|
* idxPair[1] < 0
|
||||||
|
|
||||||
|
It will panic with [ErrShortStr] if:
|
||||||
|
|
||||||
|
* idxPair[0] > len(s)-1
|
||||||
|
* idxPair[1] > len(s)
|
||||||
|
|
||||||
|
It will panic with [ErrInvalidIdxPair] if:
|
||||||
|
|
||||||
|
* idxPair[0] > idxPair[1]
|
||||||
|
|
||||||
|
It will properly handle single-character addresses (i.e. idxPair[0] == idxPair[1]).
|
||||||
|
*/
|
||||||
|
func strIdxSlicer(s string, idxPair [2]int) (subStr string, matched bool) {
|
||||||
|
|
||||||
|
if idxPair[0] < 0 || idxPair[1] < 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
matched = true
|
||||||
|
|
||||||
|
if (idxPair[0] > (len(s) - 1)) ||
|
||||||
|
(idxPair[1] > len(s)) {
|
||||||
|
panic(ErrShortStr)
|
||||||
|
}
|
||||||
|
if idxPair[0] > idxPair[1] {
|
||||||
|
panic(ErrInvalidIdxPair)
|
||||||
|
}
|
||||||
|
|
||||||
|
if idxPair[0] == idxPair[1] {
|
||||||
|
// single character
|
||||||
|
subStr = string(s[idxPair[0]])
|
||||||
|
} else {
|
||||||
|
// multiple characters
|
||||||
|
subStr = s[idxPair[0]:idxPair[1]]
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -5,9 +5,14 @@ Map returns a map[string][]<match bytes> for regexes with named capture groups m
|
|||||||
Note that this supports non-unique group names; [regexp.Regexp] allows for patterns with multiple groups
|
Note that this supports non-unique group names; [regexp.Regexp] allows for patterns with multiple groups
|
||||||
using the same group name (though your IDE might complain; I know GoLand does).
|
using the same group name (though your IDE might complain; I know GoLand does).
|
||||||
|
|
||||||
|
It will panic if the embedded [regexp.Regexp] is nil.
|
||||||
|
|
||||||
Each match for each group is in a slice keyed under that group name, with that slice
|
Each match for each group is in a slice keyed under that group name, with that slice
|
||||||
ordered by the indexing done by the regex match itself.
|
ordered by the indexing done by the regex match itself.
|
||||||
|
|
||||||
|
This operates on only the first found match (like [regexp.Regexp.FindSubmatch]).
|
||||||
|
To operate on *all* matches, use [ReMap.MapAll].
|
||||||
|
|
||||||
In summary, the parameters are as follows:
|
In summary, the parameters are as follows:
|
||||||
|
|
||||||
# inclNoMatch
|
# inclNoMatch
|
||||||
@@ -31,6 +36,7 @@ is provided but b does not match then matches will be:
|
|||||||
If true (and inclNoMatch is true), instead of a single nil the group's values will be
|
If true (and inclNoMatch is true), instead of a single nil the group's values will be
|
||||||
a slice of nil values explicitly matching the number of times the group name is specified
|
a slice of nil values explicitly matching the number of times the group name is specified
|
||||||
in the pattern.
|
in the pattern.
|
||||||
|
May be unpredictable if the same name is used multiple times for different capture groups across multiple patterns.
|
||||||
|
|
||||||
For example, if a pattern:
|
For example, if a pattern:
|
||||||
|
|
||||||
@@ -87,7 +93,7 @@ In detail, matches and/or its values may be nil or empty under the following con
|
|||||||
IF inclNoMatch is true
|
IF inclNoMatch is true
|
||||||
IF inclNoMatchStrict is true
|
IF inclNoMatchStrict is true
|
||||||
THEN matches[<group name>] is defined and non-nil, but populated with placeholder nils
|
THEN matches[<group name>] is defined and non-nil, but populated with placeholder nils
|
||||||
(matches[<group name>] == [][]byte{nil[, nil...]})
|
(matches[<group name>] == [][]byte{nil[, nil, ...]})
|
||||||
ELSE
|
ELSE
|
||||||
THEN matches[<group name>] is guaranteed defined but may be nil (_, ok = matches[<group name>]; ok == true)
|
THEN matches[<group name>] is guaranteed defined but may be nil (_, ok = matches[<group name>]; ok == true)
|
||||||
ELSE
|
ELSE
|
||||||
@@ -109,7 +115,7 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
names = r.Regexp.SubexpNames()
|
names = r.Regexp.SubexpNames()[:]
|
||||||
matchBytes = r.Regexp.FindSubmatch(b)
|
matchBytes = r.Regexp.FindSubmatch(b)
|
||||||
|
|
||||||
if matchBytes == nil {
|
if matchBytes == nil {
|
||||||
@@ -142,6 +148,9 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
|||||||
if inclNoMatch {
|
if inclNoMatch {
|
||||||
if len(names) >= 1 {
|
if len(names) >= 1 {
|
||||||
for _, grpNm = range names {
|
for _, grpNm = range names {
|
||||||
|
if grpNm == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
matches[grpNm] = nil
|
matches[grpNm] = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -154,7 +163,7 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
|||||||
grpNm = names[mIdx]
|
grpNm = names[mIdx]
|
||||||
/*
|
/*
|
||||||
Thankfully, it's actually a build error if a pattern specifies a named
|
Thankfully, it's actually a build error if a pattern specifies a named
|
||||||
capture group with an empty name.
|
capture group with an matched name.
|
||||||
So we don't need to worry about accounting for that,
|
So we don't need to worry about accounting for that,
|
||||||
and can just skip over grpNm == "" (which is an *unnamed* capture group).
|
and can just skip over grpNm == "" (which is an *unnamed* capture group).
|
||||||
*/
|
*/
|
||||||
@@ -190,6 +199,9 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
|||||||
// This *technically* should be completely handled above.
|
// This *technically* should be completely handled above.
|
||||||
if inclNoMatch {
|
if inclNoMatch {
|
||||||
for _, grpNm = range names {
|
for _, grpNm = range names {
|
||||||
|
if grpNm == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if _, ok = tmpMap[grpNm]; !ok {
|
if _, ok = tmpMap[grpNm]; !ok {
|
||||||
tmpMap[grpNm] = nil
|
tmpMap[grpNm] = nil
|
||||||
}
|
}
|
||||||
@@ -204,13 +216,147 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
MapString is exactly like ReMap.Map(), but operates on (and returns) strings instead.
|
MapAll behaves exactly like [ReMap.Map] but will "squash"/consolidate *all* found matches, not just the first occurrence,
|
||||||
(matches will always be nil if s == “.)
|
into the group name.
|
||||||
|
|
||||||
A small deviation, though; empty strings instead of nils (because duh) will occupy slice placeholders (if `inclNoMatchStrict` is specified).
|
You likely want to use this instead of [ReMap.Map] for multiline patterns.
|
||||||
|
*/
|
||||||
|
func (r *ReMap) MapAll(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (matches map[string][][]byte) {
|
||||||
|
|
||||||
|
var ok bool
|
||||||
|
var mIdx int
|
||||||
|
var isEmpty bool
|
||||||
|
var match []byte
|
||||||
|
var grpNm string
|
||||||
|
var names []string
|
||||||
|
var mbGrp [][]byte
|
||||||
|
var ptrnNms []string
|
||||||
|
var matchBytes [][][]byte
|
||||||
|
var tmpMap map[string][][]byte = make(map[string][][]byte)
|
||||||
|
|
||||||
|
if b == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
names = r.Regexp.SubexpNames()[:]
|
||||||
|
matchBytes = r.Regexp.FindAllSubmatch(b, -1)
|
||||||
|
|
||||||
|
if matchBytes == nil {
|
||||||
|
// b does not match pattern
|
||||||
|
if !mustMatch {
|
||||||
|
matches = make(map[string][][]byte)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if names == nil || len(names) == 0 || len(names) == 1 {
|
||||||
|
/*
|
||||||
|
no named capture groups;
|
||||||
|
technically only the last condition would be the case.
|
||||||
|
*/
|
||||||
|
if inclNoMatch {
|
||||||
|
matches = make(map[string][][]byte)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
names = names[1:]
|
||||||
|
|
||||||
|
tmpMap = make(map[string][][]byte)
|
||||||
|
|
||||||
|
// From here, it behaves (sort of) like ReMap.Map
|
||||||
|
// except mbGrp is like matchBytes in Map.
|
||||||
|
for _, mbGrp = range matchBytes {
|
||||||
|
|
||||||
|
// Unlike ReMap.Map, we have to do a little additional logic.
|
||||||
|
isEmpty = false
|
||||||
|
ptrnNms = make([]string, 0, len(names))
|
||||||
|
|
||||||
|
if mbGrp == nil {
|
||||||
|
isEmpty = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isEmpty {
|
||||||
|
if len(mbGrp) == 0 || len(mbGrp) == 1 {
|
||||||
|
/*
|
||||||
|
no submatches whatsoever.
|
||||||
|
*/
|
||||||
|
isEmpty = true
|
||||||
|
} else {
|
||||||
|
mbGrp = mbGrp[1:]
|
||||||
|
|
||||||
|
for mIdx, match = range mbGrp {
|
||||||
|
if mIdx > len(names) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
grpNm = names[mIdx]
|
||||||
|
if grpNm == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ptrnNms = append(ptrnNms, grpNm)
|
||||||
|
|
||||||
|
if match == nil {
|
||||||
|
// This specific group didn't match, but it matched the whole pattern.
|
||||||
|
if !inclNoMatch {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok = tmpMap[grpNm]; !ok {
|
||||||
|
if !inclNoMatchStrict {
|
||||||
|
tmpMap[grpNm] = nil
|
||||||
|
} else {
|
||||||
|
tmpMap[grpNm] = [][]byte{nil}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if inclNoMatchStrict {
|
||||||
|
tmpMap[grpNm] = append(tmpMap[grpNm], nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok = tmpMap[grpNm]; !ok {
|
||||||
|
tmpMap[grpNm] = make([][]byte, 0)
|
||||||
|
}
|
||||||
|
tmpMap[grpNm] = append(tmpMap[grpNm], match)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// I can't recall why I capture this.
|
||||||
|
_ = ptrnNms
|
||||||
|
}
|
||||||
|
|
||||||
|
// *Theoretically* all of these should be populated with at least a nil.
|
||||||
|
if inclNoMatch {
|
||||||
|
for _, grpNm = range names {
|
||||||
|
if grpNm == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok = tmpMap[grpNm]; !ok {
|
||||||
|
tmpMap[grpNm] = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(tmpMap) > 0 {
|
||||||
|
matches = tmpMap
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
MapString is exactly like [ReMap.Map], but operates on (and returns) strings instead.
|
||||||
|
(matches will always be nil if s == "".)
|
||||||
|
|
||||||
|
It will panic if the embedded [regexp.Regexp] is nil.
|
||||||
|
|
||||||
|
This operates on only the first found match (like [regexp.Regexp.FindStringSubmatch]).
|
||||||
|
To operate on *all* matches, use [ReMap.MapStringAll].
|
||||||
|
|
||||||
|
A small deviation and caveat, though; empty strings instead of nils (because duh) will occupy slice placeholders (if `inclNoMatchStrict` is specified).
|
||||||
This unfortunately *does not provide any indication* if an empty string positively matched the pattern (a "hit") or if it was simply
|
This unfortunately *does not provide any indication* if an empty string positively matched the pattern (a "hit") or if it was simply
|
||||||
not matched at all (a "miss"). If you need definitive determination between the two conditions, it is instead recommended to either
|
not matched at all (a "miss"). If you need definitive determination between the two conditions, it is instead recommended to either
|
||||||
*not* use inclNoMatchStrict or to use ReMap.Map() instead and convert any non-nil values to strings after.
|
*not* use inclNoMatchStrict or to use [ReMap.Map] instead and convert any non-nil values to strings after.
|
||||||
|
|
||||||
Particularly:
|
Particularly:
|
||||||
|
|
||||||
@@ -233,8 +379,9 @@ is provided but s does not match then matches will be:
|
|||||||
# inclNoMatchStrict
|
# inclNoMatchStrict
|
||||||
|
|
||||||
If true (and inclNoMatch is true), instead of a single nil the group's values will be
|
If true (and inclNoMatch is true), instead of a single nil the group's values will be
|
||||||
a slice of eempty string values explicitly matching the number of times the group name is specified
|
a slice of empty string values explicitly matching the number of times the group name is specified
|
||||||
in the pattern.
|
in the pattern.
|
||||||
|
May be unpredictable if the same name is used multiple times for different capture groups across multiple patterns.
|
||||||
|
|
||||||
For example, if a pattern:
|
For example, if a pattern:
|
||||||
|
|
||||||
@@ -290,8 +437,8 @@ In detail, matches and/or its values may be nil or empty under the following con
|
|||||||
IF <group name> does not have a match
|
IF <group name> does not have a match
|
||||||
IF inclNoMatch is true
|
IF inclNoMatch is true
|
||||||
IF inclNoMatchStrict is true
|
IF inclNoMatchStrict is true
|
||||||
THEN matches[<group name>] is defined and non-nil, but populated with placeholder nils
|
THEN matches[<group name>] is defined and non-nil, but populated with placeholder strings
|
||||||
(matches[<group name>] == []string{""[, ""...]})
|
(matches[<group name>] == []string{""[, "", ...]})
|
||||||
ELSE
|
ELSE
|
||||||
THEN matches[<group name>] is guaranteed defined but may be nil (_, ok = matches[<group name>]; ok == true)
|
THEN matches[<group name>] is guaranteed defined but may be nil (_, ok = matches[<group name>]; ok == true)
|
||||||
ELSE
|
ELSE
|
||||||
@@ -304,27 +451,19 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
|||||||
var ok bool
|
var ok bool
|
||||||
var endIdx int
|
var endIdx int
|
||||||
var startIdx int
|
var startIdx int
|
||||||
var chunkIdx int
|
var grpIdx int
|
||||||
var grpNm string
|
var grpNm string
|
||||||
var names []string
|
var names []string
|
||||||
var matchStr string
|
var matchStr string
|
||||||
/*
|
var si stringIndexer
|
||||||
A slice of indices or index pairs.
|
|
||||||
For each element `e` in idxChunks,
|
|
||||||
* if `e` is nil, no group match.
|
|
||||||
* if len(e) == 1, only a single character was matched.
|
|
||||||
* otherwise len(e) == 2, the start and end of the match.
|
|
||||||
*/
|
|
||||||
var idxChunks [][]int
|
|
||||||
var matchIndices []int
|
var matchIndices []int
|
||||||
var chunkIndices []int // always 2 elements; start pos and end pos
|
|
||||||
var tmpMap map[string][]string = make(map[string][]string)
|
var tmpMap map[string][]string = make(map[string][]string)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
OK so this is a bit of a deviation.
|
OK so this is a bit of a deviation.
|
||||||
|
|
||||||
It's not as straightforward as above, because there isn't an explicit way
|
It's not as straightforward as above, because there isn't an explicit way
|
||||||
like above to determine if a pattern was *matched as an empty string* vs.
|
like above to determine if a pattern was *matched as an matched string* vs.
|
||||||
*not matched*.
|
*not matched*.
|
||||||
|
|
||||||
So instead do roundabout index-y things.
|
So instead do roundabout index-y things.
|
||||||
@@ -334,7 +473,8 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
I'm not entirely sure how serious they are about "the slice should not be modified"...
|
I'm not entirely sure how serious they are about
|
||||||
|
"the slice should not be modified"...
|
||||||
|
|
||||||
DO NOT sort or dedupe `names`! If the same name for groups is duplicated,
|
DO NOT sort or dedupe `names`! If the same name for groups is duplicated,
|
||||||
it will be duplicated here in proper order and the ordering is tied to
|
it will be duplicated here in proper order and the ordering is tied to
|
||||||
@@ -351,7 +491,7 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if names == nil || len(names) <= 1 {
|
if names == nil || len(names) == 0 || len(names) == 1 {
|
||||||
/*
|
/*
|
||||||
No named capture groups;
|
No named capture groups;
|
||||||
technically only the last condition would be the case,
|
technically only the last condition would be the case,
|
||||||
@@ -363,6 +503,7 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
|||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
names = names[1:]
|
||||||
|
|
||||||
if len(matchIndices) == 0 || len(matchIndices) == 1 {
|
if len(matchIndices) == 0 || len(matchIndices) == 1 {
|
||||||
/*
|
/*
|
||||||
@@ -378,26 +519,34 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
|||||||
matches = make(map[string][]string)
|
matches = make(map[string][]string)
|
||||||
if inclNoMatch {
|
if inclNoMatch {
|
||||||
for _, grpNm = range names {
|
for _, grpNm = range names {
|
||||||
if grpNm != "" {
|
if grpNm == "" {
|
||||||
matches[grpNm] = nil
|
continue
|
||||||
}
|
}
|
||||||
|
matches[grpNm] = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
A reslice of `matchIndices` could technically start at 2 (as long as `names` is sliced [1:])
|
The reslice of `matchIndices` starts at 2 because they're in pairs:
|
||||||
because they're in pairs: []int{<start>, <end>, <start>, <end>, ...}
|
|
||||||
and the first pair is the entire pattern match (un-resliced names[0]).
|
[]int{<start>, <end>, <start>, <end>, ...}
|
||||||
Thus the len(matchIndices) == 2*len(names), *even* if you
|
|
||||||
|
and the first pair is the entire pattern match (un-resliced names[0],
|
||||||
|
un-resliced matchIndices[0]).
|
||||||
|
|
||||||
|
Thus the len(matchIndices) == 2*len(names) (*should*, that is), *even* if you reslice.
|
||||||
Keep in mind that since the first element of names is removed,
|
Keep in mind that since the first element of names is removed,
|
||||||
the first pair here is skipped.
|
we reslice matchIndices as well.
|
||||||
This provides a bit more consistent readability, though.
|
|
||||||
*/
|
*/
|
||||||
idxChunks = make([][]int, len(names))
|
matchIndices = matchIndices[2:]
|
||||||
chunkIdx = 0
|
|
||||||
endIdx = 0
|
tmpMap = make(map[string][]string)
|
||||||
|
|
||||||
|
// Note that the second index is the *upper boundary*, not a *position in the string*
|
||||||
|
// so these indices are perfectly usable as-is as returned from the regexp methods.
|
||||||
|
// http://golang.org/ref/spec#Slice_expressions
|
||||||
for startIdx = 0; endIdx < len(matchIndices); startIdx += 2 {
|
for startIdx = 0; endIdx < len(matchIndices); startIdx += 2 {
|
||||||
endIdx = startIdx + 2
|
endIdx = startIdx + 2
|
||||||
// This technically should never happen.
|
// This technically should never happen.
|
||||||
@@ -405,75 +554,253 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
|||||||
endIdx = len(matchIndices)
|
endIdx = len(matchIndices)
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkIndices = matchIndices[startIdx:endIdx]
|
if grpIdx >= len(names) {
|
||||||
|
break
|
||||||
if chunkIndices[0] == -1 || chunkIndices[1] == -1 {
|
|
||||||
// group did not match
|
|
||||||
chunkIndices = nil
|
|
||||||
} else {
|
|
||||||
if chunkIndices[0] == chunkIndices[1] {
|
|
||||||
chunkIndices = []int{chunkIndices[0]}
|
|
||||||
} else {
|
|
||||||
chunkIndices = matchIndices[startIdx:endIdx]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
idxChunks[chunkIdx] = chunkIndices
|
|
||||||
chunkIdx++
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Now associate with names and pull the string sequence.
|
si = stringIndexer{
|
||||||
for chunkIdx, chunkIndices = range idxChunks {
|
group: grpIdx,
|
||||||
grpNm = names[chunkIdx]
|
start: matchIndices[startIdx],
|
||||||
/*
|
end: matchIndices[endIdx-1],
|
||||||
Thankfully, it's actually a build error if a pattern specifies a named
|
matched: true,
|
||||||
capture group with an empty name.
|
nm: names[grpIdx],
|
||||||
So we don't need to worry about accounting for that,
|
grpS: "",
|
||||||
and can just skip over grpNm == ""
|
s: &matchStr,
|
||||||
(which is either an *unnamed* capture group
|
ptrn: r.Regexp,
|
||||||
OR the first element in `names`, which is always
|
}
|
||||||
the entire match).
|
grpIdx++
|
||||||
*/
|
|
||||||
if grpNm == "" {
|
if si.nm == "" {
|
||||||
|
// unnamed capture group
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if chunkIndices == nil || len(chunkIndices) == 0 {
|
// sets si.matched and si.grpS
|
||||||
// group did not match
|
si.idxSlice(&s)
|
||||||
|
|
||||||
|
if !si.matched {
|
||||||
if !inclNoMatch {
|
if !inclNoMatch {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if _, ok = tmpMap[grpNm]; !ok {
|
if _, ok = tmpMap[si.nm]; !ok {
|
||||||
if !inclNoMatchStrict {
|
if !inclNoMatchStrict {
|
||||||
tmpMap[grpNm] = nil
|
tmpMap[si.nm] = nil
|
||||||
} else {
|
} else {
|
||||||
tmpMap[grpNm] = []string{""}
|
tmpMap[si.nm] = []string{""}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if inclNoMatchStrict {
|
if inclNoMatchStrict {
|
||||||
tmpMap[grpNm] = append(tmpMap[grpNm], "")
|
tmpMap[si.nm] = append(tmpMap[si.nm], "")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
switch len(chunkIndices) {
|
if _, ok = tmpMap[si.nm]; !ok {
|
||||||
case 1:
|
tmpMap[si.nm] = make([]string, 0)
|
||||||
// Single character
|
|
||||||
matchStr = string(s[chunkIndices[0]])
|
|
||||||
case 2:
|
|
||||||
// Multiple characters
|
|
||||||
matchStr = s[chunkIndices[0]:chunkIndices[1]]
|
|
||||||
}
|
}
|
||||||
|
tmpMap[si.nm] = append(tmpMap[si.nm], si.grpS)
|
||||||
if _, ok = tmpMap[grpNm]; !ok {
|
|
||||||
tmpMap[grpNm] = make([]string, 0)
|
|
||||||
}
|
|
||||||
tmpMap[grpNm] = append(tmpMap[grpNm], matchStr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// This *technically* should be completely handled above.
|
// This *technically* should be completely handled above.
|
||||||
if inclNoMatch {
|
if inclNoMatch {
|
||||||
for _, grpNm = range names {
|
for _, grpNm = range names {
|
||||||
|
if grpNm == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok = tmpMap[grpNm]; !ok {
|
||||||
|
tmpMap[grpNm] = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(tmpMap) > 0 {
|
||||||
|
matches = tmpMap
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
MapStringAll behaves exactly like [ReMap.MapString] but will "squash"/consolidate *all* found matches, not just the first occurrence,
|
||||||
|
into the group name.
|
||||||
|
|
||||||
|
You likely want to use this instead of [ReMap.MapString] for multiline patterns.
|
||||||
|
*/
|
||||||
|
func (r *ReMap) MapStringAll(s string, inclNoMatch, inclNoMatchStrict, mustMatch bool) (matches map[string][]string) {
|
||||||
|
|
||||||
|
var ok bool
|
||||||
|
var endIdx int
|
||||||
|
var startIdx int
|
||||||
|
var grpIdx int
|
||||||
|
var grpNm string
|
||||||
|
var names []string
|
||||||
|
var matchStr string
|
||||||
|
var si stringIndexer
|
||||||
|
var matchIndices []int
|
||||||
|
var allMatchIndices [][]int
|
||||||
|
var tmpMap map[string][]string = make(map[string][]string)
|
||||||
|
|
||||||
|
if s == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
names = r.Regexp.SubexpNames()[:]
|
||||||
|
allMatchIndices = r.Regexp.FindAllStringSubmatchIndex(s, -1)
|
||||||
|
|
||||||
|
if allMatchIndices == nil {
|
||||||
|
// s does not match pattern at all.
|
||||||
|
if !mustMatch {
|
||||||
|
matches = make(map[string][]string)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if names == nil || len(names) == 0 || len(names) == 1 {
|
||||||
|
/*
|
||||||
|
No named capture groups;
|
||||||
|
technically only the last condition would be the case,
|
||||||
|
as (regexp.Regexp).SubexpNames() will ALWAYS at the LEAST
|
||||||
|
return a `[]string{""}`.
|
||||||
|
*/
|
||||||
|
if inclNoMatch {
|
||||||
|
matches = make(map[string][]string)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
names = names[1:]
|
||||||
|
|
||||||
|
if len(allMatchIndices) == 0 {
|
||||||
|
// No matches (and thus submatches) whatsoever.
|
||||||
|
// I think this is actually covered by the `if allMatchIndices == nil { ... }` above,
|
||||||
|
// but this is still here for safety and efficiency - early return on no matches to iterate.
|
||||||
|
matches = make(map[string][]string)
|
||||||
|
if inclNoMatch {
|
||||||
|
for _, grpNm = range names {
|
||||||
|
if grpNm == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
matches[grpNm] = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Do *NOT* trim/reslice allMatchIndices!
|
||||||
|
// The reslicing is done below, *inside* each matchIndices iteration!
|
||||||
|
|
||||||
|
tmpMap = make(map[string][]string)
|
||||||
|
|
||||||
|
// From here, it behaves (sort of) like ReMap.MapString.
|
||||||
|
|
||||||
|
// Build the strictly-paired chunk indexes and populate them.
|
||||||
|
// We are iterating over *match sets*; matchIndices here should be analgous
|
||||||
|
// to matchIndices in ReMap.MapString.
|
||||||
|
for _, matchIndices = range allMatchIndices {
|
||||||
|
|
||||||
|
if matchIndices == nil {
|
||||||
|
// I *think* the exception with the *All* variant here
|
||||||
|
// is the *entire* return (allMatchIndices) is nil if there
|
||||||
|
// aren't any matches; I can't imagine there'd be any feasible
|
||||||
|
// way it'd insert a nil *element* for an index mapping group.
|
||||||
|
// So just continuing here should be fine;
|
||||||
|
// this continue SHOULD be unreachable.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reslice *here*, on the particular match index group.
|
||||||
|
// Grap the matchStr first; it's not currently *used* by anything but may in the future.
|
||||||
|
matchStr, ok = strIdxSlicer(
|
||||||
|
s,
|
||||||
|
*(*[2]int)(matchIndices[0:2]),
|
||||||
|
)
|
||||||
|
if len(matchIndices) == 0 || len(matchIndices) == 1 {
|
||||||
|
// No *sub*matches (capture groups) in this match, but it still matched the pattern.
|
||||||
|
if inclNoMatch {
|
||||||
|
for _, grpNm = range names {
|
||||||
|
if grpNm == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// We don't immediately return, though; we just stage out group names just in case.
|
||||||
|
// That's why we use tmpMap and not matches.
|
||||||
|
if _, ok = tmpMap[grpNm]; !ok {
|
||||||
|
tmpMap[grpNm] = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
matchIndices = matchIndices[2:]
|
||||||
|
|
||||||
|
// Reset from previous loop
|
||||||
|
endIdx = 0
|
||||||
|
grpIdx = 0
|
||||||
|
|
||||||
|
for startIdx = 0; endIdx < len(matchIndices); startIdx += 2 {
|
||||||
|
endIdx = startIdx + 2
|
||||||
|
if endIdx > len(matchIndices) {
|
||||||
|
endIdx = len(matchIndices)
|
||||||
|
}
|
||||||
|
|
||||||
|
if grpIdx >= len(names) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
si = stringIndexer{
|
||||||
|
group: grpIdx,
|
||||||
|
start: matchIndices[startIdx],
|
||||||
|
end: matchIndices[endIdx-1],
|
||||||
|
matched: true,
|
||||||
|
nm: names[grpIdx],
|
||||||
|
grpS: "",
|
||||||
|
ptrn: r.Regexp,
|
||||||
|
}
|
||||||
|
grpIdx++
|
||||||
|
// We do not include the entire match string here;
|
||||||
|
// we don't need it for this. Waste of memory.
|
||||||
|
_ = matchStr
|
||||||
|
/*
|
||||||
|
si.s = new(string)
|
||||||
|
*si.s = matchStr
|
||||||
|
*/
|
||||||
|
|
||||||
|
if si.nm == "" {
|
||||||
|
// unnamed capture group
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// sets si.matched and si.grpS
|
||||||
|
si.idxSlice(&s)
|
||||||
|
|
||||||
|
if !si.matched {
|
||||||
|
if !inclNoMatch {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok = tmpMap[si.nm]; !ok {
|
||||||
|
if !inclNoMatchStrict {
|
||||||
|
tmpMap[si.nm] = nil
|
||||||
|
} else {
|
||||||
|
tmpMap[si.nm] = []string{""}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if inclNoMatchStrict {
|
||||||
|
tmpMap[si.nm] = append(tmpMap[si.nm], "")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok = tmpMap[si.nm]; !ok {
|
||||||
|
tmpMap[si.nm] = make([]string, 0)
|
||||||
|
}
|
||||||
|
tmpMap[si.nm] = append(tmpMap[si.nm], si.grpS)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if inclNoMatch {
|
||||||
|
for _, grpNm = range names {
|
||||||
|
if grpNm == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if _, ok = tmpMap[grpNm]; !ok {
|
if _, ok = tmpMap[grpNm]; !ok {
|
||||||
tmpMap[grpNm] = nil
|
tmpMap[grpNm] = nil
|
||||||
}
|
}
|
||||||
|
|||||||
344
remap/funcs_remap_test.go
Normal file
344
remap/funcs_remap_test.go
Normal file
@@ -0,0 +1,344 @@
|
|||||||
|
package remap
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
testMatcher struct {
|
||||||
|
Nm string
|
||||||
|
S string
|
||||||
|
M *ReMap
|
||||||
|
All bool
|
||||||
|
Expected map[string][][]byte
|
||||||
|
ExpectedStr map[string][]string
|
||||||
|
ParamInclNoMatch bool
|
||||||
|
ParamInclNoMatchStrict bool
|
||||||
|
ParamInclMustMatch bool
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRemap(t *testing.T) {
|
||||||
|
|
||||||
|
var matches map[string][][]byte
|
||||||
|
|
||||||
|
for midx, m := range []testMatcher{
|
||||||
|
// 1
|
||||||
|
testMatcher{
|
||||||
|
Nm: "No matches",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: nil,
|
||||||
|
},
|
||||||
|
// 2
|
||||||
|
testMatcher{
|
||||||
|
Nm: "Single mid match",
|
||||||
|
S: "This contains a single match in the middle of a string",
|
||||||
|
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match)\s+`)},
|
||||||
|
Expected: map[string][][]byte{
|
||||||
|
"g1": [][]byte{[]byte("match")},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// 3
|
||||||
|
testMatcher{
|
||||||
|
Nm: "multi mid match",
|
||||||
|
S: "This contains a single match and another match in the middle of a string",
|
||||||
|
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another (?P<g1>match)\s+`)},
|
||||||
|
Expected: map[string][][]byte{
|
||||||
|
"g1": [][]byte{
|
||||||
|
[]byte("match"),
|
||||||
|
[]byte("match"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// 4
|
||||||
|
testMatcher{
|
||||||
|
Nm: "line match",
|
||||||
|
S: "This\ncontains a\nsingle\nmatch\non a dedicated line",
|
||||||
|
M: &ReMap{regexp.MustCompile(`(?m)^(?P<g1>match)$`)},
|
||||||
|
Expected: map[string][][]byte{
|
||||||
|
"g1": [][]byte{
|
||||||
|
[]byte("match"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// 5
|
||||||
|
testMatcher{
|
||||||
|
Nm: "multiline match",
|
||||||
|
S: "This\ncontains a\nsingle match and another\nmatch\nin the middle of a string",
|
||||||
|
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another\s+(?P<g1>match)\s+`)},
|
||||||
|
All: true,
|
||||||
|
Expected: map[string][][]byte{
|
||||||
|
"g1": [][]byte{
|
||||||
|
[]byte("match"),
|
||||||
|
[]byte("match"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// 6
|
||||||
|
// More closely mirrors something closer to real-life
|
||||||
|
testMatcher{
|
||||||
|
Nm: "mixed match",
|
||||||
|
S: " # No longer log hits/reqs/resps to file.\n" +
|
||||||
|
" #access_log /mnt/nginx_logs/vhost/tenant/site/access.log main;\n" +
|
||||||
|
" #error_log /mnt/nginx_logs/vhost/tenant/site/error.log;\n" +
|
||||||
|
" access_log off;\n" +
|
||||||
|
" error_log /dev/null;\n\n" +
|
||||||
|
" ssl_certificate /etc/nginx/tls/crt/tenant.pem;\n" +
|
||||||
|
" ssl_certificate_key /etc/nginx/tls/key/tenant.pem;\n\n",
|
||||||
|
M: &ReMap{regexp.MustCompile(`(?m)^\s*(?:error|access)_log\s+(?P<logpath>.+);\s*$`)},
|
||||||
|
All: true,
|
||||||
|
Expected: map[string][][]byte{
|
||||||
|
"logpath": [][]byte{
|
||||||
|
[]byte("off"),
|
||||||
|
[]byte("/dev/null"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
if m.All {
|
||||||
|
matches = m.M.MapAll([]byte(m.S), false, false, false)
|
||||||
|
} else {
|
||||||
|
matches = m.M.Map([]byte(m.S), false, false, false)
|
||||||
|
}
|
||||||
|
t.Logf(
|
||||||
|
"#%d:\n\tsrc:\t'%s'\n\tptrn:\t'%s'\n\tmatch:\t%s\n",
|
||||||
|
midx+1,
|
||||||
|
m.S,
|
||||||
|
m.M.Regexp.String(),
|
||||||
|
testBmapToStrMap(matches),
|
||||||
|
)
|
||||||
|
if !reflect.DeepEqual(matches, m.Expected) {
|
||||||
|
t.Fatalf("Case #%d (\"%s\"): expected '%#v' != received '%#v'", midx+1, m.Nm, m.Expected, matches)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRemapParams(t *testing.T) {
|
||||||
|
|
||||||
|
var matches map[string][][]byte
|
||||||
|
|
||||||
|
for midx, m := range []testMatcher{
|
||||||
|
testMatcher{
|
||||||
|
Nm: "",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: nil,
|
||||||
|
ParamInclNoMatch: false,
|
||||||
|
ParamInclNoMatchStrict: false,
|
||||||
|
ParamInclMustMatch: false,
|
||||||
|
},
|
||||||
|
testMatcher{
|
||||||
|
Nm: "",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: nil,
|
||||||
|
ParamInclNoMatch: false,
|
||||||
|
ParamInclNoMatchStrict: true,
|
||||||
|
ParamInclMustMatch: false,
|
||||||
|
},
|
||||||
|
testMatcher{
|
||||||
|
Nm: "",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: nil,
|
||||||
|
ParamInclNoMatch: false,
|
||||||
|
ParamInclNoMatchStrict: true,
|
||||||
|
ParamInclMustMatch: true,
|
||||||
|
},
|
||||||
|
testMatcher{
|
||||||
|
Nm: "",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: nil,
|
||||||
|
ParamInclNoMatch: false,
|
||||||
|
ParamInclNoMatchStrict: false,
|
||||||
|
ParamInclMustMatch: true,
|
||||||
|
},
|
||||||
|
testMatcher{
|
||||||
|
Nm: "",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: make(map[string][][]byte),
|
||||||
|
ParamInclNoMatch: true,
|
||||||
|
ParamInclNoMatchStrict: false,
|
||||||
|
ParamInclMustMatch: false,
|
||||||
|
},
|
||||||
|
testMatcher{
|
||||||
|
Nm: "",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: make(map[string][][]byte),
|
||||||
|
ParamInclNoMatch: true,
|
||||||
|
ParamInclNoMatchStrict: true,
|
||||||
|
ParamInclMustMatch: false,
|
||||||
|
},
|
||||||
|
testMatcher{
|
||||||
|
Nm: "",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: make(map[string][][]byte),
|
||||||
|
ParamInclNoMatch: true,
|
||||||
|
ParamInclNoMatchStrict: true,
|
||||||
|
ParamInclMustMatch: true,
|
||||||
|
},
|
||||||
|
testMatcher{
|
||||||
|
Nm: "",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
Expected: make(map[string][][]byte),
|
||||||
|
ParamInclNoMatch: true,
|
||||||
|
ParamInclNoMatchStrict: false,
|
||||||
|
ParamInclMustMatch: true,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
if m.All {
|
||||||
|
matches = m.M.MapAll([]byte(m.S), m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch)
|
||||||
|
} else {
|
||||||
|
matches = m.M.Map([]byte(m.S), m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch)
|
||||||
|
}
|
||||||
|
t.Logf(
|
||||||
|
"%d: %v/%v/%v: %#v\n",
|
||||||
|
midx+1, m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch, matches,
|
||||||
|
)
|
||||||
|
if !reflect.DeepEqual(matches, m.Expected) {
|
||||||
|
t.Fatalf("Case #%d (\"%s\"): '%#v' != '%#v'", midx+1, m.Nm, m.ExpectedStr, matches)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRemapString(t *testing.T) {
|
||||||
|
|
||||||
|
var matches map[string][]string
|
||||||
|
|
||||||
|
for midx, m := range []testMatcher{
|
||||||
|
// 1
|
||||||
|
testMatcher{
|
||||||
|
Nm: "No matches",
|
||||||
|
S: "this is a test",
|
||||||
|
M: &ReMap{regexp.MustCompile(``)},
|
||||||
|
ExpectedStr: nil,
|
||||||
|
},
|
||||||
|
// 2
|
||||||
|
testMatcher{
|
||||||
|
Nm: "Single mid match",
|
||||||
|
S: "This contains a single match in the middle of a string",
|
||||||
|
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match)\s+`)},
|
||||||
|
ExpectedStr: map[string][]string{
|
||||||
|
"g1": []string{"match"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// 3
|
||||||
|
testMatcher{
|
||||||
|
Nm: "multi mid match",
|
||||||
|
S: "This contains a single match and another match in the middle of a string",
|
||||||
|
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another (?P<g1>match)\s+`)},
|
||||||
|
ExpectedStr: map[string][]string{
|
||||||
|
"g1": []string{
|
||||||
|
"match",
|
||||||
|
"match",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// 4
|
||||||
|
testMatcher{
|
||||||
|
Nm: "line match",
|
||||||
|
S: "This\ncontains a\nsingle\nmatch\non a dedicated line",
|
||||||
|
M: &ReMap{regexp.MustCompile(`(?m)^(?P<g1>match)$`)},
|
||||||
|
ExpectedStr: map[string][]string{
|
||||||
|
"g1": []string{
|
||||||
|
"match",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// 5
|
||||||
|
testMatcher{
|
||||||
|
Nm: "multiline match",
|
||||||
|
S: "This\ncontains a\nsingle match and another\nmatch\nin the middle of a string",
|
||||||
|
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another\s+(?P<g1>match)\s+`)},
|
||||||
|
All: true,
|
||||||
|
ExpectedStr: map[string][]string{
|
||||||
|
"g1": []string{
|
||||||
|
"match",
|
||||||
|
"match",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// 6
|
||||||
|
// More closely mirrors something closer to real-life
|
||||||
|
testMatcher{
|
||||||
|
Nm: "mixed match",
|
||||||
|
S: " # No longer log hits/reqs/resps to file.\n" +
|
||||||
|
" #access_log /mnt/nginx_logs/vhost/tenant/site/access.log main;\n" +
|
||||||
|
" #error_log /mnt/nginx_logs/vhost/tenant/site/error.log;\n" +
|
||||||
|
" access_log off;\n" +
|
||||||
|
" error_log /dev/null;\n\n" +
|
||||||
|
" ssl_certificate /etc/nginx/tls/crt/tenant.pem;\n" +
|
||||||
|
" ssl_certificate_key /etc/nginx/tls/key/tenant.pem;\n\n",
|
||||||
|
M: &ReMap{regexp.MustCompile(`(?m)^\s*(?:error|access)_log\s+(?P<logpath>.+);\s*$`)},
|
||||||
|
All: true,
|
||||||
|
ExpectedStr: map[string][]string{
|
||||||
|
"logpath": []string{
|
||||||
|
"off",
|
||||||
|
"/dev/null",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
if m.All {
|
||||||
|
matches = m.M.MapStringAll(m.S, false, false, false)
|
||||||
|
} else {
|
||||||
|
matches = m.M.MapString(m.S, false, false, false)
|
||||||
|
}
|
||||||
|
t.Logf(
|
||||||
|
"#%d:\n\tsrc:\t'%s'\n\tptrn:\t'%s'\n\tmatch:\t%s\n",
|
||||||
|
midx+1,
|
||||||
|
m.S,
|
||||||
|
m.M.Regexp.String(),
|
||||||
|
testSmapToStrMap(matches),
|
||||||
|
)
|
||||||
|
if !reflect.DeepEqual(matches, m.ExpectedStr) {
|
||||||
|
t.Fatalf("Case #%d (\"%s\"): '%#v' != '%#v'", midx+1, m.Nm, m.ExpectedStr, matches)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func testBmapToStrMap(bmap map[string][][]byte) (s string) {
|
||||||
|
|
||||||
|
if bmap == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s = "\n"
|
||||||
|
for k, v := range bmap {
|
||||||
|
s += fmt.Sprintf("\t%s\n", k)
|
||||||
|
for _, i := range v {
|
||||||
|
s += fmt.Sprintf("\t\t%s\n", string(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSmapToStrMap(smap map[string][]string) (s string) {
|
||||||
|
|
||||||
|
if smap == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s = "\n"
|
||||||
|
for k, v := range smap {
|
||||||
|
s += fmt.Sprintf("\t%s\n", k)
|
||||||
|
for _, i := range v {
|
||||||
|
s += fmt.Sprintf("\t\t%s\n", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
34
remap/funcs_stringindexer.go
Normal file
34
remap/funcs_stringindexer.go
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
package remap
|
||||||
|
|
||||||
|
// idx returns []int{s.start, s.end}.
|
||||||
|
func (s *stringIndexer) idx() (i []int) {
|
||||||
|
return []int{s.start, s.end}
|
||||||
|
}
|
||||||
|
|
||||||
|
// idxStrict returns [2]int{s.start, s.end}.
|
||||||
|
func (s *stringIndexer) idxStrict() (i [2]int) {
|
||||||
|
return [2]int{s.start, s.end}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
idxSlice populates s.grpS using s.start and s.end.
|
||||||
|
|
||||||
|
If str is nil, it will use s.s.
|
||||||
|
If str is nil and s.s is nil, it will panic with [ErrNoStr].
|
||||||
|
|
||||||
|
If the pattern does not match (s.start < 0 or s.end < 0),
|
||||||
|
s.matched will be set to false (otherwise true).
|
||||||
|
*/
|
||||||
|
func (s *stringIndexer) idxSlice(str *string) {
|
||||||
|
|
||||||
|
if str == nil {
|
||||||
|
if s.s == nil {
|
||||||
|
panic(ErrNoStr)
|
||||||
|
}
|
||||||
|
str = s.s
|
||||||
|
}
|
||||||
|
|
||||||
|
s.grpS, s.matched = strIdxSlicer(*str, s.idxStrict())
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// ReMap provides some map-related functions around a regexp.Regexp.
|
// ReMap provides some map-related functions around a [regexp.Regexp].
|
||||||
ReMap struct {
|
ReMap struct {
|
||||||
*regexp.Regexp
|
*regexp.Regexp
|
||||||
}
|
}
|
||||||
@@ -24,4 +24,45 @@ type (
|
|||||||
}
|
}
|
||||||
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
stringIndexer struct {
|
||||||
|
// group is the capture group index for this match.
|
||||||
|
group int
|
||||||
|
// start is the string index (from the original string) where the matched group starts
|
||||||
|
start int
|
||||||
|
// end is the string index where the matched group ends
|
||||||
|
end int
|
||||||
|
/*
|
||||||
|
matched indicates if explicitly no match was found.
|
||||||
|
(This is normally indeterminate with string regex returns,
|
||||||
|
as e.g. `(?P<mygrp>\s*)`, `(?P<mygrp>(?:somestring)?)`, etc. all can be a *matched* "".)
|
||||||
|
|
||||||
|
If grpS == "" and matched == true, it DID match an empty string.
|
||||||
|
If grpS == "" and matched == false, it DID NOT MATCH the pattern.
|
||||||
|
If grpS != "", matched can be completely disregarded.
|
||||||
|
*/
|
||||||
|
matched bool
|
||||||
|
// nm is the match group name.
|
||||||
|
nm string
|
||||||
|
/*
|
||||||
|
grpS is the actual group-matched *substring*.
|
||||||
|
|
||||||
|
It will ALWAYS be either:
|
||||||
|
|
||||||
|
* the entirety of s
|
||||||
|
* a substring of s
|
||||||
|
* an empty string
|
||||||
|
|
||||||
|
it will never, and cannot be, a SUPERset of s.
|
||||||
|
it may not always be included/populated to save on memory.
|
||||||
|
*/
|
||||||
|
grpS string
|
||||||
|
/*
|
||||||
|
s is the *entire* MATCHED (sub)string.
|
||||||
|
It may not always be populated if not needed to save memory.
|
||||||
|
*/
|
||||||
|
s *string
|
||||||
|
// ptrn is the pattern applied to s.
|
||||||
|
ptrn *regexp.Regexp
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -4,8 +4,3 @@ const (
|
|||||||
// DefMaskStr is the string used as the default maskStr if left empty in [Redact].
|
// DefMaskStr is the string used as the default maskStr if left empty in [Redact].
|
||||||
DefMaskStr string = "***"
|
DefMaskStr string = "***"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
// DefIndentStr is the string used as the default indent if left empty in [Indent].
|
|
||||||
DefIndentStr string = "\t"
|
|
||||||
)
|
|
||||||
|
|||||||
@@ -1,4 +1,17 @@
|
|||||||
/*
|
/*
|
||||||
Package stringsx aims to extend functionality of the stdlib [strings] module.
|
Package stringsx aims to extend functionality of the stdlib [strings] module.
|
||||||
|
|
||||||
|
Note that if you need a way of mimicking Bash's shell quoting rules, [desertbit/shlex] or [buildkite/shellwords]
|
||||||
|
would be better options than [google/shlex] but this package does not attempt to reproduce
|
||||||
|
any of that functionality.
|
||||||
|
|
||||||
|
For line splitting, one should use [muesli/reflow/wordwrap].
|
||||||
|
Likewise for indentation, one should use [muesli/reflow/indent].
|
||||||
|
|
||||||
|
[desertbit/shlex]: https://pkg.go.dev/github.com/desertbit/go-shlex
|
||||||
|
[buildkite/shellwords]: https://pkg.go.dev/github.com/buildkite/shellwords
|
||||||
|
[google/shlex]: https://pkg.go.dev/github.com/google/shlex
|
||||||
|
[muesli/reflow/wordwrap]: https://pkg.go.dev/github.com/muesli/reflow/wordwrap
|
||||||
|
[muesli/reflow/indent]: https://pkg.go.dev/github.com/muesli/reflow/indent
|
||||||
*/
|
*/
|
||||||
package stringsx
|
package stringsx
|
||||||
|
|||||||
@@ -1,96 +1,170 @@
|
|||||||
package stringsx
|
package stringsx
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
`fmt`
|
||||||
`strings`
|
`strings`
|
||||||
`unicode`
|
`unicode`
|
||||||
)
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Indent takes string s and indents it with string `indent` `level` times.
|
LenSplit formats string `s` to break at, at most, every `width` characters.
|
||||||
|
|
||||||
If indent is an empty string, [DefIndentStr] will be used.
|
Any existing newlines (e.g. \r\n) will be removed during a string/
|
||||||
|
substring/line's length calculation. (e.g. `foobarbaz\n` and `foobarbaz\r\n` are
|
||||||
|
both considered to be lines of length 9, not 10 and 11 respectively).
|
||||||
|
|
||||||
If ws is true, lines consisting of only whitespace will be indented as well.
|
This also means that any newlines (\n or \r\n) are inherently removed from
|
||||||
(To then trim any extraneous trailing space, you may want to use [TrimSpaceRight]
|
`out` (even if included in `wordWrap`; see below).
|
||||||
or [TrimLines].)
|
|
||||||
|
|
||||||
If empty is true, lines with no content will be replaced with lines that purely
|
Note that if `s` is multiline (already contains newlines), they will be respected
|
||||||
consist of (indent * level) (otherwise they will be left as empty lines).
|
as-is - that is, if a line ends with less than `width` chars and then has a newline,
|
||||||
|
it will be preserved as an empty element. That is to say:
|
||||||
|
|
||||||
This function can also be used to prefix lines with arbitrary strings as well.
|
"foo\nbar\n\n" → []string{"foo", "bar", ""}
|
||||||
e.g:
|
"foo\n\nbar\n" → []string{"foo", "", "bar"}
|
||||||
|
|
||||||
Indent("foo\nbar\nbaz\n", "# ", 1, false, false)
|
This splitter is particularly simple. If you need wordwrapping, it should be done
|
||||||
|
with e.g. [github.com/muesli/reflow/wordwrap].
|
||||||
would yield:
|
|
||||||
|
|
||||||
# foo
|
|
||||||
# bar
|
|
||||||
# baz
|
|
||||||
<empty line>
|
|
||||||
|
|
||||||
thus allowing you to "comment out" multiple lines at once.
|
|
||||||
*/
|
*/
|
||||||
func Indent(s, indent string, level uint, ws, empty bool) (indented string) {
|
func LenSplit(s string, width uint) (out []string) {
|
||||||
|
|
||||||
var i string
|
var end int
|
||||||
var nl string
|
var line string
|
||||||
var endsNewline bool
|
var lineRunes []rune
|
||||||
var sb strings.Builder
|
|
||||||
var lineStripped string
|
|
||||||
|
|
||||||
if indent == "" {
|
if width == 0 {
|
||||||
indent = DefIndentStr
|
out = []string{s}
|
||||||
}
|
|
||||||
|
|
||||||
// This condition functionally won't do anything, so just return the input as-is.
|
|
||||||
if level == 0 {
|
|
||||||
indented = s
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
i = strings.Repeat(indent, int(level))
|
for line = range strings.Lines(s) {
|
||||||
|
line = strings.TrimRight(line, "\n")
|
||||||
|
line = strings.TrimRight(line, "\r")
|
||||||
|
|
||||||
// This condition functionally won't do anything, so just return the input as-is.
|
lineRunes = []rune(line)
|
||||||
if s == "" {
|
|
||||||
if empty {
|
|
||||||
indented = i
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for line := range strings.Lines(s) {
|
if uint(len(lineRunes)) <= width {
|
||||||
lineStripped = strings.TrimSpace(line)
|
out = append(out, line)
|
||||||
nl = getNewLine(line)
|
|
||||||
endsNewline = nl != ""
|
|
||||||
// fmt.Printf("%#v => %#v\n", line, lineStripped)
|
|
||||||
if lineStripped == "" {
|
|
||||||
// fmt.Printf("WS/EMPTY LINE (%#v) (ws %v, empty %v): \n", s, ws, empty)
|
|
||||||
if line != (lineStripped + nl) {
|
|
||||||
// whitespace-only line
|
|
||||||
if ws {
|
|
||||||
sb.WriteString(i)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// empty line
|
|
||||||
if empty {
|
|
||||||
sb.WriteString(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sb.WriteString(line)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// non-empty/non-whitespace-only line.
|
|
||||||
sb.WriteString(i + line)
|
for i := 0; i < len(lineRunes); i += int(width) {
|
||||||
|
end = i + int(width)
|
||||||
|
if end > len(lineRunes) {
|
||||||
|
end = len(lineRunes)
|
||||||
|
}
|
||||||
|
out = append(out, string(lineRunes[i:end]))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If it ends with a trailing newline and nothing after, strings.Lines() will skip the last (empty) line.
|
return
|
||||||
if endsNewline && empty {
|
|
||||||
nl = getNewLine(s)
|
|
||||||
sb.WriteString(i)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
indented = sb.String()
|
/*
|
||||||
|
LenSplitStr wraps [LenSplit] but recombines into a new string with newlines.
|
||||||
|
|
||||||
|
It's mostly just a convenience wrapper.
|
||||||
|
|
||||||
|
All arguments remain the same as in [LenSplit] with an additional one,
|
||||||
|
`winNewLine`, which if true will use \r\n as the newline instead of \n.
|
||||||
|
*/
|
||||||
|
func LenSplitStr(s string, width uint, winNewline bool) (out string) {
|
||||||
|
|
||||||
|
var outSl []string = LenSplit(s, width)
|
||||||
|
|
||||||
|
if winNewline {
|
||||||
|
out = strings.Join(outSl, "\r\n")
|
||||||
|
} else {
|
||||||
|
out = strings.Join(outSl, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Pad pads each element in `s` to length `width` using `pad`.
|
||||||
|
If `pad` is empty, a single space (0x20) will be assumed.
|
||||||
|
Note that `width` operates on rune size, not byte size.
|
||||||
|
(In ASCII, they will be the same size.)
|
||||||
|
|
||||||
|
If a line in `s` is greater than or equal to `width`,
|
||||||
|
no padding will be performed.
|
||||||
|
|
||||||
|
If `leftPad` is true, padding will be applied to the "left" (beginning")
|
||||||
|
of each element instead of the "right" ("end").
|
||||||
|
*/
|
||||||
|
func Pad(s []string, width uint, pad string, leftPad bool) (out []string) {
|
||||||
|
|
||||||
|
var idx int
|
||||||
|
var padIdx int
|
||||||
|
var runeIdx int
|
||||||
|
var padLen uint
|
||||||
|
var elem string
|
||||||
|
var unpadLen uint
|
||||||
|
var tmpPadLen int
|
||||||
|
var padRunes []rune
|
||||||
|
var tmpPad []rune
|
||||||
|
|
||||||
|
if width == 0 {
|
||||||
|
out = s
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out = make([]string, len(s))
|
||||||
|
|
||||||
|
// Easy; supported directly in fmt.
|
||||||
|
if pad == "" {
|
||||||
|
for idx, elem = range s {
|
||||||
|
if leftPad {
|
||||||
|
out[idx] = fmt.Sprintf("%*s", width, elem)
|
||||||
|
} else {
|
||||||
|
out[idx] = fmt.Sprintf("%-*s", width, elem)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// This gets a little more tricky.
|
||||||
|
padRunes = []rune(pad)
|
||||||
|
padLen = uint(len(padRunes))
|
||||||
|
for idx, elem = range s {
|
||||||
|
// First we need to know the number of runes in elem.
|
||||||
|
unpadLen = uint(len([]rune(elem)))
|
||||||
|
// If it's more than/equal to width, as-is.
|
||||||
|
if unpadLen >= width {
|
||||||
|
out[idx] = elem
|
||||||
|
} else {
|
||||||
|
// Otherwise, we need to construct/calculate a pad.
|
||||||
|
if (width-unpadLen)%padLen == 0 {
|
||||||
|
// Also easy enough.
|
||||||
|
if leftPad {
|
||||||
|
out[idx] = fmt.Sprintf("%s%s", strings.Repeat(pad, int((width-unpadLen)/padLen)), elem)
|
||||||
|
} else {
|
||||||
|
out[idx] = fmt.Sprintf("%s%s", elem, strings.Repeat(pad, int((width-unpadLen)/padLen)))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// This is where it gets a little hairy.
|
||||||
|
tmpPad = []rune{}
|
||||||
|
tmpPadLen = int(width - unpadLen)
|
||||||
|
idx = 0
|
||||||
|
padIdx = 0
|
||||||
|
for runeIdx = range tmpPadLen {
|
||||||
|
tmpPad[runeIdx] = padRunes[padIdx]
|
||||||
|
if uint(padIdx) >= padLen {
|
||||||
|
padIdx = 0
|
||||||
|
} else {
|
||||||
|
padIdx++
|
||||||
|
}
|
||||||
|
runeIdx++
|
||||||
|
}
|
||||||
|
if leftPad {
|
||||||
|
out[idx] = fmt.Sprintf("%s%s", string(tmpPad), elem)
|
||||||
|
} else {
|
||||||
|
out[idx] = fmt.Sprintf("%s%s", elem, string(tmpPad))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -118,6 +192,9 @@ As a safety precaution, if:
|
|||||||
len(s) <= (leading + trailing)
|
len(s) <= (leading + trailing)
|
||||||
|
|
||||||
then the entire string will be *masked* and no unmasking will be performed.
|
then the entire string will be *masked* and no unmasking will be performed.
|
||||||
|
|
||||||
|
Note that this DOES NOT do a string *replace*, it provides a masked version of `s` itself.
|
||||||
|
Wrap Redact with [strings.ReplaceAll] if you want to replace a certain value with a masked one.
|
||||||
*/
|
*/
|
||||||
func Redact(s, maskStr string, leading, trailing uint, newlines bool) (redacted string) {
|
func Redact(s, maskStr string, leading, trailing uint, newlines bool) (redacted string) {
|
||||||
|
|
||||||
@@ -218,7 +295,7 @@ func TrimLines(s string, left, right bool) (trimmed string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// TrimSpaceLeft is like [strings.TrimSpace] but only removes leading whitespace from string s.
|
// TrimSpaceLeft is like [strings.TrimSpace] but only removes leading whitespace from string `s`.
|
||||||
func TrimSpaceLeft(s string) (trimmed string) {
|
func TrimSpaceLeft(s string) (trimmed string) {
|
||||||
|
|
||||||
trimmed = strings.TrimLeftFunc(s, unicode.IsSpace)
|
trimmed = strings.TrimLeftFunc(s, unicode.IsSpace)
|
||||||
@@ -236,7 +313,7 @@ func TrimSpaceRight(s string) (trimmed string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// getNewLine is too unpredictable to be used outside of this package so it isn't exported.
|
// getNewLine is too unpredictable/nuanced to be used as part of a public API promise so it isn't exported.
|
||||||
func getNewLine(s string) (nl string) {
|
func getNewLine(s string) (nl string) {
|
||||||
|
|
||||||
if strings.HasSuffix(s, "\r\n") {
|
if strings.HasSuffix(s, "\r\n") {
|
||||||
|
|||||||
@@ -37,113 +37,6 @@ type (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestIndent(t *testing.T) {
|
|
||||||
|
|
||||||
var out string
|
|
||||||
var tests []testIndentSet = []testIndentSet{
|
|
||||||
testIndentSet{
|
|
||||||
name: "standard, no trailing newline",
|
|
||||||
orig: "foo\nbar\nbaz",
|
|
||||||
indent: "",
|
|
||||||
lvl: 1,
|
|
||||||
ws: false,
|
|
||||||
empty: false,
|
|
||||||
tgt: "\tfoo\n\tbar\n\tbaz",
|
|
||||||
},
|
|
||||||
testIndentSet{
|
|
||||||
name: "standard, trailing newline",
|
|
||||||
orig: "foo\nbar\nbaz\n",
|
|
||||||
indent: "",
|
|
||||||
lvl: 1,
|
|
||||||
ws: false,
|
|
||||||
empty: false,
|
|
||||||
tgt: "\tfoo\n\tbar\n\tbaz\n",
|
|
||||||
},
|
|
||||||
testIndentSet{
|
|
||||||
name: "standard, trailing newline with empty",
|
|
||||||
orig: "foo\nbar\nbaz\n",
|
|
||||||
indent: "",
|
|
||||||
lvl: 1,
|
|
||||||
ws: false,
|
|
||||||
empty: true,
|
|
||||||
tgt: "\tfoo\n\tbar\n\tbaz\n\t",
|
|
||||||
},
|
|
||||||
testIndentSet{
|
|
||||||
name: "standard, trailing newline with ws",
|
|
||||||
orig: "foo\nbar\nbaz\n",
|
|
||||||
indent: "",
|
|
||||||
lvl: 1,
|
|
||||||
ws: true,
|
|
||||||
empty: false,
|
|
||||||
tgt: "\tfoo\n\tbar\n\tbaz\n",
|
|
||||||
},
|
|
||||||
testIndentSet{
|
|
||||||
name: "standard, trailing newline with ws and empty",
|
|
||||||
orig: "foo\nbar\nbaz\n",
|
|
||||||
indent: "",
|
|
||||||
lvl: 1,
|
|
||||||
ws: true,
|
|
||||||
empty: true,
|
|
||||||
tgt: "\tfoo\n\tbar\n\tbaz\n\t",
|
|
||||||
},
|
|
||||||
testIndentSet{
|
|
||||||
name: "standard, trailing ws newline with empty",
|
|
||||||
orig: "foo\nbar\nbaz\n ",
|
|
||||||
indent: "",
|
|
||||||
lvl: 1,
|
|
||||||
ws: false,
|
|
||||||
empty: true,
|
|
||||||
tgt: "\tfoo\n\tbar\n\tbaz\n ",
|
|
||||||
},
|
|
||||||
testIndentSet{
|
|
||||||
name: "standard, trailing ws newline with ws",
|
|
||||||
orig: "foo\nbar\nbaz\n ",
|
|
||||||
indent: "",
|
|
||||||
lvl: 1,
|
|
||||||
ws: true,
|
|
||||||
empty: false,
|
|
||||||
tgt: "\tfoo\n\tbar\n\tbaz\n\t ",
|
|
||||||
},
|
|
||||||
testIndentSet{
|
|
||||||
name: "standard, trailing ws newline with ws and empty",
|
|
||||||
orig: "foo\nbar\nbaz\n \n",
|
|
||||||
indent: "",
|
|
||||||
lvl: 1,
|
|
||||||
ws: true,
|
|
||||||
empty: true,
|
|
||||||
tgt: "\tfoo\n\tbar\n\tbaz\n\t \n\t",
|
|
||||||
},
|
|
||||||
testIndentSet{
|
|
||||||
name: "comment",
|
|
||||||
orig: "foo\nbar\nbaz",
|
|
||||||
indent: "# ",
|
|
||||||
lvl: 1,
|
|
||||||
ws: false,
|
|
||||||
empty: false,
|
|
||||||
tgt: "# foo\n# bar\n# baz",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for idx, ts := range tests {
|
|
||||||
out = Indent(ts.orig, ts.indent, ts.lvl, ts.ws, ts.empty)
|
|
||||||
if out == ts.tgt {
|
|
||||||
t.Logf("[%d] OK (%s): %#v: got %#v", idx, ts.name, ts.orig, out)
|
|
||||||
} else {
|
|
||||||
t.Errorf(
|
|
||||||
"[%d] FAIL (%s): %#v (len %d):\n"+
|
|
||||||
"\t\t\texpected (len %d): %#v\n"+
|
|
||||||
"\t\t\tgot (len %d): %#v\n"+
|
|
||||||
"\t\t%#v",
|
|
||||||
idx, ts.name, ts.orig, len(ts.orig),
|
|
||||||
len(ts.tgt), ts.tgt,
|
|
||||||
len(out), out,
|
|
||||||
ts,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRedact(t *testing.T) {
|
func TestRedact(t *testing.T) {
|
||||||
|
|
||||||
var out string
|
var out string
|
||||||
|
|||||||
4
timex/doc.go
Normal file
4
timex/doc.go
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
/*
|
||||||
|
Package timex provides some handy [time]-related functions.
|
||||||
|
*/
|
||||||
|
package timex
|
||||||
35
timex/funcs.go
Normal file
35
timex/funcs.go
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
package timex
|
||||||
|
|
||||||
|
import (
|
||||||
|
`time`
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
F64Seconds returns [time.Time] `t` as a 64-bit float of <seconds>.<nanoseconds>
|
||||||
|
(where <nanoseconds> is the number of nanoseconds since <seconds>,
|
||||||
|
and <seconds> is the number of seconds since the UNIX epoch).
|
||||||
|
|
||||||
|
This can be used to represent a UNIX Epoch timestamp as seconds but with nanosecond precision.
|
||||||
|
*/
|
||||||
|
func F64Seconds(t time.Time) (f64 float64) {
|
||||||
|
return F64Nanoseconds(t) / float64(time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
F64Milliseconds is like [F64Seconds] but with a millisecond integer.
|
||||||
|
*/
|
||||||
|
func F64Milliseconds(t time.Time) (f64 float64) {
|
||||||
|
return F64Nanoseconds(t) / float64(time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
F64Microseconds is like [F64Seconds] but with a microsecond integer.
|
||||||
|
*/
|
||||||
|
func F64Microseconds(t time.Time) (f64 float64) {
|
||||||
|
return F64Nanoseconds(t) / float64(time.Microsecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
// F64Nanoseconds returns [time.Time.UnixNano] as a float64.
|
||||||
|
func F64Nanoseconds(t time.Time) (f64 float64) {
|
||||||
|
return float64(t.UnixNano())
|
||||||
|
}
|
||||||
30
timex/funcs_test.go
Normal file
30
timex/funcs_test.go
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
package timex
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
`time`
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestF64(t *testing.T) {
|
||||||
|
|
||||||
|
var tmNano float64 = 1766533329999999999
|
||||||
|
var tmSeconds float64 = 1766533329.999999999
|
||||||
|
var tmMilli float64 = 1766533329999.999999
|
||||||
|
var tmMicro float64 = 1766533329999999.999
|
||||||
|
// 2025-12-23 23:42:09.999999999 +0000 UTC
|
||||||
|
var tm time.Time = time.Unix(1766533329, int64(time.Second-1))
|
||||||
|
|
||||||
|
if F64Seconds(tm) != tmSeconds {
|
||||||
|
t.Fatalf("Failed seconds: %f != %f", F64Seconds(tm), tmSeconds)
|
||||||
|
}
|
||||||
|
if F64Milliseconds(tm) != tmMilli {
|
||||||
|
t.Fatalf("Failed milliseconds: %f != %f", F64Milliseconds(tm), tmMilli)
|
||||||
|
}
|
||||||
|
if F64Microseconds(tm) != tmMicro {
|
||||||
|
t.Fatalf("Failed microseconds: %f != %f", F64Microseconds(tm), tmMicro)
|
||||||
|
}
|
||||||
|
if F64Nanoseconds(tm) != tmNano {
|
||||||
|
t.Fatalf("Failed nanoseconds: %f != %f", F64Nanoseconds(tm), tmNano)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
6
tplx/consts.go
Normal file
6
tplx/consts.go
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
package tplx
|
||||||
|
|
||||||
|
const (
|
||||||
|
TplTypeText tplType = iota
|
||||||
|
TplTypeHtml
|
||||||
|
)
|
||||||
4
tplx/doc.go
Normal file
4
tplx/doc.go
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
/*
|
||||||
|
Package tplx provides some "shortcuts" to [text/template] and [html/template] rendering.
|
||||||
|
*/
|
||||||
|
package tplx
|
||||||
9
tplx/errs.go
Normal file
9
tplx/errs.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
package tplx
|
||||||
|
|
||||||
|
import (
|
||||||
|
`errors`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidTplType = errors.New("unknown/invalid template type")
|
||||||
|
)
|
||||||
235
tplx/funcs.go
Normal file
235
tplx/funcs.go
Normal file
@@ -0,0 +1,235 @@
|
|||||||
|
package tplx
|
||||||
|
|
||||||
|
import (
|
||||||
|
`bytes`
|
||||||
|
htmlTpl `html/template`
|
||||||
|
txtTpl `text/template`
|
||||||
|
)
|
||||||
|
|
||||||
|
// MustTplStrToStr wraps [TplStrToStr] but will panic on a non-nil error instead of returning it.
|
||||||
|
func MustTplStrToStr(tplStr string, typ tplType, obj any) (s string) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if s, err = TplStrToStr(tplStr, typ, obj); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustTplToStr wraps [TplToStr] but will panic on error instead of returning it.
|
||||||
|
func MustTplToStr[T Template](tpl T, obj any) (s string) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if s, err = TplToStr(tpl, obj); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustTplToStrWith wraps [TplToStrWith] but will panic on error instead of returning it.
|
||||||
|
func MustTplToStrWith[T Template](tpl T, tplNm string, obj any) (s string) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if s, err = TplToStrWith(tpl, tplNm, obj); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
TplStrToStr takes in a template string, a template type (see i.e. [TplTypeText], [TplTypeHtml]),
|
||||||
|
and an object and renders to a string.
|
||||||
|
|
||||||
|
This is obviously quite inflexible - there's no way to provide a [text/template.FuncMap]/[html/template.FuncMap],
|
||||||
|
for instance, but if more advanced template features aren't needed then this might just do the trick.
|
||||||
|
|
||||||
|
If you need something more flexible, see [TplToStr] instead.
|
||||||
|
*/
|
||||||
|
func TplStrToStr(tplStr string, typ tplType, obj any) (out string, err error) {
|
||||||
|
|
||||||
|
var ttpl *txtTpl.Template
|
||||||
|
var htpl *htmlTpl.Template
|
||||||
|
var buf *bytes.Buffer = new(bytes.Buffer)
|
||||||
|
|
||||||
|
switch typ {
|
||||||
|
case TplTypeText:
|
||||||
|
if ttpl, err = txtTpl.New("").Parse(tplStr); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = ttpl.Execute(buf, obj); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case TplTypeHtml:
|
||||||
|
if htpl, err = htmlTpl.New("").Parse(tplStr); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = htpl.Execute(buf, obj); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
err = ErrInvalidTplType
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out = buf.String()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
TplToStr takes in an [html/template] or [text/template] and an object and executes it.
|
||||||
|
|
||||||
|
PLEASE NOTE that it is expected that `tpl` has already had at least one template string `.Parse()`'d in.
|
||||||
|
|
||||||
|
If you haven't used generics in Golang yet, this function would be used via something like the following complete example
|
||||||
|
for both a [text/template.Template] (import-aliased as `txtT.Template`) and
|
||||||
|
an [html/template.Template] (import-aliased as `htmlT.Template`).
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
txtT "text/template"
|
||||||
|
htmlT "html/template"
|
||||||
|
|
||||||
|
`r00t2.io/goutils/tplx`
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
S struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
tTpl *txtT.Template
|
||||||
|
hTpl *htmlT.Template
|
||||||
|
)
|
||||||
|
|
||||||
|
const tTplStr string = "Greetings, {{ .Name }}!\n"
|
||||||
|
const hTplStr string = `<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>Hello, {{ .Name }}!</title>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<p>Hello, {{ .Name }}. Good to see you.</p>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
`
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var err error
|
||||||
|
var s string
|
||||||
|
var o *S
|
||||||
|
|
||||||
|
o = &S{
|
||||||
|
Name: "Bob",
|
||||||
|
}
|
||||||
|
|
||||||
|
// A text template.
|
||||||
|
if tTpl, err = txtT.
|
||||||
|
New("my_txt_template").
|
||||||
|
Parse(tTplStr); err != nil {
|
||||||
|
log.Panicf("Failed to parse text template string '%s': %v\n", tTplStr, err)
|
||||||
|
}
|
||||||
|
if s, err = tplx.TplToStr[*txtT.Template](tTpl, o); err != nil {
|
||||||
|
log.Panicf("Failed to render text template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
fmt.Println(s)
|
||||||
|
|
||||||
|
// An HTML template.
|
||||||
|
if hTpl, err = htmlT.
|
||||||
|
New("index.html").
|
||||||
|
Parse(hTplStr); err != nil {
|
||||||
|
log.Panicf("Failed to parse HTML template string '%s': %v\n", hTplStr, err)
|
||||||
|
}
|
||||||
|
if s, err = tplx.TplToStr[*htmlT.Template](hTpl, o); err != nil {
|
||||||
|
log.Panicf("Failed to render HTML template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
fmt.Println(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
Additionally, because this function uses a union type [Template],
|
||||||
|
you can even leave the type indicator off.
|
||||||
|
For example:
|
||||||
|
|
||||||
|
// ...
|
||||||
|
if s, err = tplx.TplToStr(tTpl, o); err != nil {
|
||||||
|
log.Panicf("Failed to render text template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
// ...
|
||||||
|
if s, err = tplx.TplToStr(hTpl, o); err != nil {
|
||||||
|
log.Panicf("Failed to render HTML template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
// ...
|
||||||
|
|
||||||
|
However, this is not recommended for readability purposes - including
|
||||||
|
the type indicator indicates (heh heh) to others reading your code
|
||||||
|
what type `tTpl` and `hTpl` are without needing to cross-reference
|
||||||
|
their declaration/assignment/definition.
|
||||||
|
|
||||||
|
For more information on generics in Golang, see:
|
||||||
|
|
||||||
|
* The introductory [blog post]
|
||||||
|
* The official [tutorial]
|
||||||
|
* The syntax [reference doc]
|
||||||
|
* The (community-maintained/unofficial) [Go by Example: Generics]
|
||||||
|
|
||||||
|
[blog post]: https://go.dev/blog/intro-generics
|
||||||
|
[tutorial]: https://go.dev/doc/tutorial/generics
|
||||||
|
[reference doc]: https://go.dev/ref/spec#Instantiations
|
||||||
|
[Go by Example: Generics]: https://gobyexample.com/generics
|
||||||
|
*/
|
||||||
|
func TplToStr[T Template](tpl T, obj any) (out string, err error) {
|
||||||
|
|
||||||
|
var buf *bytes.Buffer = new(bytes.Buffer)
|
||||||
|
|
||||||
|
if err = tpl.Execute(buf, obj); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out = buf.String()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
TplToStrWith functions the exact same as [TplToStr] but allows you to specify the
|
||||||
|
template entry point (template name) named `nm`.
|
||||||
|
|
||||||
|
For example (see [TplToStr] for a full example):
|
||||||
|
|
||||||
|
// ...
|
||||||
|
var tplNm string = "index.html"
|
||||||
|
|
||||||
|
if s, err = tplx.TplToStrWith(tTpl, tplNm, o); err != nil {
|
||||||
|
log.Panicf("Failed to render HTML template '%s' to string: %v\n", tplNm, err)
|
||||||
|
}
|
||||||
|
// ...
|
||||||
|
|
||||||
|
would call the equivalent of:
|
||||||
|
|
||||||
|
// ...
|
||||||
|
if err = tpl.ExecuteTemplate(<internal buffer>, tplNm, o); err != nil {
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
func TplToStrWith[T Template](tpl T, tplNm string, obj any) (out string, err error) {
|
||||||
|
|
||||||
|
var buf *bytes.Buffer = new(bytes.Buffer)
|
||||||
|
|
||||||
|
if err = tpl.ExecuteTemplate(buf, tplNm, obj); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out = buf.String()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
103
tplx/funcs_test.go
Normal file
103
tplx/funcs_test.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
package tplx
|
||||||
|
|
||||||
|
import (
|
||||||
|
htmlT `html/template`
|
||||||
|
`log`
|
||||||
|
"testing"
|
||||||
|
txtT `text/template`
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
txtTplNm string = "my_txt_template"
|
||||||
|
htmlTplNm string = "index.html"
|
||||||
|
tgtTxt string = "Greetings, Bob!\n"
|
||||||
|
tgtHtml string = "<!DOCTYPE html>\n<html lang=\"en\">\n\t<head>\n\t\t<meta charset=\"utf-8\">\n\t\t<title>Hello, Bob!</title>\n\t</head>\n\t<body>\n\t\t<p>Hello, Bob. Good to see you.</p>\n\t</body>\n</html>\n"
|
||||||
|
tTplStr string = "Greetings, {{ .Name }}!\n"
|
||||||
|
hTplStr string = `<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>Hello, {{ .Name }}!</title>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<p>Hello, {{ .Name }}. Good to see you.</p>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
tTpl *txtT.Template = txtT.Must(txtT.New(txtTplNm).Parse(tTplStr))
|
||||||
|
hTpl *htmlT.Template = htmlT.Must(htmlT.New(htmlTplNm).Parse(hTplStr))
|
||||||
|
o struct{ Name string } = struct{ Name string }{
|
||||||
|
Name: "Bob",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTpl(t *testing.T) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var s string
|
||||||
|
|
||||||
|
// if s, err = TplToStr[*txtT.Template](tTpl, o); err != nil {
|
||||||
|
if s, err = TplToStr(tTpl, o); err != nil {
|
||||||
|
t.Fatalf("Failed to render text template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
t.Logf("Text template (%#v): '%s'", s, s)
|
||||||
|
if s != tgtTxt {
|
||||||
|
t.Fatalf("Mismatch on text template '%s'", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// if s, err = TplToStr[*htmlT.Template](hTpl, o); err != nil {
|
||||||
|
if s, err = TplToStr(hTpl, o); err != nil {
|
||||||
|
log.Panicf("Failed to render HTML template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
t.Logf("HTML template (%#v):\n%s", s, s)
|
||||||
|
if s != tgtHtml {
|
||||||
|
t.Fatalf("Mismatch on HTML template '%s'", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTplStr(t *testing.T) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var s string
|
||||||
|
|
||||||
|
if s, err = TplStrToStr(tTplStr, TplTypeText, o); err != nil {
|
||||||
|
t.Fatalf("Failed to render text template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
t.Logf("Text template (%#v): '%s'", s, s)
|
||||||
|
if s != tgtTxt {
|
||||||
|
t.Fatalf("Mismatch on text template '%s'", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
if s, err = TplStrToStr(hTplStr, TplTypeHtml, o); err != nil {
|
||||||
|
log.Panicf("Failed to render HTML template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
t.Logf("HTML template (%#v):\n%s", s, s)
|
||||||
|
if s != tgtHtml {
|
||||||
|
t.Fatalf("Mismatch on HTML template '%s'", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTplWith(t *testing.T) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var s string
|
||||||
|
|
||||||
|
if s, err = TplToStrWith(tTpl, txtTplNm, o); err != nil {
|
||||||
|
t.Fatalf("Failed to render text template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
t.Logf("Text template (%#v): '%s'", s, s)
|
||||||
|
if s != tgtTxt {
|
||||||
|
t.Fatalf("Mismatch on text template '%s'", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
if s, err = TplToStrWith(hTpl, htmlTplNm, o); err != nil {
|
||||||
|
log.Panicf("Failed to render HTML template to string: %v\n", err)
|
||||||
|
}
|
||||||
|
t.Logf("HTML template (%#v):\n%s", s, s)
|
||||||
|
if s != tgtHtml {
|
||||||
|
t.Fatalf("Mismatch on HTML template '%s'", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
631
tplx/sprigx/README.adoc
Normal file
631
tplx/sprigx/README.adoc
Normal file
@@ -0,0 +1,631 @@
|
|||||||
|
= SprigX
|
||||||
|
Brent Saner <bts@square-r00t.net>
|
||||||
|
Last rendered {localdatetime}
|
||||||
|
:doctype: book
|
||||||
|
:docinfo: shared
|
||||||
|
:data-uri:
|
||||||
|
:imagesdir: images
|
||||||
|
:sectlinks:
|
||||||
|
:sectnums:
|
||||||
|
:sectnumlevels: 7
|
||||||
|
:toc: preamble
|
||||||
|
:toc2: left
|
||||||
|
:idprefix:
|
||||||
|
:toclevels: 7
|
||||||
|
:source-highlighter: rouge
|
||||||
|
:docinfo: shared
|
||||||
|
|
||||||
|
[id="wat"]
|
||||||
|
== What is SprigX?
|
||||||
|
SprigX are extensions to https://masterminds.github.io/sprig/[the `sprig` library^] (https://pkg.go.dev/github.com/Masterminds/sprig/v3[Go docs^]).
|
||||||
|
|
||||||
|
They provide functions that offer more enriched use cases and domain-specific data.
|
||||||
|
|
||||||
|
[id="use"]
|
||||||
|
== How do I Use SprigX?
|
||||||
|
|
||||||
|
[%collapsible]
|
||||||
|
.The same way you would `sprig`!
|
||||||
|
====
|
||||||
|
[source,go]
|
||||||
|
----
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
htmlTplLib "html/template"
|
||||||
|
txtTplLib "text/template"
|
||||||
|
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
txtTpl *txtTplLib.Template = txtTplLib.
|
||||||
|
New("").
|
||||||
|
Funcs(
|
||||||
|
sprigx.TxtFuncMap(),
|
||||||
|
)
|
||||||
|
htmlTpl *htmlTplLib.Template = htmlTplLib.
|
||||||
|
New("").
|
||||||
|
Funcs(
|
||||||
|
sprigx.HtmlFuncMap(),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
----
|
||||||
|
====
|
||||||
|
|
||||||
|
[%collapsible]
|
||||||
|
.They can even be combined/used together.
|
||||||
|
====
|
||||||
|
[source,go]
|
||||||
|
----
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
var txtTpl *template.Template = template.
|
||||||
|
New("").
|
||||||
|
Funcs(
|
||||||
|
sprigx.TxtFuncMap(),
|
||||||
|
).
|
||||||
|
Funcs(
|
||||||
|
sprig.TxtFuncMap(),
|
||||||
|
)
|
||||||
|
// Or:
|
||||||
|
/*
|
||||||
|
var txtTpl *template.Template = template.
|
||||||
|
New("").
|
||||||
|
Funcs(
|
||||||
|
sprig.TxtFuncMap(),
|
||||||
|
).
|
||||||
|
Funcs(
|
||||||
|
sprigx.TxtFuncMap(),
|
||||||
|
)
|
||||||
|
*/
|
||||||
|
----
|
||||||
|
====
|
||||||
|
|
||||||
|
If a `<template>.FuncMap` is added via `.Funcs()` *after* template parsing, it will override any functions of the same name of a `<template>.FuncMap` *before* parsing.
|
||||||
|
|
||||||
|
For example, if both `sprig` and `sprigx` provide a function `foo`:
|
||||||
|
|
||||||
|
[%collapsible]
|
||||||
|
.this will use `foo` from `sprigx`
|
||||||
|
====
|
||||||
|
[source,go]
|
||||||
|
----
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
myTpl string = `{{ "This is an example template string." | foo }}`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
tpl *template.Template = template.Must(
|
||||||
|
template.
|
||||||
|
New("").
|
||||||
|
Funcs(sprig.TxtFuncMap()).
|
||||||
|
Parse(myTpl),
|
||||||
|
).
|
||||||
|
Funcs(sprigx.TxtFuncMap())
|
||||||
|
)
|
||||||
|
----
|
||||||
|
====
|
||||||
|
|
||||||
|
whereas
|
||||||
|
|
||||||
|
[%collapsible]
|
||||||
|
.this will use `foo` from `sprig`
|
||||||
|
====
|
||||||
|
[source,go]
|
||||||
|
----
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
myTpl string = `{{ "This is an example template string." | foo }}`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
tpl *template.Template = template.Must(
|
||||||
|
template.
|
||||||
|
New("").
|
||||||
|
Funcs(sprigx.TxtFuncMap()).
|
||||||
|
Parse(myTpl),
|
||||||
|
).
|
||||||
|
Funcs(sprig.TxtFuncMap())
|
||||||
|
)
|
||||||
|
----
|
||||||
|
====
|
||||||
|
|
||||||
|
and a function can even be
|
||||||
|
|
||||||
|
[%collapsible]
|
||||||
|
.explicitly overridden.
|
||||||
|
====
|
||||||
|
This would override a function `foo` and `foo2` in `sprigx` from `foo` and `foo2` from `sprig`, but leave all other `sprig` functions untouched.
|
||||||
|
|
||||||
|
[source,go]
|
||||||
|
----
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
myTpl string = `{{ "This is an example template string." | foo }}`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
overrideFuncs template.FuncMap = sprig.TxtFuncMap()
|
||||||
|
tpl *template.Template = template.Must(
|
||||||
|
template.
|
||||||
|
New("").
|
||||||
|
Funcs(sprigx.TxtFuncMap()).
|
||||||
|
Parse(myTpl),
|
||||||
|
).
|
||||||
|
Funcs(
|
||||||
|
template.FuncMap(
|
||||||
|
map[string]any{
|
||||||
|
"foo": overrideFuncs["foo"],
|
||||||
|
"foo2": overrideFuncs["foo2"],
|
||||||
|
},
|
||||||
|
),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
----
|
||||||
|
====
|
||||||
|
|
||||||
|
[id="fn"]
|
||||||
|
== Functions
|
||||||
|
Expect this list to grow over time, and potentially more frequently than the `sprig` functions.
|
||||||
|
|
||||||
|
[id="fn_sys"]
|
||||||
|
=== System/OS/Platform
|
||||||
|
|
||||||
|
[id="fn_sys_arch"]
|
||||||
|
==== `sysArch`
|
||||||
|
Returns the https://pkg.go.dev/runtime#GOARCH[`runtime.GOARCH`^] constant.
|
||||||
|
|
||||||
|
[id="fn_sys_numcpu"]
|
||||||
|
==== `sysNumCpu`
|
||||||
|
Returns the value from https://pkg.go.dev/runtime#NumCPU[`runtime.NumCPU`^].
|
||||||
|
|
||||||
|
[id="fn_sys_os"]
|
||||||
|
==== `sysOsName`
|
||||||
|
Returns the https://pkg.go.dev/runtime#GOOS[`runtime.GOOS`^] constant.
|
||||||
|
|
||||||
|
[id="fn_sys_rntm"]
|
||||||
|
==== `sysRuntime`
|
||||||
|
This function returns a `map[string]string` of various information from the https://pkg.go.dev/runtime[`runtime` stdlib library^].
|
||||||
|
|
||||||
|
Specifically, the following are returned.
|
||||||
|
|
||||||
|
[TIP]
|
||||||
|
====
|
||||||
|
The value type is a direct link to the `runtime` documentation providing more detail about the associated value.
|
||||||
|
|
||||||
|
Because all values are mapped as strings, they can be converted back to their native type via e.g. the https://masterminds.github.io/sprig/conversion.html[Sprig conversion functions^] if necessary.
|
||||||
|
====
|
||||||
|
|
||||||
|
.`sysRuntime` Values
|
||||||
|
[cols="^.^3m,^.^3",options="header"]
|
||||||
|
|===
|
||||||
|
| Key | Value Type
|
||||||
|
|
||||||
|
| compiler | https://pkg.go.dev/runtime#Compiler[string^]
|
||||||
|
| arch | https://pkg.go.dev/runtime#GOARCH[string^]
|
||||||
|
| os | https://pkg.go.dev/runtime#GOOS[string^]
|
||||||
|
| maxprocs | https://pkg.go.dev/runtime#GOMAXPROCS[int^] footnote:[For safety concerns, `sprigx` does not allow *setting* `GOMAXPROCS`, this value only contains the *current* `GOMAXPROCS` value.]
|
||||||
|
| cpu_cnt | https://pkg.go.dev/runtime#NumCPU[int^]
|
||||||
|
| num_cgo | https://pkg.go.dev/runtime#NumCgoCall[int^]
|
||||||
|
| num_go | https://pkg.go.dev/runtime#NumGoroutine[int^]
|
||||||
|
| go_ver | https://pkg.go.dev/runtime#Version[string^]
|
||||||
|
|===
|
||||||
|
|
||||||
|
As a convenience, some of these values also have their own dedicated functions as well:
|
||||||
|
|
||||||
|
* <<fn_sys_arch>>
|
||||||
|
* <<fn_sys_numcpu>>
|
||||||
|
* <<fn_sys_os>>
|
||||||
|
|
||||||
|
[id="fn_path"]
|
||||||
|
=== Paths
|
||||||
|
|
||||||
|
[id="fn_path_gnrc"]
|
||||||
|
==== Generic
|
||||||
|
These operate similar to https://pkg.go.dev/path[the `path` stdlib library^] and use a fixed `/` path separator.
|
||||||
|
|
||||||
|
[id="fn_path_gnrc_pj"]
|
||||||
|
===== `pathJoin`
|
||||||
|
`pathJoin` operates *exactly* like https://pkg.go.dev/path#Join[`path.Join`^] in stdlib.
|
||||||
|
|
||||||
|
[WARNING]
|
||||||
|
====
|
||||||
|
If you are joining paths in a pipeline, you almost assuredly want <<fn_path_gnrc_ppj>> or <<fn_path_gnrc_pspj>> instead unless you are explicitly *appending* a pipeline result to a path.
|
||||||
|
====
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- pathJoin "a" "b" "c" }}
|
||||||
|
{{- pathJoin "/" "a" "b" "c" }}
|
||||||
|
{{- pathJoin "/a/b" "c" }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[source,text]
|
||||||
|
----
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
----
|
||||||
|
|
||||||
|
[id="fn_path_gnrc_ppj"]
|
||||||
|
===== `pathPipeJoin`
|
||||||
|
`pathPipeJoin` operates like <<fn_path_gnrc_pj>> with one deviation: the root/base path is expected to be *last* in the arguments.
|
||||||
|
|
||||||
|
This makes it much more suitable for use in template pipelines, as the previous value in a pipeline is passed in as the last element to the next pipe function.
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- $myBase := "/a" -}}
|
||||||
|
{{- pathPipeJoin "b" "c" "a" }}
|
||||||
|
{{- pathPipeJoin "a" "b" "c" "/" }}
|
||||||
|
{{- $myBase | pathPipeJoin "b" "c" }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[source,text]
|
||||||
|
----
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
----
|
||||||
|
|
||||||
|
[id="fn_path_gnrc_psj"]
|
||||||
|
===== `pathSliceJoin`
|
||||||
|
`pathSliceJoin` joins a slice of path segment strings (`[]string`) instead of a variadic sequence of strings.
|
||||||
|
|
||||||
|
[TIP]
|
||||||
|
====
|
||||||
|
The `splitList` function shown below is from the https://masterminds.github.io/sprig/string_slice.html[`sprig` string slice functions^].
|
||||||
|
====
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- $myList := "a,b,c" | splitList "," -}}
|
||||||
|
{{- $myList | pathSliceJoin }}
|
||||||
|
{{- ("a,b,c" | splitList ",") | pathSliceJoin }}
|
||||||
|
{{- ("/,a,b,c" | splitList ",") | pathSliceJoin }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[source,text]
|
||||||
|
----
|
||||||
|
a/b/c
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
----
|
||||||
|
|
||||||
|
[id="fn_path_gnrc_pspj"]
|
||||||
|
===== `pathSlicePipeJoin`
|
||||||
|
`pathSlicePipeJoin` operates like <<fn_path_gnrc_ppj>> in that it is suitable for pipeline use in which the root/base path is passed in from the pipeline, but it is like <<fn_path_gnrc_psj>> in that it then also accepts a slice of path segments (`[]string`) to append to that base path.
|
||||||
|
|
||||||
|
[TIP]
|
||||||
|
====
|
||||||
|
The `splitList` function shown below is from the https://masterminds.github.io/sprig/string_slice.html[`sprig` string slice functions^].
|
||||||
|
====
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- $myBase := "/a" -}}
|
||||||
|
{{- $myList := "b,c,d" | splitList "," -}}
|
||||||
|
{{- pathSlicePipeJoin $myList $myBase }}
|
||||||
|
{{- $myBase | pathSlicePipeJoin $myList }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[source,text]
|
||||||
|
----
|
||||||
|
/a/b/c/d
|
||||||
|
/a/b/c/d
|
||||||
|
----
|
||||||
|
|
||||||
|
[id="fn_path_gnrc_psubj"]
|
||||||
|
===== `pathSubJoin`
|
||||||
|
`pathSubJoin` operates like <<fn_path_gnrc_pj>> but it expects an explicit root/base path.
|
||||||
|
|
||||||
|
The pipeline-friendly equivalent of this is <<fn_path_gnrc_ppj>>.
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- pathSubJoin "/a/b" "c" }}
|
||||||
|
{{- pathSubJoin "/" "a" "b" "c" }}
|
||||||
|
{{- "c" | pathSubJoin "/" "a" "b" }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[source,text]
|
||||||
|
----
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
----
|
||||||
|
|
||||||
|
[id="fn_path_os"]
|
||||||
|
==== OS/Platform-Tailored
|
||||||
|
These operate similar to https://pkg.go.dev/path/filepath[the `path/filepath` stdlib library^], and use the OS-specific https://pkg.go.dev/os#PathSeparator[`os.PathSeparator`^].
|
||||||
|
|
||||||
|
[WARNING]
|
||||||
|
====
|
||||||
|
Take special note of the oddness around specifying Windows paths and drive letters in e.g. <<fn_path_os_pj>>!
|
||||||
|
|
||||||
|
It is recommended to make use of <<fn_sys_os>> to conditionally format path bases/roots if needed.
|
||||||
|
====
|
||||||
|
|
||||||
|
[id="fn_path_os_pj"]
|
||||||
|
===== `osPathJoin`
|
||||||
|
`osPathJoin` operates *exactly* like https://pkg.go.dev/path/filepath#Join[`path/filepath.Join`^] in stdlib.
|
||||||
|
|
||||||
|
[WARNING]
|
||||||
|
====
|
||||||
|
If you are joining paths in a pipeline, you almost assuredly want <<fn_path_os_ppj>> or <<fn_path_os_pspj>> instead unless you are explicitly *appending* a pipeline result to a path.
|
||||||
|
====
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- osPathJoin "a" "b" "c" }}
|
||||||
|
{{- osPathJoin "/" "a" "b" "c" }}
|
||||||
|
{{- osPathJoin "C:\\" "a" "b" "c" }}
|
||||||
|
{{- osPathJoin "C:" "a" "b" "c" }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[cols="^.^2,.^4a",options="header"]
|
||||||
|
|===
|
||||||
|
| OS ^| Result
|
||||||
|
|
||||||
|
| Windows | [source,text]
|
||||||
|
----
|
||||||
|
a\b\c
|
||||||
|
\a\b\c
|
||||||
|
\a\b\c
|
||||||
|
C:\a\b\c
|
||||||
|
C:a\b\c
|
||||||
|
----
|
||||||
|
| Others (e.g. Linux, macOS) | [source,text]
|
||||||
|
----
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
C:\/a/b/c
|
||||||
|
C:/a/b/c
|
||||||
|
----
|
||||||
|
|===
|
||||||
|
|
||||||
|
[id="fn_path_os_ppj"]
|
||||||
|
===== `osPathPipeJoin`
|
||||||
|
`osPathPipeJoin` operates like <<fn_path_gnrc_ppj>> (except using OS-specific path separators).
|
||||||
|
|
||||||
|
This makes it much more suitable for use in template pipelines, as the previous value in a pipeline is passed in as the last element to the next pipe function.
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- $myBase := "/a" -}}
|
||||||
|
{{- osPathPipeJoin "b" "c" "a" }}
|
||||||
|
{{- osPathPipeJoin "a" "b" "c" "/" }}
|
||||||
|
{{- $myBase | osPathPipeJoin "b" "c" }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[cols="^.^2,.^4a",options="header"]
|
||||||
|
|===
|
||||||
|
| OS ^| Result
|
||||||
|
|
||||||
|
| Windows | [source,text]
|
||||||
|
----
|
||||||
|
a\b\c
|
||||||
|
\a\b\c
|
||||||
|
\a\b\c
|
||||||
|
----
|
||||||
|
| Others (e.g. Linux, macOS) | [source,text]
|
||||||
|
----
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
----
|
||||||
|
|===
|
||||||
|
|
||||||
|
[id="fn_path_ossep"]
|
||||||
|
===== `osPathSep`
|
||||||
|
`osPathSep` returns the https://pkg.go.dev/os#PathSeparator[`os.PathSeparator`^] for this OS.
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- osPathSep }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[cols="^.^2,.^4a",options="header"]
|
||||||
|
|===
|
||||||
|
| OS ^| Result
|
||||||
|
|
||||||
|
| Windows | [source,text]
|
||||||
|
----
|
||||||
|
\
|
||||||
|
----
|
||||||
|
| Others (e.g. Linux, macOS) | [source,text]
|
||||||
|
----
|
||||||
|
/
|
||||||
|
----
|
||||||
|
|===
|
||||||
|
|
||||||
|
[id="fn_path_os_psj"]
|
||||||
|
===== `osPathSliceJoin`
|
||||||
|
`osPathSliceJoin` operates like <<fn_path_gnrc_psj>> but with OS-specific path separators.
|
||||||
|
|
||||||
|
[TIP]
|
||||||
|
====
|
||||||
|
The `splitList` function shown below is from the https://masterminds.github.io/sprig/string_slice.html[`sprig` string slice functions^].
|
||||||
|
====
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- $myList := "a,b,c" | splitList "," -}}
|
||||||
|
{{- $myList | osPathSliceJoin }}
|
||||||
|
{{- ("a,b,c" | splitList ",") | osPathSliceJoin }}
|
||||||
|
{{- ("/,a,b,c" | splitList ",") | osPathSliceJoin }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[cols="^.^2,.^4a",options="header"]
|
||||||
|
|===
|
||||||
|
| OS ^| Result
|
||||||
|
|
||||||
|
| Windows | [source,text]
|
||||||
|
----
|
||||||
|
a\b\c
|
||||||
|
a\b\c
|
||||||
|
\a\b\c
|
||||||
|
----
|
||||||
|
| Others (e.g. Linux, macOS) | [source,text]
|
||||||
|
----
|
||||||
|
a/b/c
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
----
|
||||||
|
|===
|
||||||
|
|
||||||
|
[id="fn_path_os_pspj"]
|
||||||
|
===== `osPathSlicePipeJoin`
|
||||||
|
`osPathSlicePipeJoin` operates like <<fn_path_gnrc_pspj>> but with OS-specific separators.
|
||||||
|
|
||||||
|
[TIP]
|
||||||
|
====
|
||||||
|
The `splitList` function shown below is from the https://masterminds.github.io/sprig/string_slice.html[`sprig` string slice functions^].
|
||||||
|
====
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- $myBase := "/a" -}}
|
||||||
|
{{- $myList := "b,c,d" | splitList "," -}}
|
||||||
|
{{- osPathSlicePipeJoin $myList $myBase }}
|
||||||
|
{{- $myBase | osPathSlicePipeJoin $myList }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[cols="^.^2,.^4a",options="header"]
|
||||||
|
|===
|
||||||
|
| OS ^| Result
|
||||||
|
|
||||||
|
| Windows | [source,text]
|
||||||
|
----
|
||||||
|
\a\b\c\d
|
||||||
|
\a\b\c\d
|
||||||
|
----
|
||||||
|
| Others (e.g. Linux, macOS) | [source,text]
|
||||||
|
----
|
||||||
|
/a/b/c/d
|
||||||
|
/a/b/c/d
|
||||||
|
----
|
||||||
|
|===
|
||||||
|
|
||||||
|
[id="fn_path_os_psubj"]
|
||||||
|
===== `osPathSubJoin`
|
||||||
|
`osPathSubJoin` operates like <<fn_path_gnrc_psubj>> but with OS-specific separators.
|
||||||
|
|
||||||
|
The pipeline-friendly equivalent of this is <<fn_path_os_ppj>>.
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{- osPathSubJoin "/a/b" "c" }}
|
||||||
|
{{- osPathSubJoin "/" "a" "b" "c" }}
|
||||||
|
{{- "c" | osPathSubJoin "/" "a" "b" }}
|
||||||
|
----
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
[cols="^.^2,.^4a",options="header"]
|
||||||
|
|===
|
||||||
|
| OS ^| Result
|
||||||
|
|
||||||
|
| Windows | [source,text]
|
||||||
|
----
|
||||||
|
\a\b\c
|
||||||
|
\a\b\c
|
||||||
|
\a\b\c
|
||||||
|
----
|
||||||
|
| Others (e.g. Linux, macOS) | [source,text]
|
||||||
|
----
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
----
|
||||||
|
|===
|
||||||
|
|
||||||
|
[id="fn_str"]
|
||||||
|
=== Strings
|
||||||
|
|
||||||
|
[id="fn_str_extindent"]
|
||||||
|
==== `extIndent`
|
||||||
|
`extIndent` allows for a MUCH more flexible indenter than the `sprig` `indent` function.
|
||||||
|
|
||||||
|
It works with both Windows (`\r\n`) and POSIX (`\n`) linebreaks.
|
||||||
|
|
||||||
|
[TIP]
|
||||||
|
====
|
||||||
|
If `<indentString>` is set to `\n` and `<levels>` is always set to `1`, this function can even be used to double-space text!
|
||||||
|
====
|
||||||
|
|
||||||
|
It has quite a few arguments, however:
|
||||||
|
|
||||||
|
[source,gotemplate]
|
||||||
|
----
|
||||||
|
{{ extIndent <levels> <skipFirst> <skipEmpty> <skipWhitespace> <indentString> <input> }}
|
||||||
|
----
|
||||||
|
|
||||||
|
Where:
|
||||||
|
|
||||||
|
* `<levels>`: The level of indentation for the text. If less than or equal to `0`, `extIndent` just returns `<input>` as-is and NO-OPs otherwise.
|
||||||
|
* `<skipFirst>`: If true, skip indenting the first line. This is particularly handy if you like to visually align your function calls in your templates.
|
||||||
|
* `<skipEmpty>`: If true, do not add an indent to *empty* lines (where an "empty line" means "only has a linebreak").
|
||||||
|
* `<skipWhitespace>`: If true, do not add an indent to lines that *only* consist of whitespace (spaces, tabs, etc.) and a linebreak.
|
||||||
|
* `<indentString>`: The string to use as the "indent character". This can be any string, such as `" "`, `"\t"`, `"."`, `"|"`, `"=="` etc.
|
||||||
|
* `<input>`: The text to be indented. Because it is the last argument, `extIndent` works with pipelined text as well.
|
||||||
|
|
||||||
|
[id="fn_dbg"]
|
||||||
|
=== Debugging
|
||||||
|
|
||||||
|
[id="fn_dbg_dump"]
|
||||||
|
==== `dump`
|
||||||
|
The `dump` function calls https://pkg.go.dev/github.com/davecgh/go-spew/spew#Sdump[the `Sdump` function^] from https://github.com/davecgh/go-spew[`go-spew`] (https://pkg.go.dev/github.com/davecgh/go-spew/spew[`github.com/davecgh/go-spew/spew`^]) for whatever object(s) is/are passed to it.
|
||||||
1532
tplx/sprigx/README.html
Normal file
1532
tplx/sprigx/README.html
Normal file
File diff suppressed because it is too large
Load Diff
656
tplx/sprigx/README.md
Normal file
656
tplx/sprigx/README.md
Normal file
@@ -0,0 +1,656 @@
|
|||||||
|
# What is SprigX?
|
||||||
|
|
||||||
|
SprigX are extensions to [the `sprig`
|
||||||
|
library](https://masterminds.github.io/sprig/) ([Go
|
||||||
|
docs](https://pkg.go.dev/github.com/Masterminds/sprig/v3)).
|
||||||
|
|
||||||
|
They provide functions that offer more enriched use cases and
|
||||||
|
domain-specific data.
|
||||||
|
|
||||||
|
# How do I Use SprigX?
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
htmlTplLib "html/template"
|
||||||
|
txtTplLib "text/template"
|
||||||
|
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
txtTpl *txtTplLib.Template = txtTplLib.
|
||||||
|
New("").
|
||||||
|
Funcs(
|
||||||
|
sprigx.TxtFuncMap(),
|
||||||
|
)
|
||||||
|
htmlTpl *htmlTplLib.Template = htmlTplLib.
|
||||||
|
New("").
|
||||||
|
Funcs(
|
||||||
|
sprigx.HtmlFuncMap(),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
var txtTpl *template.Template = template.
|
||||||
|
New("").
|
||||||
|
Funcs(
|
||||||
|
sprigx.TxtFuncMap(),
|
||||||
|
).
|
||||||
|
Funcs(
|
||||||
|
sprig.TxtFuncMap(),
|
||||||
|
)
|
||||||
|
// Or:
|
||||||
|
/*
|
||||||
|
var txtTpl *template.Template = template.
|
||||||
|
New("").
|
||||||
|
Funcs(
|
||||||
|
sprig.TxtFuncMap(),
|
||||||
|
).
|
||||||
|
Funcs(
|
||||||
|
sprigx.TxtFuncMap(),
|
||||||
|
)
|
||||||
|
*/
|
||||||
|
|
||||||
|
If a `<template>.FuncMap` is added via `.Funcs()` **after** template
|
||||||
|
parsing, it will override any functions of the same name of a
|
||||||
|
`<template>.FuncMap` **before** parsing.
|
||||||
|
|
||||||
|
For example, if both `sprig` and `sprigx` provide a function `foo`:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
myTpl string = `{{ "This is an example template string." | foo }}`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
tpl *template.Template = template.Must(
|
||||||
|
template.
|
||||||
|
New("").
|
||||||
|
Funcs(sprig.TxtFuncMap()).
|
||||||
|
Parse(myTpl),
|
||||||
|
).
|
||||||
|
Funcs(sprigx.TxtFuncMap())
|
||||||
|
)
|
||||||
|
|
||||||
|
whereas
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
myTpl string = `{{ "This is an example template string." | foo }}`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
tpl *template.Template = template.Must(
|
||||||
|
template.
|
||||||
|
New("").
|
||||||
|
Funcs(sprigx.TxtFuncMap()).
|
||||||
|
Parse(myTpl),
|
||||||
|
).
|
||||||
|
Funcs(sprig.TxtFuncMap())
|
||||||
|
)
|
||||||
|
|
||||||
|
and a function can even be
|
||||||
|
|
||||||
|
This would override a function `foo` and `foo2` in `sprigx` from `foo`
|
||||||
|
and `foo2` from `sprig`, but leave all other `sprig` functions
|
||||||
|
untouched.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
"r00t2.io/goutils/tplx/sprigx"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
myTpl string = `{{ "This is an example template string." | foo }}`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
overrideFuncs template.FuncMap = sprig.TxtFuncMap()
|
||||||
|
tpl *template.Template = template.Must(
|
||||||
|
template.
|
||||||
|
New("").
|
||||||
|
Funcs(sprigx.TxtFuncMap()).
|
||||||
|
Parse(myTpl),
|
||||||
|
).
|
||||||
|
Funcs(
|
||||||
|
template.FuncMap(
|
||||||
|
map[string]any{
|
||||||
|
"foo": overrideFuncs["foo"],
|
||||||
|
"foo2": overrideFuncs["foo2"],
|
||||||
|
},
|
||||||
|
),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Functions
|
||||||
|
|
||||||
|
Expect this list to grow over time, and potentially more frequently than
|
||||||
|
the `sprigx` functions.
|
||||||
|
|
||||||
|
## System/OS/Platform
|
||||||
|
|
||||||
|
### `sysArch`
|
||||||
|
|
||||||
|
Returns the [`runtime.GOARCH`](https://pkg.go.dev/runtime#GOARCH)
|
||||||
|
constant.
|
||||||
|
|
||||||
|
### `sysNumCpu`
|
||||||
|
|
||||||
|
Returns the value from
|
||||||
|
[`runtime.NumCPU`](https://pkg.go.dev/runtime#NumCPU).
|
||||||
|
|
||||||
|
### `sysOsName`
|
||||||
|
|
||||||
|
Returns the [`runtime.GOOS`](https://pkg.go.dev/runtime#GOOS) constant.
|
||||||
|
|
||||||
|
### `sysRuntime`
|
||||||
|
|
||||||
|
This function returns a `map[string]string` of various information from
|
||||||
|
the [`runtime` stdlib library](https://pkg.go.dev/runtime).
|
||||||
|
|
||||||
|
Specifically, the following are returned.
|
||||||
|
|
||||||
|
The value type is a direct link to the `runtime` documentation providing
|
||||||
|
more detail about the associated value.
|
||||||
|
|
||||||
|
Because all values are mapped as strings, they can be converted back to
|
||||||
|
their native type via e.g. the [Sprig conversion
|
||||||
|
functions](https://masterminds.github.io/sprig/conversion.html) if
|
||||||
|
necessary.
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<caption><code>sysRuntime</code> Values</caption>
|
||||||
|
<colgroup>
|
||||||
|
<col style="width: 50%" />
|
||||||
|
<col style="width: 50%" />
|
||||||
|
</colgroup>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th style="text-align: center;">Key</th>
|
||||||
|
<th style="text-align: center;">Value Type</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p><code>compiler</code></p></td>
|
||||||
|
<td style="text-align: center;"><p><a
|
||||||
|
href="https://pkg.go.dev/runtime#Compiler">string</a></p></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p><code>arch</code></p></td>
|
||||||
|
<td style="text-align: center;"><p><a
|
||||||
|
href="https://pkg.go.dev/runtime#GOARCH">string</a></p></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p><code>os</code></p></td>
|
||||||
|
<td style="text-align: center;"><p><a
|
||||||
|
href="https://pkg.go.dev/runtime#GOOS">string</a></p></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p><code>maxprocs</code></p></td>
|
||||||
|
<td style="text-align: center;"><p><a
|
||||||
|
href="https://pkg.go.dev/runtime#GOMAXPROCS">int</a> <a href="#fn1"
|
||||||
|
class="footnote-ref" id="fnref1"
|
||||||
|
role="doc-noteref"><sup>1</sup></a></p></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p><code>cpu_cnt</code></p></td>
|
||||||
|
<td style="text-align: center;"><p><a
|
||||||
|
href="https://pkg.go.dev/runtime#NumCPU">int</a></p></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p><code>num_cgo</code></p></td>
|
||||||
|
<td style="text-align: center;"><p><a
|
||||||
|
href="https://pkg.go.dev/runtime#NumCgoCall">int</a></p></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p><code>num_go</code></p></td>
|
||||||
|
<td style="text-align: center;"><p><a
|
||||||
|
href="https://pkg.go.dev/runtime#NumGoroutine">int</a></p></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p><code>go_ver</code></p></td>
|
||||||
|
<td style="text-align: center;"><p><a
|
||||||
|
href="https://pkg.go.dev/runtime#Version">string</a></p></td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
<section id="footnotes" class="footnotes footnotes-end-of-document"
|
||||||
|
role="doc-endnotes">
|
||||||
|
<hr />
|
||||||
|
<ol>
|
||||||
|
<li id="fn1"><p>For safety concerns, <code>sprigx</code> does not allow
|
||||||
|
<strong>setting</strong> <code>GOMAXPROCS</code>, this value only
|
||||||
|
contains the <strong>current</strong> <code>GOMAXPROCS</code> value.<a
|
||||||
|
href="#fnref1" class="footnote-back" role="doc-backlink">↩︎</a></p></li>
|
||||||
|
</ol>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
As a convenience, some of these values also have their own dedicated
|
||||||
|
functions as well:
|
||||||
|
|
||||||
|
- [](#fn_sys_arch)
|
||||||
|
|
||||||
|
- [](#fn_sys_numcpu)
|
||||||
|
|
||||||
|
- [](#fn_sys_os)
|
||||||
|
|
||||||
|
## Paths
|
||||||
|
|
||||||
|
### Generic
|
||||||
|
|
||||||
|
These operate similar to [the `path` stdlib
|
||||||
|
library](https://pkg.go.dev/path) and use a fixed `/` path separator.
|
||||||
|
|
||||||
|
#### `pathJoin`
|
||||||
|
|
||||||
|
`pathJoin` operates **exactly** like
|
||||||
|
[`path.Join`](https://pkg.go.dev/path#Join) in stdlib.
|
||||||
|
|
||||||
|
If you are joining paths in a pipeline, you almost assuredly want
|
||||||
|
[](#fn_path_gnrc_ppj) or [](#fn_path_gnrc_pspj) instead.
|
||||||
|
|
||||||
|
{{- pathJoin "a" "b" "c" }}
|
||||||
|
{{- pathJoin "/" "a" "b" "c" }}
|
||||||
|
{{- pathJoin "/a/b" "c" }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
|
||||||
|
#### `pathPipeJoin`
|
||||||
|
|
||||||
|
`pathPipeJoin` operates like [](#fn_path_gnrc_pj) with one deviation:
|
||||||
|
the root/base path is expected to be **last** in the arguments.
|
||||||
|
|
||||||
|
This makes it much more suitable for use in template pipelines, as the
|
||||||
|
previous value in a pipeline is passed in as the last element to the
|
||||||
|
next pipe function.
|
||||||
|
|
||||||
|
{{- $myBase := "/a" -}}
|
||||||
|
{{- pathPipeJoin "b" "c" "a" }}
|
||||||
|
{{- pathPipeJoin "a" "b" "c" "/" }}
|
||||||
|
{{- $myBase | pathPipeJoin "b" "c" }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
|
||||||
|
#### `pathSliceJoin`
|
||||||
|
|
||||||
|
`pathSliceJoin` joins a slice of path segment strings (`[]string`)
|
||||||
|
instead of a variadic sequence of strings.
|
||||||
|
|
||||||
|
The `splitList` function shown below is from the [`sprig` string slice
|
||||||
|
functions](https://masterminds.github.io/sprig/string_slice.html).
|
||||||
|
|
||||||
|
{{- $myList := "a,b,c" | splitList "," -}}
|
||||||
|
{{- $myList | pathSliceJoin }}
|
||||||
|
{{- ("a,b,c" | splitList ",") | pathSliceJoin }}
|
||||||
|
{{- ("/,a,b,c" | splitList ",") | pathSliceJoin }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
a/b/c
|
||||||
|
a/b/c
|
||||||
|
/a/b/c
|
||||||
|
|
||||||
|
#### `pathSlicePipeJoin`
|
||||||
|
|
||||||
|
`pathSlicePipeJoin` operates like [](#fn_path_gnrc_ppj) in that it is
|
||||||
|
suitable for pipeline use in which the root/base path is passed in from
|
||||||
|
the pipeline, but it is like [](#fn_path_gnrc_psj) in that it then also
|
||||||
|
accepts a slice of path segments (`[]string`) to append to that base
|
||||||
|
path.
|
||||||
|
|
||||||
|
The `splitList` function shown below is from the [`sprig` string slice
|
||||||
|
functions](https://masterminds.github.io/sprig/string_slice.html).
|
||||||
|
|
||||||
|
{{- $myBase := "/a" -}}
{{- $myList := "b,c,d" | splitList "," -}}
{{- pathSlicePipeJoin $myList $myBase }}
{{- $myBase | pathSlicePipeJoin $myList }}

renders as:

/a/b/c/d
/a/b/c/d
|
||||||
|
|
||||||
|
#### `pathSubJoin`
|
||||||
|
|
||||||
|
`pathSubJoin` operates like [](#fn_path_gnrc_pj) but it expects an
|
||||||
|
explicit root/base path.
|
||||||
|
|
||||||
|
The pipeline-friendly equivalent of this is [](#fn_path_gnrc_ppj).
|
||||||
|
|
||||||
|
{{- pathSubJoin "/a/b" "c" }}
|
||||||
|
{{- pathSubJoin "/" "a" "b" "c" }}
|
||||||
|
{{- "c" | pathSubJoin "/" "a" "b" }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
|
||||||
|
### OS/Platform-Tailored
|
||||||
|
|
||||||
|
These operate similar to [the `path/filepath` stdlib
|
||||||
|
library](https://pkg.go.dev/path/filepath), and use the OS-specific
|
||||||
|
[`os.PathSeparator`](https://pkg.go.dev/os#PathSeparator).
|
||||||
|
|
||||||
|
Take special note of the oddness around specifying Windows paths and
|
||||||
|
drive letters in e.g. [](#fn_path_os_pj)!
|
||||||
|
|
||||||
|
It is recommended to make use of [](#fn_sys_os) to conditionally format
|
||||||
|
path bases/roots if needed.
|
||||||
|
|
||||||
|
#### `osPathJoin`
|
||||||
|
|
||||||
|
`osPathJoin` operates **exactly** like
|
||||||
|
[`path/filepath.Join`](https://pkg.go.dev/path/filepath#Join) in stdlib.
|
||||||
|
|
||||||
|
If you are joining paths in a pipeline, you almost assuredly want
|
||||||
|
[](#fn_path_os_ppj) or [](#fn_path_os_pspj) instead.
|
||||||
|
|
||||||
|
{{- osPathJoin "a" "b" "c" }}
|
||||||
|
{{- osPathJoin "/" "a" "b" "c" }}
|
||||||
|
{{- osPathJoin "C:\\" "a" "b" "c" }}
|
||||||
|
{{- osPathJoin "C:" "a" "b" "c" }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<colgroup>
|
||||||
|
<col style="width: 33%" />
|
||||||
|
<col style="width: 66%" />
|
||||||
|
</colgroup>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th style="text-align: center;">OS</th>
|
||||||
|
<th style="text-align: center;">Result</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Windows</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>a\b\c
|
||||||
|
\a\b\c
|
||||||
|
\a\b\c
|
||||||
|
C:\a\b\c
|
||||||
|
C:a\b\c</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>a/b/c
|
||||||
|
/a/b/c
|
||||||
|
C:\/a/b/c
|
||||||
|
C:/a/b/c</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
#### `osPathPipeJoin`
|
||||||
|
|
||||||
|
`osPathPipeJoin` operates like [](#fn_path_gnrc_ppj) (except using
|
||||||
|
OS-specific path separators).
|
||||||
|
|
||||||
|
This makes it much more suitable for use in template pipelines, as the
|
||||||
|
previous value in a pipeline is passed in as the last element to the
|
||||||
|
next pipe function.
|
||||||
|
|
||||||
|
{{- $myBase := "/a" -}}
|
||||||
|
{{- osPathPipeJoin "b" "c" "a" }}
|
||||||
|
{{- osPathPipeJoin "a" "b" "c" "/" }}
|
||||||
|
{{- $myBase | osPathPipeJoin "b" "c" }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<colgroup>
|
||||||
|
<col style="width: 33%" />
|
||||||
|
<col style="width: 66%" />
|
||||||
|
</colgroup>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th style="text-align: center;">OS</th>
|
||||||
|
<th style="text-align: center;">Result</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Windows</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>a\b\c
|
||||||
|
\a\b\c
|
||||||
|
\a\b\c</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
#### `osPathSep`
|
||||||
|
|
||||||
|
`osPathSep` returns the
|
||||||
|
[`os.PathSeparator`](https://pkg.go.dev/os#PathSeparator) for this OS.
|
||||||
|
|
||||||
|
{{- osPathSep }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<colgroup>
|
||||||
|
<col style="width: 33%" />
|
||||||
|
<col style="width: 66%" />
|
||||||
|
</colgroup>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th style="text-align: center;">OS</th>
|
||||||
|
<th style="text-align: center;">Result</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Windows</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>\</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>/</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
#### `osPathSliceJoin`
|
||||||
|
|
||||||
|
`osPathSliceJoin` operates like [](#fn_path_gnrc_psj) but with
|
||||||
|
OS-specific path separators.
|
||||||
|
|
||||||
|
The `splitList` function shown below is from the [`sprig` string slice
|
||||||
|
functions](https://masterminds.github.io/sprig/string_slice.html).
|
||||||
|
|
||||||
|
{{- $myList := "a,b,c" | splitList "," -}}
|
||||||
|
{{- $myList | osPathSliceJoin }}
|
||||||
|
{{- ("a,b,c" | splitList ",") | osPathSliceJoin }}
|
||||||
|
{{- ("/,a,b,c" | splitList ",") | osPathSliceJoin }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<colgroup>
|
||||||
|
<col style="width: 33%" />
|
||||||
|
<col style="width: 66%" />
|
||||||
|
</colgroup>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th style="text-align: center;">OS</th>
|
||||||
|
<th style="text-align: center;">Result</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Windows</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>a\b\c
|
||||||
|
a\b\c
|
||||||
|
\a\b\c</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>a/b/c
|
||||||
|
a/b/c
|
||||||
|
/a/b/c</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
#### `osPathSlicePipeJoin`
|
||||||
|
|
||||||
|
`osPathSlicePipeJoin` operates like [](#fn_path_gnrc_pspj) but with
|
||||||
|
OS-specific separators.
|
||||||
|
|
||||||
|
The `splitList` function shown below is from the [`sprig` string slice
|
||||||
|
functions](https://masterminds.github.io/sprig/string_slice.html).
|
||||||
|
|
||||||
|
{{- $myBase := "/a" -}}
|
||||||
|
{{- $myList := "b,c,d" | splitList "," -}}
|
||||||
|
{{- osPathSlicePipeJoin $myList $myBase }}
|
||||||
|
{{- $myBase | osPathSlicePipeJoin $myList }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<colgroup>
|
||||||
|
<col style="width: 33%" />
|
||||||
|
<col style="width: 66%" />
|
||||||
|
</colgroup>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th style="text-align: center;">OS</th>
|
||||||
|
<th style="text-align: center;">Result</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Windows</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>\a\b\c\d
|
||||||
|
\a\b\c\d</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>/a/b/c/d
|
||||||
|
/a/b/c/d</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
#### `osPathSubJoin`
|
||||||
|
|
||||||
|
`osPathSubJoin` operates like [](#fn_path_gnrc_psubj) but with
|
||||||
|
OS-specific separators.
|
||||||
|
|
||||||
|
The pipeline-friendly equivalent of this is [](#fn_path_os_ppj).
|
||||||
|
|
||||||
|
{{- osPathSubJoin "/a/b" "c" }}
|
||||||
|
{{- osPathSubJoin "/" "a" "b" "c" }}
|
||||||
|
{{- "c" | osPathSubJoin "/" "a" "b" }}
|
||||||
|
|
||||||
|
renders as:
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<colgroup>
|
||||||
|
<col style="width: 33%" />
|
||||||
|
<col style="width: 66%" />
|
||||||
|
</colgroup>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th style="text-align: center;">OS</th>
|
||||||
|
<th style="text-align: center;">Result</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Windows</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>\a\b\c
|
||||||
|
\a\b\c
|
||||||
|
\a\b\c</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td style="text-align: center;"><p>Others (e.g. Linux, macOS)</p></td>
|
||||||
|
<td style="text-align: left;"><pre class="text"><code>/a/b/c
|
||||||
|
/a/b/c
|
||||||
|
/a/b/c</code></pre></td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
## Strings
|
||||||
|
|
||||||
|
### `extIndent`
|
||||||
|
|
||||||
|
`extIndent` allows for a MUCH more flexible indenter than the `sprig`
|
||||||
|
`indent` function.
|
||||||
|
|
||||||
|
It works with both Windows (`\r\n`) and POSIX (`\n`) linebreaks.
|
||||||
|
|
||||||
|
It has quite a few arguments, however:
|
||||||
|
|
||||||
|
{{ extIndent <levels> <skipFirst> <skipEmpty> <skipWhitespace> <indentString> <input> }}
|
||||||
|
|
||||||
|
Where:
|
||||||
|
|
||||||
|
- `<levels>`: The level of indentation for the text. If less than or
|
||||||
|
equal to `0`, `extIndent` just returns `<input>` as-is and NO-OPs
|
||||||
|
otherwise.
|
||||||
|
|
||||||
|
- `<skipFirst>`: If true, skip indenting the first line. This is
|
||||||
|
particularly handy if you like to visually align your function calls
|
||||||
|
in your templates.
|
||||||
|
|
||||||
|
- `<skipEmpty>`: If true, do not add an indent to **empty** lines
|
||||||
|
(where an "empty line" means "only has a linebreak").
|
||||||
|
|
||||||
|
- `<skipWhitespace>`: If true, do not add an indent to lines that
|
||||||
|
**only** consist of whitespace (spaces, tabs, etc.) and a linebreak.
|
||||||
|
|
||||||
|
- `<indentString>`: The string to use as the "indent character". This
|
||||||
|
can be any string, such as `" "`, `"\t"`, `"."`, `"|"`, `"=="` etc.
|
||||||
|
|
||||||
|
- `<input>`: The text to be indented. Because it is the last argument,
|
||||||
|
`extIndent` works with pipelined text as well.
|
||||||
101
tplx/sprigx/_test.tpl
Normal file
101
tplx/sprigx/_test.tpl
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
################################################################################
|
||||||
|
# RUNTIME #
|
||||||
|
################################################################################
|
||||||
|
|
||||||
|
{{- $rntm := sysRuntime }}
|
||||||
|
|
||||||
|
Arch: {{ sysArch }}
|
||||||
|
CPUs: {{ sysNumCpu }}
|
||||||
|
OS: {{ sysOsName }}
|
||||||
|
|
||||||
|
RUNTIME: {{ $rntm }}
|
||||||
|
{{ range $rntmk, $rntmv := $rntm }}
|
||||||
|
{{ $rntmk }}:
|
||||||
|
{{ $rntmv }}
|
||||||
|
{{- end }}
|
||||||
|
{{ dump $rntm }}
|
||||||
|
|
||||||
|
|
||||||
|
################################################################################
|
||||||
|
# PATHS #
|
||||||
|
################################################################################
|
||||||
|
|
||||||
|
###########
|
||||||
|
# Generic #
|
||||||
|
###########
|
||||||
|
|
||||||
|
pathJoin "a" "b" "c"
|
||||||
|
{{ pathJoin "a" "b" "c" }}
|
||||||
|
|
||||||
|
pathJoin "/" "a" "b" "c"
|
||||||
|
{{ pathJoin "/" "a" "b" "c" }}
|
||||||
|
|
||||||
|
pathJoin "/a" "b" "c"
|
||||||
|
{{ pathJoin "/a" "b" "c" }}
|
||||||
|
|
||||||
|
#
|
||||||
|
|
||||||
|
pathPipeJoin "b" "c" "d" "a"
|
||||||
|
{{ pathPipeJoin "b" "c" "d" "a" }}
|
||||||
|
|
||||||
|
"a" | pathPipeJoin "b" "c" "d"
|
||||||
|
{{ "a" | pathPipeJoin "b" "c" "d"}}
|
||||||
|
#
|
||||||
|
|
||||||
|
$base := "/"
|
||||||
|
$myPsjSlice := "a,b,c" | splitList ","
|
||||||
|
pathSliceJoin $myPsjSlice
|
||||||
|
{{- $base := "/" }}
|
||||||
|
{{- $myPsjSlice := "a,b,c" | splitList "," }}
|
||||||
|
{{ pathSliceJoin $myPsjSlice }}
|
||||||
|
|
||||||
|
#
|
||||||
|
|
||||||
|
$base | pathSlicePipeJoin $myPsjSlice
|
||||||
|
{{ $base | pathSlicePipeJoin $myPsjSlice }}
|
||||||
|
|
||||||
|
#
|
||||||
|
|
||||||
|
pathSubJoin $base "a" "b" "c"
|
||||||
|
{{ pathSubJoin $base "a" "b" "c" }}
|
||||||
|
|
||||||
|
|
||||||
|
######################
|
||||||
|
# OS/System/Platform #
|
||||||
|
######################
|
||||||
|
|
||||||
|
osPathJoin "a" "b" "c"
|
||||||
|
{{ osPathJoin "a" "b" "c" }}
|
||||||
|
|
||||||
|
osPathJoin "/" "a" "b" "c"
|
||||||
|
{{ osPathJoin "/" "a" "b" "c" }}
|
||||||
|
|
||||||
|
osPathJoin "/a" "b" "c"
|
||||||
|
{{ osPathJoin "/a" "b" "c" }}
|
||||||
|
|
||||||
|
#
|
||||||
|
|
||||||
|
osPathPipeJoin "b" "c" "d" "a"
|
||||||
|
{{ osPathPipeJoin "b" "c" "d" "a" }}
|
||||||
|
|
||||||
|
"a" | osPathPipeJoin "b" "c" "d"
|
||||||
|
{{ "a" | osPathPipeJoin "b" "c" "d" }}
|
||||||
|
|
||||||
|
#
|
||||||
|
|
||||||
|
$osBase := "/"
|
||||||
|
$myOsPsjSlice := "a,b,c" | splitList ","
|
||||||
|
osPathSliceJoin $myOsPsjSlice
|
||||||
|
{{- $osBase := "/" }}
|
||||||
|
{{- $myOsPsjSlice := "a,b,c" | splitList "," }}
|
||||||
|
{{ osPathSliceJoin $myOsPsjSlice }}
|
||||||
|
|
||||||
|
#
|
||||||
|
|
||||||
|
$osBase | osPathSlicePipeJoin $myOsPsjSlice
|
||||||
|
{{ $osBase | osPathSlicePipeJoin $myOsPsjSlice }}
|
||||||
|
|
||||||
|
#
|
||||||
|
|
||||||
|
osPathSubJoin $osBase "a" "b" "c"
|
||||||
|
{{ osPathSubJoin $osBase "a" "b" "c" }}
|
||||||
40
tplx/sprigx/consts.go
Normal file
40
tplx/sprigx/consts.go
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
package sprigx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// genericMap holds functions usable/intended for use in either an [html/template.FuncMap] or [text/template.FuncMap].
	genericMap map[string]any = map[string]any{
		// Debugging
		"dump": dump,
		// Strings
		"extIndent": extIndent, // PR in: https://github.com/Masterminds/sprig/pull/468
		// OS/System
		"sysArch":    sysArch,
		"sysNumCpu":  sysNumCpu,
		"sysOsName":  sysOsNm, // note: the template name differs from the Go identifier
		"sysRuntime": sysRuntime,
		// Paths: Generic ("/"-separated; see the `path` stdlib package)
		"pathJoin":          path.Join, // stdlib path.Join is exposed directly
		"pathPipeJoin":      pathPipeJoin,
		"pathSliceJoin":     pathSliceJoin,
		"pathSlicePipeJoin": pathSlicePipeJoin,
		"pathSubJoin":       pathSubJoin,
		// Paths: OS/Platform (os.PathSeparator-separated; see `path/filepath`)
		"osPathJoin":          filepath.Join, // stdlib filepath.Join is exposed directly
		"osPathPipeJoin":      osPathPipeJoin,
		"osPathSep":           osPathSep,
		"osPathSliceJoin":     osPathSliceJoin,
		"osPathSlicePipeJoin": osPathSlicePipeJoin,
		"osPathSubJoin":       osPathSubJoin,
	}

	// htmlMap holds functions usable/intended for use in only an [html/template.FuncMap].
	// (Currently empty; entries here are merged on top of genericMap by HtmlFuncMap.)
	htmlMap map[string]any = map[string]any{}

	// txtMap holds functions usable/intended for use in only a [text/template.FuncMap].
	// (Currently empty; entries here are merged on top of genericMap by TxtFuncMap.)
	txtMap map[string]any = map[string]any{}
)
|
||||||
16
tplx/sprigx/doc.go
Normal file
16
tplx/sprigx/doc.go
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
/*
|
||||||
|
Package sprigx aims to provide additional functions that the author believes are missing from [sprig] ([Go docs]).
|
||||||
|
|
||||||
|
It's a decent enough "basics" library, but I frequently find it falls short once you start needing domain-specific data.
|
||||||
|
|
||||||
|
These may get merged into sprig, they may not. It all depends on how responsive they are to PRs.
|
||||||
|
Given that they only update it every 6 months or so, however...
|
||||||
|
|
||||||
|
See the [full documentation] on the [repo].
|
||||||
|
|
||||||
|
[sprig]: https://masterminds.github.io/sprig/
|
||||||
|
[Go docs]: https://pkg.go.dev/github.com/Masterminds/sprig/v3
|
||||||
|
[full documentation]: https://git.r00t2.io/r00t2/go_goutils/src/branch/master/tplx/sprigx/README.adoc
|
||||||
|
[repo]: https://git.r00t2.io/r00t2/go_goutils
|
||||||
|
*/
|
||||||
|
package sprigx
|
||||||
64
tplx/sprigx/funcs.go
Normal file
64
tplx/sprigx/funcs.go
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
package sprigx
|
||||||
|
|
||||||
|
import (
|
||||||
|
htpl "html/template"
|
||||||
|
ttpl "text/template"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Many of these functions are modeled after sprig's.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
FuncMap returns a generic function map.
|
||||||
|
|
||||||
|
You probably want [HtmlFuncMap] or [TxtFuncMap] instead,
|
||||||
|
as they wrap this with the appropriate type.
|
||||||
|
*/
|
||||||
|
func FuncMap() (fmap map[string]any) {
|
||||||
|
|
||||||
|
var fn string
|
||||||
|
var f any
|
||||||
|
|
||||||
|
fmap = make(map[string]any, len(genericMap))
|
||||||
|
|
||||||
|
for fn, f = range genericMap {
|
||||||
|
fmap[fn] = f
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// HtmlFuncMap returns an [html/template.FuncMap].
|
||||||
|
func HtmlFuncMap() (fmap htpl.FuncMap) {
|
||||||
|
|
||||||
|
var fn string
|
||||||
|
var f any
|
||||||
|
|
||||||
|
fmap = htpl.FuncMap(FuncMap())
|
||||||
|
|
||||||
|
if htmlMap != nil && len(htmlMap) > 0 {
|
||||||
|
for fn, f = range htmlMap {
|
||||||
|
fmap[fn] = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// TxtFuncMap returns a [text/template.FuncMap].
|
||||||
|
func TxtFuncMap() (fmap ttpl.FuncMap) {
|
||||||
|
|
||||||
|
var fn string
|
||||||
|
var f any
|
||||||
|
|
||||||
|
fmap = ttpl.FuncMap(FuncMap())
|
||||||
|
|
||||||
|
if txtMap != nil && len(txtMap) > 0 {
|
||||||
|
for fn, f = range txtMap {
|
||||||
|
fmap[fn] = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
33
tplx/sprigx/funcs_test.go
Normal file
33
tplx/sprigx/funcs_test.go
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
package sprigx
|
||||||
|
|
||||||
|
import (
|
||||||
|
`bytes`
|
||||||
|
_ "embed"
|
||||||
|
"testing"
|
||||||
|
`text/template`
|
||||||
|
|
||||||
|
"github.com/Masterminds/sprig/v3"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// testTplBytes holds the raw contents of _test.tpl, embedded at build time.
	//go:embed "_test.tpl"
	testTplBytes []byte

	// testTpl is the parsed test template. The upstream sprig text function
	// map is registered first and this package's TxtFuncMap second; later
	// Funcs registrations override earlier ones on any name collision.
	testTpl *template.Template = template.Must(
		template.
			New("").
			Funcs(sprig.TxtFuncMap()).
			Funcs(TxtFuncMap()).
			Parse(string(testTplBytes)),
	)
)
|
||||||
|
|
||||||
|
func TestFuncs(t *testing.T) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var buf *bytes.Buffer = new(bytes.Buffer)
|
||||||
|
|
||||||
|
if err = testTpl.Execute(buf, nil); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Log(buf.String())
|
||||||
|
}
|
||||||
17
tplx/sprigx/funcs_tpl_dbg.go
Normal file
17
tplx/sprigx/funcs_tpl_dbg.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
package sprigx
|
||||||
|
|
||||||
|
import (
|
||||||
|
`github.com/davecgh/go-spew/spew`
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
dump calls [spew.Sdump] on obj.
|
||||||
|
|
||||||
|
[spew.Sdump]: https://pkg.go.dev/github.com/davecgh/go-spew/spew
|
||||||
|
*/
|
||||||
|
func dump(obj any) (out string) {
|
||||||
|
|
||||||
|
out = spew.Sdump(obj)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
155
tplx/sprigx/funcs_tpl_paths.go
Normal file
155
tplx/sprigx/funcs_tpl_paths.go
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
package sprigx
|
||||||
|
|
||||||
|
import (
|
||||||
|
`os`
|
||||||
|
`path`
|
||||||
|
`path/filepath`
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
//
|
||||||
|
// GENERIC
|
||||||
|
//
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
pathPipeJoin wraps path.Join with the root element at the *end* instead of the beginning.

	{{ pathPipeJoin "b" "c" "a" }}

is equivalent to

	path.Join("a", "b", "c")

This order variation is better suited for pipelines that pass the root path.
*/
func pathPipeJoin(elems ...string) (out string) {

	// len() of a nil slice is 0, so no separate nil check is needed.
	if len(elems) == 0 {
		return
	}

	rootIdx := len(elems) - 1
	out = elems[rootIdx]

	// A lone root is returned untouched (notably, it is NOT path.Clean'd).
	if len(elems) == 1 {
		return
	}

	// Prepend the root to the remaining segments; path.Join cleans the result.
	out = path.Join(out, path.Join(elems[:rootIdx]...))

	return
}
|
||||||
|
|
||||||
|
// pathSliceJoin joins a slice of path segments via path.Join.
func pathSliceJoin(sl []string) (out string) {
	return path.Join(sl...)
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
pathSlicePipeJoin behaves like a mix of pathPipeJoin (in that it accepts the root element last)
|
||||||
|
and pathSliceJoin (in that it accepts a slice of subpath segments).
|
||||||
|
|
||||||
|
It's essentially like pathSubJoin in reverse, and with an explicit slice.
|
||||||
|
*/
|
||||||
|
func pathSlicePipeJoin(sl []string, root string) (out string) {
|
||||||
|
|
||||||
|
out = pathSubJoin(root, sl...)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
pathSubJoin is like path.Join except it takes an explicit root
and additional slice of subpaths to sequentially join to it.

With no subpaths, root is returned completely untouched
(it is NOT path.Clean'd).
*/
func pathSubJoin(root string, elems ...string) (out string) {

	// len() of a nil slice is 0, so no separate nil check is needed.
	if len(elems) == 0 {
		return root
	}

	// path.Join cleans the joined result.
	return path.Join(root, path.Join(elems...))
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
//
|
||||||
|
// OS/PLATFORM
|
||||||
|
//
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
osPathPipeJoin is like pathPipeJoin but uses the rendering OS' path separator (os.PathSeparator).
*/
func osPathPipeJoin(elems ...string) (out string) {

	// len() of a nil slice is 0, so no separate nil check is needed.
	if len(elems) == 0 {
		return
	}

	rootIdx := len(elems) - 1
	out = elems[rootIdx]

	// A lone root is returned untouched (notably, it is NOT filepath.Clean'd).
	if len(elems) == 1 {
		return
	}

	// Prepend the root to the remaining segments; filepath.Join cleans the result.
	out = filepath.Join(out, filepath.Join(elems[:rootIdx]...))

	return
}
|
||||||
|
|
||||||
|
// osPathSep returns os.PathSeparator, converted to a string.
func osPathSep() (out string) {
	return string(os.PathSeparator)
}
|
||||||
|
|
||||||
|
// osPathSliceJoin is the OS-specific implementation of pathSliceJoin,
// joining via filepath.Join instead of path.Join.
func osPathSliceJoin(sl []string) (out string) {
	return filepath.Join(sl...)
}
|
||||||
|
|
||||||
|
// osPathSlicePipeJoin is the OS-specific implementation of pathSlicePipeJoin.
|
||||||
|
func osPathSlicePipeJoin(sl []string, root string) (out string) {
|
||||||
|
|
||||||
|
out = osPathSubJoin(root, sl...)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
osPathSubJoin is the OS-specific implementation of pathSubJoin.

With no subpaths, root is returned completely untouched
(it is NOT filepath.Clean'd).
*/
func osPathSubJoin(root string, elems ...string) (out string) {

	// len() of a nil slice is 0, so no separate nil check is needed.
	if len(elems) == 0 {
		return root
	}

	// filepath.Join cleans the joined result.
	return filepath.Join(root, filepath.Join(elems...))
}
|
||||||
52
tplx/sprigx/funcs_tpl_strings.go
Normal file
52
tplx/sprigx/funcs_tpl_strings.go
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
package sprigx
|
||||||
|
|
||||||
|
import (
|
||||||
|
`strings`
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
extIndent serves as a much more flexible alternative to the Sprig `indent`.

It has 6 arguments (the last of which may be passed in via pipeline):

* levels: The level of indentation for the text. If less than or equal to `0`, `extIndent` just returns `<input>` as-is and NO-OPs otherwise.
* skipFirst: If true, skip indenting the first line. This is particularly handy if you like to visually align your function calls in your templates.
* skipEmpty: If true, do not add an indent to *empty* lines (where an "empty line" means "only has a linebreak").
* skipWhitespace: If true, do not add an indent to lines that *only* consist of whitespace (spaces, tabs, etc.) and a linebreak.
* indentString: The string to use as the "indent character". This can be any string, such as `" "`, `"\t"`, `"."`, `"|"`, `"=="` etc.
  (In fact, if indentString is set to "\n" and levels is always set to 1, this function can even be used to double-space text!)
* input: The text to be indented. Because it is the last argument, `extIndent` works with pipelined text as well.
*/
func extIndent(levels int, skipFirst, skipEmpty, skipWhitespace bool, indentString, input string) (out string) {

	var pad string
	var lines []string

	// levels <= 0 is an explicit NO-OP; the input passes through untouched.
	if levels <= 0 {
		out = input
		return
	}

	pad = strings.Repeat(indentString, levels)
	lines = strings.Split(input, "\n")

	for idx, line := range lines {
		if idx == 0 && skipFirst {
			continue
		}
		// Whitespace-only lines (but NOT truly empty ones; those are
		// governed by skipEmpty below).
		if skipWhitespace && strings.TrimSpace(line) == "" && line != "" {
			continue
		}
		// Empty lines; a lone "\r" covers CRLF input split on "\n".
		if skipEmpty && (line == "" || line == "\r") {
			continue
		}
		lines[idx] = pad + line
	}

	out = strings.Join(lines, "\n")

	return
}
|
||||||
47
tplx/sprigx/funcs_tpl_sys.go
Normal file
47
tplx/sprigx/funcs_tpl_sys.go
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
package sprigx
|
||||||
|
|
||||||
|
import (
|
||||||
|
`fmt`
|
||||||
|
`runtime`
|
||||||
|
)
|
||||||
|
|
||||||
|
// sysRuntime returns various information from [runtime].
|
||||||
|
func sysRuntime() (out map[string]string) {
|
||||||
|
|
||||||
|
out = map[string]string{
|
||||||
|
"compiler": runtime.Compiler,
|
||||||
|
"arch": runtime.GOARCH,
|
||||||
|
"os": runtime.GOOS,
|
||||||
|
"maxprocs": fmt.Sprintf("%d", runtime.GOMAXPROCS(-1)),
|
||||||
|
"cpu_cnt": fmt.Sprintf("%d", runtime.NumCPU()),
|
||||||
|
"num_cgo": fmt.Sprintf("%d", runtime.NumCgoCall()),
|
||||||
|
"num_go": fmt.Sprintf("%d", runtime.NumGoroutine()),
|
||||||
|
"go_ver": runtime.Version(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// sysArch returns [runtime.GOARCH].
|
||||||
|
func sysArch() (out string) {
|
||||||
|
|
||||||
|
out = runtime.GOARCH
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// sysNumCpu returns the reuslt from [runtime.NumCPU].
|
||||||
|
func sysNumCpu() (out string) {
|
||||||
|
|
||||||
|
out = fmt.Sprintf("%d", runtime.NumCPU())
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// sysOsNm returns [runtime.GOOS].
|
||||||
|
func sysOsNm() (out string) {
|
||||||
|
|
||||||
|
out = runtime.GOOS
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
19
tplx/types.go
Normal file
19
tplx/types.go
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
package tplx
|
||||||
|
|
||||||
|
import (
|
||||||
|
htmlTpl `html/template`
|
||||||
|
`io`
|
||||||
|
txtTpl `text/template`
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
	// tplType is a small (uint8) discriminator, presumably used to
	// distinguish template kinds; its values are declared elsewhere in
	// the package and are not visible in this chunk — confirm there.
	tplType uint8
)
|
||||||
|
|
||||||
|
type (
	// Template is a generic constraint satisfied by both *text/template.Template
	// and *html/template.Template, whose Execute/ExecuteTemplate signatures
	// match the method set below. Because it combines a type union with
	// methods, it can only be used as a type-parameter constraint, never as
	// an ordinary interface value type.
	Template interface {
		*txtTpl.Template | *htmlTpl.Template
		Execute(w io.Writer, obj any) (err error)
		ExecuteTemplate(w io.Writer, tplNm string, obj any) (err error)
	}
)
|
||||||
Reference in New Issue
Block a user