build(deps): bump github.com/olekukonko/tablewriter from 1.1.3 to 1.1.4

Bumps [github.com/olekukonko/tablewriter](https://github.com/olekukonko/tablewriter) from 1.1.3 to 1.1.4.
- [Release notes](https://github.com/olekukonko/tablewriter/releases)
- [Commits](https://github.com/olekukonko/tablewriter/compare/v1.1.3...v1.1.4)

---
updated-dependencies:
- dependency-name: github.com/olekukonko/tablewriter
  dependency-version: 1.1.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot]
2026-03-12 14:46:28 +00:00
committed by Ralf Haferkamp
parent 8d30364a82
commit 57fdbb2d4c
69 changed files with 6062 additions and 3435 deletions

11
go.mod
View File

@@ -58,7 +58,7 @@ require (
github.com/nats-io/nats-server/v2 v2.12.4
github.com/nats-io/nats.go v1.49.0
github.com/oklog/run v1.2.0
github.com/olekukonko/tablewriter v1.1.3
github.com/olekukonko/tablewriter v1.1.4
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.28.1
github.com/onsi/gomega v1.39.1
@@ -166,9 +166,8 @@ require (
github.com/ceph/go-ceph v0.37.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73 // indirect
github.com/clipperhouse/displaywidth v0.6.2 // indirect
github.com/clipperhouse/stringish v0.1.1 // indirect
github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
github.com/clipperhouse/displaywidth v0.10.0 // indirect
github.com/clipperhouse/uax29/v2 v2.6.0 // indirect
github.com/cloudflare/circl v1.6.3 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
@@ -311,8 +310,8 @@ require (
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect
github.com/olekukonko/errors v1.1.0 // indirect
github.com/olekukonko/ll v0.1.4-0.20260115111900-9e59c2286df0 // indirect
github.com/olekukonko/errors v1.2.0 // indirect
github.com/olekukonko/ll v0.1.6 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect

22
go.sum
View File

@@ -221,12 +221,10 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/clipperhouse/displaywidth v0.6.2 h1:ZDpTkFfpHOKte4RG5O/BOyf3ysnvFswpyYrV7z2uAKo=
github.com/clipperhouse/displaywidth v0.6.2/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o=
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/clipperhouse/displaywidth v0.10.0 h1:GhBG8WuerxjFQQYeuZAeVTuyxuX+UraiZGD4HJQ3Y8g=
github.com/clipperhouse/displaywidth v0.10.0/go.mod h1:XqJajYsaiEwkxOj4bowCTMcT1SgvHo9flfF3jQasdbs=
github.com/clipperhouse/uax29/v2 v2.6.0 h1:z0cDbUV+aPASdFb2/ndFnS9ts/WNXgTNNGFoKXuhpos=
github.com/clipperhouse/uax29/v2 v2.6.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
@@ -934,13 +932,13 @@ github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YF
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 h1:zrbMGy9YXpIeTnGj4EljqMiZsIcE09mmF8XsD5AYOJc=
github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6/go.mod h1:rEKTHC9roVVicUIfZK7DYrdIoM0EOr8mK1Hj5s3JjH0=
github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM=
github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y=
github.com/olekukonko/ll v0.1.4-0.20260115111900-9e59c2286df0 h1:jrYnow5+hy3WRDCBypUFvVKNSPPCdqgSXIE9eJDD8LM=
github.com/olekukonko/ll v0.1.4-0.20260115111900-9e59c2286df0/go.mod h1:b52bVQRRPObe+yyBl0TxNfhesL0nedD4Cht0/zx55Ew=
github.com/olekukonko/errors v1.2.0 h1:10Zcn4GeV59t/EGqJc8fUjtFT/FuUh5bTMzZ1XwmCRo=
github.com/olekukonko/errors v1.2.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y=
github.com/olekukonko/ll v0.1.6 h1:lGVTHO+Qc4Qm+fce/2h2m5y9LvqaW+DCN7xW9hsU3uA=
github.com/olekukonko/ll v0.1.6/go.mod h1:NVUmjBb/aCtUpjKk75BhWrOlARz3dqsM+OtszpY4o88=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/olekukonko/tablewriter v1.1.3 h1:VSHhghXxrP0JHl+0NnKid7WoEmd9/urKRJLysb70nnA=
github.com/olekukonko/tablewriter v1.1.3/go.mod h1:9VU0knjhmMkXjnMKrZ3+L2JhhtsQ/L38BbL3CRNE8tM=
github.com/olekukonko/tablewriter v1.1.4 h1:ORUMI3dXbMnRlRggJX3+q7OzQFDdvgbN9nVWj1drm6I=
github.com/olekukonko/tablewriter v1.1.4/go.mod h1:+kedxuyTtgoZLwif3P1Em4hARJs+mVnzKxmsCL/C5RY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=

View File

@@ -1 +1,3 @@
.DS_Store
*.out
*.test

View File

@@ -18,13 +18,15 @@ by running `go generate` from the top package directory.
## Pull Requests and branches
For PRs (pull requests), you can use the gh CLI tool to retrieve details,
or post comments. Then, compare the current branch with main. Reviewing a PR
and reviewing a branch are about the same, but the PR may add context.
For PRs (pull requests), you can use the gh CLI tool. Compare the current branch with main. Reviewing a PR and reviewing a branch are about the same, but the PR may add context.
Look for bugs. Think like GitHub Copilot or Cursor BugBot.
Understand the goals of the PR. Note any API changes, especially breaking changes.
Offer to post a brief summary of the review to the PR, via the gh CLI tool.
Look for thoroughness of tests, as well as GoDoc comments.
Retrieve and consider the comments on the PR, which may have come from GitHub Copilot or Cursor BugBot. Think like GitHub Copilot or Cursor BugBot.
Offer to optionally post a brief summary of the review to the PR, via the gh CLI tool.
## Comparisons to go-runewidth

View File

@@ -1,5 +1,50 @@
# Changelog
## [0.10.0]
[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.9.0...v0.10.0)
### Added
- New `ControlSequences` option to treat ECMA-48/ANSI escape sequences as zero-width. (#20)
- `TruncateString` and `TruncateBytes` now preserve trailing ANSI escape sequences (such as SGR resets) when `ControlSequences` is true, preventing color bleed in terminal output.
### Changed
- Removed `stringish` dependency; generic type constraints are now inline `~string | []byte`.
- Upgraded uax29 dependency to v2.6.0 for ANSI escape sequence support in the grapheme iterator.
## [0.9.0]
[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.8.0...v0.9.0)
### Changed
- Unicode 17 support: East Asian Width and emoji data updated to Unicode 17.0.0. (#18)
- Upgraded uax29 dependency to v2.5.0 (Unicode 17 grapheme segmentation).
## [0.8.0]
[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.7.0...v0.8.0)
### Changed
- Performance: ASCII fast path that applies to any run of printable
ASCII. 2x-10x faster for ASCII text vs v0.7.0. (#16)
- Upgraded uax29 dependency to v2.4.0 for Unicode 16 support. Text that includes
Indic_Conjunct_Break may segment differently (and more correctly). (#15)
## [0.7.0]
[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.6.2...v0.7.0)
### Added
- New `TruncateString` and `TruncateBytes` methods to truncate strings to a
maximum display width, with optional tail (like an ellipsis). (#13)
## [0.6.2]
[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.6.1...v0.6.2)
### Changed
- Internal: reduced property categories for simpler trie.
## [0.6.1]
[Compare](https://github.com/clipperhouse/displaywidth/compare/v0.6.0...v0.6.1)
@@ -19,7 +64,7 @@
widths of grapheme clusters.
### Changed
- Added ASCII fast paths
- Fast ASCII lookups
## [0.5.0]

View File

@@ -61,8 +61,28 @@ func main() {
### Options
There is one option, `displaywidth.Options.EastAsianWidth`, which defines
how [East Asian Ambiguous characters](https://www.unicode.org/reports/tr11/#Ambiguous)
Create the options you need, and then use methods on the options struct.
```go
var myOptions = displaywidth.Options{
EastAsianWidth: true,
ControlSequences: true,
}
width := myOptions.String("Hello, 世界!")
```
#### ControlSequences
`ControlSequences` specifies whether to ignore ECMA-48 escape sequences
when calculating the display width. When `false` (default), ANSI escape
sequences are treated as just a series of characters. When `true`, they are
treated as a single zero-width unit.
#### EastAsianWidth
`EastAsianWidth` defines how
[East Asian Ambiguous characters](https://www.unicode.org/reports/tr11/#Ambiguous)
are treated.
When `false` (default), East Asian Ambiguous characters are treated as width 1.
@@ -70,26 +90,8 @@ When `true`, they are treated as width 2.
You may wish to configure this based on environment variables or locale.
`go-runewidth`, for example, does so
[during package initialization](https://github.com/mattn/go-runewidth/blob/master/runewidth.go#L26C1-L45C2).
[during package initialization](https://github.com/mattn/go-runewidth/blob/master/runewidth.go#L26C1-L45C2). `displaywidth` does not do this automatically, we prefer to leave it to you.
`displaywidth` does not do this automatically, we prefer to leave it to you.
You might do something like:
```go
var width displaywidth.Options // zero value is default
func init() {
if os.Getenv("EAST_ASIAN_WIDTH") == "true" {
width = displaywidth.Options{EastAsianWidth: true}
}
// or check locale, or any other logic you want
}
// use it in your logic
func myApp() {
fmt.Println(width.String("Hello, 世界!"))
}
```
## Technical standards and compatibility
@@ -97,19 +99,16 @@ This package implements the Unicode East Asian Width standard
([UAX #11](https://www.unicode.org/reports/tr11/tr11-43.html)), and handles
[version selectors](https://en.wikipedia.org/wiki/Variation_Selectors_(Unicode_block)),
and [regional indicator pairs](https://en.wikipedia.org/wiki/Regional_indicator_symbol)
(flags). We implement [Unicode TR51](https://www.unicode.org/reports/tr51/tr51-27.html). We are keeping
an eye on [emerging standards](https://www.jeffquast.com/post/state-of-terminal-emulation-2025/).
(flags). We implement [Unicode TR51](https://www.unicode.org/reports/tr51/tr51-27.html)
for emojis. We are keeping an eye on
[emerging standards](https://www.jeffquast.com/post/state-of-terminal-emulation-2025/).
For control sequences, we implement the [ECMA-48](https://ecma-international.org/publications-and-standards/standards/ecma-48/) standard for 7-bit ASCII control sequences.
`clipperhouse/displaywidth`, `mattn/go-runewidth`, and `rivo/uniseg` will
give the same outputs for most real-world text. Extensive details are in the
[compatibility analysis](comparison/COMPATIBILITY_ANALYSIS.md).
If you wish to investigate the core logic, see the `lookupProperties` and `width`
functions in [width.go](width.go#L139). The essential trie generation logic is in
`buildPropertyBitmap` in [unicode.go](internal/gen/unicode.go#L316).
## Prior Art
[mattn/go-runewidth](https://github.com/mattn/go-runewidth)
@@ -133,33 +132,39 @@ goarch: arm64
pkg: github.com/clipperhouse/displaywidth/comparison
cpu: Apple M2
BenchmarkString_Mixed/clipperhouse/displaywidth-8 10326 ns/op 163.37 MB/s 0 B/op 0 allocs/op
BenchmarkString_Mixed/mattn/go-runewidth-8 14415 ns/op 117.03 MB/s 0 B/op 0 allocs/op
BenchmarkString_Mixed/rivo/uniseg-8 19343 ns/op 87.21 MB/s 0 B/op 0 allocs/op
BenchmarkString_Mixed/clipperhouse/displaywidth-8 5784 ns/op 291.69 MB/s 0 B/op 0 allocs/op
BenchmarkString_Mixed/mattn/go-runewidth-8 14751 ns/op 114.36 MB/s 0 B/op 0 allocs/op
BenchmarkString_Mixed/rivo/uniseg-8 19360 ns/op 87.14 MB/s 0 B/op 0 allocs/op
BenchmarkString_EastAsian/clipperhouse/displaywidth-8 10561 ns/op 159.74 MB/s 0 B/op 0 allocs/op
BenchmarkString_EastAsian/mattn/go-runewidth-8 23790 ns/op 70.91 MB/s 0 B/op 0 allocs/op
BenchmarkString_EastAsian/rivo/uniseg-8 19322 ns/op 87.31 MB/s 0 B/op 0 allocs/op
BenchmarkString_ASCII/clipperhouse/displaywidth-8 54.60 ns/op 2344.32 MB/s 0 B/op 0 allocs/op
BenchmarkString_ASCII/mattn/go-runewidth-8 1195 ns/op 107.08 MB/s 0 B/op 0 allocs/op
BenchmarkString_ASCII/rivo/uniseg-8 1578 ns/op 81.13 MB/s 0 B/op 0 allocs/op
BenchmarkString_ASCII/clipperhouse/displaywidth-8 1033 ns/op 123.88 MB/s 0 B/op 0 allocs/op
BenchmarkString_ASCII/mattn/go-runewidth-8 1168 ns/op 109.59 MB/s 0 B/op 0 allocs/op
BenchmarkString_ASCII/rivo/uniseg-8 1585 ns/op 80.74 MB/s 0 B/op 0 allocs/op
BenchmarkString_EastAsian/clipperhouse/displaywidth-8 5837 ns/op 289.01 MB/s 0 B/op 0 allocs/op
BenchmarkString_EastAsian/mattn/go-runewidth-8 24418 ns/op 69.09 MB/s 0 B/op 0 allocs/op
BenchmarkString_EastAsian/rivo/uniseg-8 19339 ns/op 87.23 MB/s 0 B/op 0 allocs/op
BenchmarkString_Emoji/clipperhouse/displaywidth-8 3034 ns/op 238.61 MB/s 0 B/op 0 allocs/op
BenchmarkString_Emoji/mattn/go-runewidth-8 4797 ns/op 150.94 MB/s 0 B/op 0 allocs/op
BenchmarkString_Emoji/rivo/uniseg-8 6612 ns/op 109.50 MB/s 0 B/op 0 allocs/op
BenchmarkString_Emoji/clipperhouse/displaywidth-8 3225 ns/op 224.51 MB/s 0 B/op 0 allocs/op
BenchmarkString_Emoji/mattn/go-runewidth-8 4851 ns/op 149.25 MB/s 0 B/op 0 allocs/op
BenchmarkString_Emoji/rivo/uniseg-8 6591 ns/op 109.85 MB/s 0 B/op 0 allocs/op
BenchmarkRune_Mixed/clipperhouse/displaywidth-8 3343 ns/op 504.67 MB/s 0 B/op 0 allocs/op
BenchmarkRune_Mixed/mattn/go-runewidth-8 5414 ns/op 311.62 MB/s 0 B/op 0 allocs/op
BenchmarkRune_Mixed/clipperhouse/displaywidth-8 3385 ns/op 498.34 MB/s 0 B/op 0 allocs/op
BenchmarkRune_Mixed/mattn/go-runewidth-8 5354 ns/op 315.07 MB/s 0 B/op 0 allocs/op
BenchmarkRune_EastAsian/clipperhouse/displaywidth-8 3393 ns/op 497.17 MB/s 0 B/op 0 allocs/op
BenchmarkRune_EastAsian/mattn/go-runewidth-8 15312 ns/op 110.17 MB/s 0 B/op 0 allocs/op
BenchmarkRune_EastAsian/clipperhouse/displaywidth-8 3397 ns/op 496.56 MB/s 0 B/op 0 allocs/op
BenchmarkRune_EastAsian/mattn/go-runewidth-8 15673 ns/op 107.64 MB/s 0 B/op 0 allocs/op
BenchmarkRune_ASCII/clipperhouse/displaywidth-8 256.9 ns/op 498.32 MB/s 0 B/op 0 allocs/op
BenchmarkRune_ASCII/mattn/go-runewidth-8 265.7 ns/op 481.75 MB/s 0 B/op 0 allocs/op
BenchmarkRune_ASCII/clipperhouse/displaywidth-8 255.7 ns/op 500.53 MB/s 0 B/op 0 allocs/op
BenchmarkRune_ASCII/mattn/go-runewidth-8 261.5 ns/op 489.55 MB/s 0 B/op 0 allocs/op
BenchmarkRune_Emoji/clipperhouse/displaywidth-8 1336 ns/op 541.96 MB/s 0 B/op 0 allocs/op
BenchmarkRune_Emoji/mattn/go-runewidth-8 2304 ns/op 314.23 MB/s 0 B/op 0 allocs/op
BenchmarkRune_Emoji/clipperhouse/displaywidth-8 1371 ns/op 528.22 MB/s 0 B/op 0 allocs/op
BenchmarkRune_Emoji/mattn/go-runewidth-8 2267 ns/op 319.43 MB/s 0 B/op 0 allocs/op
BenchmarkTruncateWithTail/clipperhouse/displaywidth-8 3229 ns/op 54.82 MB/s 192 B/op 14 allocs/op
BenchmarkTruncateWithTail/mattn/go-runewidth-8 8408 ns/op 21.05 MB/s 192 B/op 14 allocs/op
BenchmarkTruncateWithoutTail/clipperhouse/displaywidth-8 3554 ns/op 64.43 MB/s 0 B/op 0 allocs/op
BenchmarkTruncateWithoutTail/mattn/go-runewidth-8 11189 ns/op 20.47 MB/s 0 B/op 0 allocs/op
```
Here are some notes on [how to make Unicode things fast](https://clipperhouse.com/go-unicode/).

View File

@@ -1,7 +1,6 @@
package displaywidth
import (
"github.com/clipperhouse/stringish"
"github.com/clipperhouse/uax29/v2/graphemes"
)
@@ -9,8 +8,8 @@ import (
//
// Iterate using the Next method, and get the width of the current grapheme
// using the Width method.
type Graphemes[T stringish.Interface] struct {
iter graphemes.Iterator[T]
type Graphemes[T ~string | []byte] struct {
iter *graphemes.Iterator[T]
options Options
}
@@ -44,10 +43,10 @@ func StringGraphemes(s string) Graphemes[string] {
// Iterate using the Next method, and get the width of the current grapheme
// using the Width method.
func (options Options) StringGraphemes(s string) Graphemes[string] {
return Graphemes[string]{
iter: graphemes.FromString(s),
options: options,
}
g := graphemes.FromString(s)
g.AnsiEscapeSequences = options.ControlSequences
return Graphemes[string]{iter: g, options: options}
}
// BytesGraphemes returns an iterator over grapheme clusters for the given
@@ -65,8 +64,8 @@ func BytesGraphemes(s []byte) Graphemes[[]byte] {
// Iterate using the Next method, and get the width of the current grapheme
// using the Width method.
func (options Options) BytesGraphemes(s []byte) Graphemes[[]byte] {
return Graphemes[[]byte]{
iter: graphemes.FromBytes(s),
options: options,
}
g := graphemes.FromBytes(s)
g.AnsiEscapeSequences = options.ControlSequences
return Graphemes[[]byte]{iter: g, options: options}
}

View File

@@ -2,8 +2,6 @@
package displaywidth
import "github.com/clipperhouse/stringish"
// property is an enum representing the properties of a character
type property uint8
@@ -19,7 +17,7 @@ const (
// lookup returns the trie value for the first UTF-8 encoding in s and
// the width in bytes of this encoding. The size will be 0 if s does not
// hold enough bytes to complete the encoding. len(s) must be greater than 0.
func lookup[T stringish.Interface](s T) (v uint8, sz int) {
func lookup[T ~string | []byte](s T) (v uint8, sz int) {
c0 := s[0]
switch {
case c0 < 0x80: // is ASCII
@@ -79,7 +77,7 @@ func lookup[T stringish.Interface](s T) (v uint8, sz int) {
return 0, 1
}
// stringWidthTrie. Total size: 17664 bytes (17.25 KiB). Checksum: c77d82ff2d69f0d2.
// stringWidthTrie. Total size: 17664 bytes (17.25 KiB). Checksum: 220983462f26d765.
// type stringWidthTrie struct { }
// func newStringWidthTrie(i int) *stringWidthTrie {
@@ -1133,27 +1131,31 @@ var stringWidthValues = [15744]uint8{
// Block 0xc2, offset 0x3080
0x30a0: 0x0002, 0x30a1: 0x0002, 0x30a2: 0x0002, 0x30a3: 0x0002,
0x30a4: 0x0001,
0x30b0: 0x0002, 0x30b1: 0x0002,
0x30b0: 0x0002, 0x30b1: 0x0002, 0x30b2: 0x0002, 0x30b3: 0x0002, 0x30b4: 0x0002, 0x30b5: 0x0002,
0x30b6: 0x0002,
// Block 0xc3, offset 0x30c0
0x30c0: 0x0002, 0x30c1: 0x0002, 0x30c2: 0x0002, 0x30c3: 0x0002, 0x30c4: 0x0002, 0x30c5: 0x0002,
0x30c6: 0x0002, 0x30c7: 0x0002, 0x30c8: 0x0002, 0x30c9: 0x0002, 0x30ca: 0x0002, 0x30cb: 0x0002,
0x30cc: 0x0002, 0x30cd: 0x0002, 0x30ce: 0x0002, 0x30cf: 0x0002, 0x30d0: 0x0002, 0x30d1: 0x0002,
0x30d2: 0x0002, 0x30d3: 0x0002, 0x30d4: 0x0002, 0x30d5: 0x0002, 0x30d6: 0x0002, 0x30d7: 0x0002,
0x30d8: 0x0002, 0x30d9: 0x0002, 0x30da: 0x0002, 0x30db: 0x0002, 0x30dc: 0x0002, 0x30dd: 0x0002,
0x30de: 0x0002, 0x30df: 0x0002, 0x30e0: 0x0002, 0x30e1: 0x0002, 0x30e2: 0x0002, 0x30e3: 0x0002,
0x30e4: 0x0002, 0x30e5: 0x0002, 0x30e6: 0x0002, 0x30e7: 0x0002, 0x30e8: 0x0002, 0x30e9: 0x0002,
0x30ea: 0x0002, 0x30eb: 0x0002, 0x30ec: 0x0002, 0x30ed: 0x0002, 0x30ee: 0x0002, 0x30ef: 0x0002,
0x30f0: 0x0002, 0x30f1: 0x0002, 0x30f2: 0x0002, 0x30f3: 0x0002, 0x30f4: 0x0002, 0x30f5: 0x0002,
0x30f6: 0x0002, 0x30f7: 0x0002,
0x30d2: 0x0002, 0x30d3: 0x0002, 0x30d4: 0x0002, 0x30d5: 0x0002,
0x30ff: 0x0002,
// Block 0xc4, offset 0x3100
0x3100: 0x0002, 0x3101: 0x0002, 0x3102: 0x0002, 0x3103: 0x0002, 0x3104: 0x0002, 0x3105: 0x0002,
0x3106: 0x0002, 0x3107: 0x0002, 0x3108: 0x0002, 0x3109: 0x0002, 0x310a: 0x0002, 0x310b: 0x0002,
0x310c: 0x0002, 0x310d: 0x0002, 0x310e: 0x0002, 0x310f: 0x0002, 0x3110: 0x0002, 0x3111: 0x0002,
0x3112: 0x0002, 0x3113: 0x0002, 0x3114: 0x0002, 0x3115: 0x0002,
0x313f: 0x0002,
0x3112: 0x0002, 0x3113: 0x0002, 0x3114: 0x0002, 0x3115: 0x0002, 0x3116: 0x0002, 0x3117: 0x0002,
0x3118: 0x0002, 0x3119: 0x0002, 0x311a: 0x0002, 0x311b: 0x0002, 0x311c: 0x0002, 0x311d: 0x0002,
0x311e: 0x0002,
// Block 0xc5, offset 0x3140
0x3140: 0x0002, 0x3141: 0x0002, 0x3142: 0x0002, 0x3143: 0x0002, 0x3144: 0x0002, 0x3145: 0x0002,
0x3146: 0x0002, 0x3147: 0x0002, 0x3148: 0x0002,
0x3146: 0x0002, 0x3147: 0x0002, 0x3148: 0x0002, 0x3149: 0x0002, 0x314a: 0x0002, 0x314b: 0x0002,
0x314c: 0x0002, 0x314d: 0x0002, 0x314e: 0x0002, 0x314f: 0x0002, 0x3150: 0x0002, 0x3151: 0x0002,
0x3152: 0x0002, 0x3153: 0x0002, 0x3154: 0x0002, 0x3155: 0x0002, 0x3156: 0x0002, 0x3157: 0x0002,
0x3158: 0x0002, 0x3159: 0x0002, 0x315a: 0x0002, 0x315b: 0x0002, 0x315c: 0x0002, 0x315d: 0x0002,
0x315e: 0x0002, 0x315f: 0x0002, 0x3160: 0x0002, 0x3161: 0x0002, 0x3162: 0x0002, 0x3163: 0x0002,
0x3164: 0x0002, 0x3165: 0x0002, 0x3166: 0x0002, 0x3167: 0x0002, 0x3168: 0x0002, 0x3169: 0x0002,
0x316a: 0x0002, 0x316b: 0x0002, 0x316c: 0x0002, 0x316d: 0x0002, 0x316e: 0x0002, 0x316f: 0x0002,
0x3170: 0x0002, 0x3171: 0x0002, 0x3172: 0x0002,
// Block 0xc6, offset 0x3180
0x31b0: 0x0002, 0x31b1: 0x0002, 0x31b2: 0x0002, 0x31b3: 0x0002, 0x31b5: 0x0002,
0x31b6: 0x0002, 0x31b7: 0x0002, 0x31b8: 0x0002, 0x31b9: 0x0002, 0x31ba: 0x0002, 0x31bb: 0x0002,
@@ -1443,7 +1445,7 @@ var stringWidthValues = [15744]uint8{
0x3b40: 0x0002, 0x3b41: 0x0002, 0x3b42: 0x0002, 0x3b43: 0x0002, 0x3b44: 0x0002, 0x3b45: 0x0002,
0x3b4c: 0x0002, 0x3b50: 0x0002, 0x3b51: 0x0002,
0x3b52: 0x0002, 0x3b55: 0x0002, 0x3b56: 0x0002, 0x3b57: 0x0002,
0x3b5c: 0x0002, 0x3b5d: 0x0002,
0x3b58: 0x0002, 0x3b5c: 0x0002, 0x3b5d: 0x0002,
0x3b5e: 0x0002, 0x3b5f: 0x0002,
0x3b6b: 0x0002, 0x3b6c: 0x0002,
0x3b74: 0x0002, 0x3b75: 0x0002,
@@ -1482,8 +1484,8 @@ var stringWidthValues = [15744]uint8{
0x3c7c: 0x0002,
// Block 0xf2, offset 0x3c80
0x3c80: 0x0002, 0x3c81: 0x0002, 0x3c82: 0x0002, 0x3c83: 0x0002, 0x3c84: 0x0002, 0x3c85: 0x0002,
0x3c86: 0x0002, 0x3c87: 0x0002, 0x3c88: 0x0002, 0x3c89: 0x0002,
0x3c8f: 0x0002, 0x3c90: 0x0002, 0x3c91: 0x0002,
0x3c86: 0x0002, 0x3c87: 0x0002, 0x3c88: 0x0002, 0x3c89: 0x0002, 0x3c8a: 0x0002,
0x3c8e: 0x0002, 0x3c8f: 0x0002, 0x3c90: 0x0002, 0x3c91: 0x0002,
0x3c92: 0x0002, 0x3c93: 0x0002, 0x3c94: 0x0002, 0x3c95: 0x0002, 0x3c96: 0x0002, 0x3c97: 0x0002,
0x3c98: 0x0002, 0x3c99: 0x0002, 0x3c9a: 0x0002, 0x3c9b: 0x0002, 0x3c9c: 0x0002, 0x3c9d: 0x0002,
0x3c9e: 0x0002, 0x3c9f: 0x0002, 0x3ca0: 0x0002, 0x3ca1: 0x0002, 0x3ca2: 0x0002, 0x3ca3: 0x0002,
@@ -1494,12 +1496,13 @@ var stringWidthValues = [15744]uint8{
0x3cbc: 0x0002, 0x3cbd: 0x0002, 0x3cbe: 0x0002, 0x3cbf: 0x0002,
// Block 0xf3, offset 0x3cc0
0x3cc0: 0x0002, 0x3cc1: 0x0002, 0x3cc2: 0x0002, 0x3cc3: 0x0002, 0x3cc4: 0x0002, 0x3cc5: 0x0002,
0x3cc6: 0x0002,
0x3cce: 0x0002, 0x3ccf: 0x0002, 0x3cd0: 0x0002, 0x3cd1: 0x0002,
0x3cc6: 0x0002, 0x3cc8: 0x0002,
0x3ccd: 0x0002, 0x3cce: 0x0002, 0x3ccf: 0x0002, 0x3cd0: 0x0002, 0x3cd1: 0x0002,
0x3cd2: 0x0002, 0x3cd3: 0x0002, 0x3cd4: 0x0002, 0x3cd5: 0x0002, 0x3cd6: 0x0002, 0x3cd7: 0x0002,
0x3cd8: 0x0002, 0x3cd9: 0x0002, 0x3cda: 0x0002, 0x3cdb: 0x0002, 0x3cdc: 0x0002,
0x3cdf: 0x0002, 0x3ce0: 0x0002, 0x3ce1: 0x0002, 0x3ce2: 0x0002, 0x3ce3: 0x0002,
0x3ce4: 0x0002, 0x3ce5: 0x0002, 0x3ce6: 0x0002, 0x3ce7: 0x0002, 0x3ce8: 0x0002, 0x3ce9: 0x0002,
0x3cea: 0x0002, 0x3cef: 0x0002,
0x3cf0: 0x0002, 0x3cf1: 0x0002, 0x3cf2: 0x0002, 0x3cf3: 0x0002, 0x3cf4: 0x0002, 0x3cf5: 0x0002,
0x3cf6: 0x0002, 0x3cf7: 0x0002, 0x3cf8: 0x0002,
// Block 0xf4, offset 0x3d00
@@ -1631,10 +1634,10 @@ var stringWidthIndex = [1920]uint8{
0x440: 0x39, 0x441: 0x39, 0x442: 0x39, 0x443: 0x39, 0x444: 0x39, 0x445: 0x39, 0x446: 0x39, 0x447: 0x39,
0x448: 0x39, 0x449: 0x39, 0x44a: 0x39, 0x44b: 0x39, 0x44c: 0x39, 0x44d: 0x39, 0x44e: 0x39, 0x44f: 0x39,
0x450: 0x39, 0x451: 0x39, 0x452: 0x39, 0x453: 0x39, 0x454: 0x39, 0x455: 0x39, 0x456: 0x39, 0x457: 0x39,
0x458: 0x39, 0x459: 0x39, 0x45a: 0x39, 0x45b: 0x39, 0x45c: 0x39, 0x45d: 0x39, 0x45e: 0x39, 0x45f: 0xc1,
0x458: 0x39, 0x459: 0x39, 0x45a: 0x39, 0x45b: 0x39, 0x45c: 0x39, 0x45d: 0x39, 0x45e: 0x39, 0x45f: 0x39,
0x460: 0x39, 0x461: 0x39, 0x462: 0x39, 0x463: 0x39, 0x464: 0x39, 0x465: 0x39, 0x466: 0x39, 0x467: 0x39,
0x468: 0x39, 0x469: 0x39, 0x46a: 0x39, 0x46b: 0x39, 0x46c: 0x39, 0x46d: 0x39, 0x46e: 0x39, 0x46f: 0x39,
0x470: 0x39, 0x471: 0x39, 0x472: 0x39, 0x473: 0xc2, 0x474: 0xc3,
0x470: 0x39, 0x471: 0x39, 0x472: 0x39, 0x473: 0xc1, 0x474: 0xc2, 0x476: 0x39, 0x477: 0xc3,
// Block 0x12, offset 0x480
0x4bf: 0xc4,
// Block 0x13, offset 0x4c0

View File

@@ -1,23 +1,34 @@
package displaywidth
import (
"strings"
"unicode/utf8"
"github.com/clipperhouse/stringish"
"github.com/clipperhouse/uax29/v2/graphemes"
)
// Options allows you to specify the treatment of ambiguous East Asian
// characters. When EastAsianWidth is false (default), ambiguous East Asian
// characters are treated as width 1. When EastAsianWidth is true, ambiguous
// East Asian characters are treated as width 2.
// characters and ANSI escape sequences.
type Options struct {
// EastAsianWidth specifies whether to treat ambiguous East Asian characters
// as width 1 or 2. When false (default), ambiguous East Asian characters
// are treated as width 1. When true, they are width 2.
EastAsianWidth bool
// ControlSequences specifies whether to ignore ECMA-48 escape sequences
// when calculating the display width. When false (default), ANSI escape
// sequences are treated as just a series of characters. When true, they are
// treated as a single zero-width unit.
//
// Note that this option is about *sequences*. Individual control characters
// are already treated as zero-width. With this option, ANSI sequences such as
// "\x1b[31m" and "\x1b[0m" do not count towards the width of a string.
ControlSequences bool
}
// DefaultOptions is the default options for the display width
// calculation, which is EastAsianWidth: false.
var DefaultOptions = Options{EastAsianWidth: false}
// calculation, which is EastAsianWidth false and ControlSequences false.
var DefaultOptions = Options{EastAsianWidth: false, ControlSequences: false}
// String calculates the display width of a string,
// by iterating over grapheme clusters in the string
@@ -29,19 +40,43 @@ func String(s string) int {
// String calculates the display width of a string, for the given options, by
// iterating over grapheme clusters in the string and summing their widths.
func (options Options) String(s string) int {
// Optimization: no need to parse grapheme
switch len(s) {
case 0:
return 0
case 1:
return asciiWidth(s[0])
width := 0
pos := 0
for pos < len(s) {
// Try ASCII optimization
asciiLen := printableASCIILength(s[pos:])
if asciiLen > 0 {
width += asciiLen
pos += asciiLen
continue
}
// Not ASCII, use grapheme parsing
g := graphemes.FromString(s[pos:])
g.AnsiEscapeSequences = options.ControlSequences
start := pos
for g.Next() {
v := g.Value()
width += graphemeWidth(v, options)
pos += len(v)
// Quick check: if remaining might have printable ASCII, break to outer loop
if pos < len(s) && s[pos] >= 0x20 && s[pos] <= 0x7E {
break
}
}
// Defensive, should not happen: if no progress was made,
// skip a byte to prevent infinite loop. Only applies if
// the grapheme parser misbehaves.
if pos == start {
pos++
}
}
width := 0
g := graphemes.FromString(s)
for g.Next() {
width += graphemeWidth(g.Value(), options)
}
return width
}
@@ -55,19 +90,43 @@ func Bytes(s []byte) int {
// Bytes calculates the display width of a []byte, for the given options, by
// iterating over grapheme clusters in the slice and summing their widths.
func (options Options) Bytes(s []byte) int {
// Optimization: no need to parse grapheme
switch len(s) {
case 0:
return 0
case 1:
return asciiWidth(s[0])
width := 0
pos := 0
for pos < len(s) {
// Try ASCII optimization
asciiLen := printableASCIILength(s[pos:])
if asciiLen > 0 {
width += asciiLen
pos += asciiLen
continue
}
// Not ASCII, use grapheme parsing
g := graphemes.FromBytes(s[pos:])
g.AnsiEscapeSequences = options.ControlSequences
start := pos
for g.Next() {
v := g.Value()
width += graphemeWidth(v, options)
pos += len(v)
// Quick check: if remaining might have printable ASCII, break to outer loop
if pos < len(s) && s[pos] >= 0x20 && s[pos] <= 0x7E {
break
}
}
// Defensive, should not happen: if no progress was made,
// skip a byte to prevent infinite loop. Only applies if
// the grapheme parser misbehaves.
if pos == start {
pos++
}
}
width := 0
g := graphemes.FromBytes(s)
for g.Next() {
width += graphemeWidth(g.Value(), options)
}
return width
}
@@ -107,9 +166,123 @@ func (options Options) Rune(r rune) int {
const _Default property = 0
// TruncateString truncates a string to the given maxWidth, and appends the
// given tail if the string is truncated.
//
// It ensures the visible width, including the width of the tail, is less than or
// equal to maxWidth.
//
// When [Options.ControlSequences] is true, ANSI escape sequences that appear
// after the truncation point are preserved in the output. This ensures that
// escape sequences such as SGR resets are not lost, preventing color bleed
// in terminal output.
func (options Options) TruncateString(s string, maxWidth int, tail string) string {
// Reserve the tail's own display width from the budget for the kept prefix.
// If the tail alone is wider than maxWidth this budget is negative, the
// condition below never holds, and pos stays 0.
maxWidthWithoutTail := maxWidth - options.String(tail)
// pos is the byte offset of the widest grapheme boundary whose cumulative
// width still fits the budget; total is the running width of all graphemes
// seen so far.
var pos, total int
g := graphemes.FromString(s)
g.AnsiEscapeSequences = options.ControlSequences
for g.Next() {
gw := graphemeWidth(g.Value(), options)
if total+gw <= maxWidthWithoutTail {
pos = g.End()
}
total += gw
// Truncate only once the whole string is known to exceed maxWidth;
// until then it might still fit untruncated (no tail needed).
if total > maxWidth {
if options.ControlSequences {
// Build result with trailing ANSI escape sequences preserved
var b strings.Builder
b.Grow(len(s) + len(tail)) // at most original + tail
b.WriteString(s[:pos])
b.WriteString(tail)
// Re-scan the dropped suffix and keep only ESC-led (0x1B)
// graphemes, e.g. SGR resets, so styling does not bleed
// past the truncation point.
rem := graphemes.FromString(s[pos:])
rem.AnsiEscapeSequences = true
for rem.Next() {
v := rem.Value()
if len(v) > 0 && v[0] == 0x1B {
b.WriteString(v)
}
}
return b.String()
}
return s[:pos] + tail
}
}
// No truncation
return s
}
// TruncateString truncates a string to the given maxWidth, and appends the
// given tail if the string is truncated.
//
// It ensures the visible width, including the width of the tail, is less
// than or equal to maxWidth.
//
// It is equivalent to calling [Options.TruncateString] on [DefaultOptions]
// (EastAsianWidth false, ControlSequences false).
func TruncateString(s string, maxWidth int, tail string) string {
return DefaultOptions.TruncateString(s, maxWidth, tail)
}
// TruncateBytes truncates a []byte to the given maxWidth, and appends the
// given tail if the []byte is truncated.
//
// It ensures the visible width, including the width of the tail, is less than or
// equal to maxWidth.
//
// When [Options.ControlSequences] is true, ANSI escape sequences that appear
// after the truncation point are preserved in the output. This ensures that
// escape sequences such as SGR resets are not lost, preventing color bleed
// in terminal output.
func (options Options) TruncateBytes(s []byte, maxWidth int, tail []byte) []byte {
// Reserve the tail's display width from the budget for the kept prefix;
// a tail wider than maxWidth yields a negative budget and pos stays 0.
maxWidthWithoutTail := maxWidth - options.Bytes(tail)
// pos: byte offset of the widest grapheme boundary that fits the budget;
// total: running display width of all graphemes seen so far.
var pos, total int
g := graphemes.FromBytes(s)
g.AnsiEscapeSequences = options.ControlSequences
for g.Next() {
gw := graphemeWidth(g.Value(), options)
if total+gw <= maxWidthWithoutTail {
pos = g.End()
}
total += gw
// Truncate only once the whole input is known to exceed maxWidth.
if total > maxWidth {
if options.ControlSequences {
// Build result with trailing ANSI escape sequences preserved
result := make([]byte, 0, len(s)+len(tail)) // at most original + tail
result = append(result, s[:pos]...)
result = append(result, tail...)
// Keep only ESC-led (0x1B) graphemes from the dropped suffix,
// so sequences such as SGR resets survive truncation.
rem := graphemes.FromBytes(s[pos:])
rem.AnsiEscapeSequences = true
for rem.Next() {
v := rem.Value()
if len(v) > 0 && v[0] == 0x1B {
result = append(result, v...)
}
}
return result
}
// Fresh allocation: the truncated result never aliases s.
result := make([]byte, 0, pos+len(tail))
result = append(result, s[:pos]...)
result = append(result, tail...)
return result
}
}
// No truncation: the original slice is returned as-is (shared, not copied).
return s
}
// TruncateBytes truncates a []byte to the given maxWidth, and appends the
// given tail if the []byte is truncated.
//
// It ensures the visible width, including the width of the tail, is less
// than or equal to maxWidth.
//
// It is equivalent to calling [Options.TruncateBytes] on [DefaultOptions]
// (EastAsianWidth false, ControlSequences false).
func TruncateBytes(s []byte, maxWidth int, tail []byte) []byte {
return DefaultOptions.TruncateBytes(s, maxWidth, tail)
}
// graphemeWidth returns the display width of a grapheme cluster.
// The passed string must be a single grapheme cluster.
func graphemeWidth[T stringish.Interface](s T, options Options) int {
func graphemeWidth[T ~string | []byte](s T, options Options) int {
// Optimization: no need to look up properties
switch len(s) {
case 0:
@@ -118,6 +291,11 @@ func graphemeWidth[T stringish.Interface](s T, options Options) int {
return asciiWidth(s[0])
}
// Multi-byte grapheme clusters led by a C0 control (0x00-0x1F)
if s[0] <= 0x1F {
return 0
}
p, sz := lookup(s)
prop := property(p)
@@ -150,9 +328,31 @@ func asciiWidth(b byte) int {
return 1
}
// printableASCIILength returns the number of leading bytes of s that are
// printable ASCII (0x20 space through 0x7E tilde).
//
// If the run is stopped by a non-ASCII byte (>= 0x80), the count is reduced
// by one: the grapheme parser may group the last ASCII byte with subsequent
// non-ASCII bytes, such as combining marks.
func printableASCIILength[T string | []byte](s T) int {
	n := 0
	for n < len(s) {
		c := s[n]
		if c < 0x20 || c > 0x7E {
			break
		}
		n++
	}
	if n > 0 && n < len(s) && s[n] >= 0x80 {
		n--
	}
	return n
}
// isVS16 checks if the slice matches VS16 (U+FE0F) UTF-8 encoding
// (EF B8 8F). It assumes len(s) >= 3.
func isVS16[T stringish.Interface](s T) bool {
func isVS16[T ~string | []byte](s T) bool {
return s[0] == 0xEF && s[1] == 0xB8 && s[2] == 0x8F
}

View File

@@ -1,2 +0,0 @@
.DS_Store
*.test

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2025 Matt Sherman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,64 +0,0 @@
# stringish
A small Go module that provides a generic type constraint for “string-like”
data, and a utf8 package that works with both strings and byte slices
without conversions.
```go
type Interface interface {
~[]byte | ~string
}
```
[![Go Reference](https://pkg.go.dev/badge/github.com/clipperhouse/stringish/utf8.svg)](https://pkg.go.dev/github.com/clipperhouse/stringish/utf8)
[![Test Status](https://github.com/clipperhouse/stringish/actions/workflows/gotest.yml/badge.svg)](https://github.com/clipperhouse/stringish/actions/workflows/gotest.yml)
## Install
```
go get github.com/clipperhouse/stringish
```
## Examples
```go
import (
"github.com/clipperhouse/stringish"
"github.com/clipperhouse/stringish/utf8"
)
s := "Hello, 世界"
r, size := utf8.DecodeRune(s) // not DecodeRuneInString 🎉
b := []byte("Hello, 世界")
r, size = utf8.DecodeRune(b) // same API!
func MyFoo[T stringish.Interface](s T) T {
// pass a string or a []byte
// iterate, slice, transform, whatever
}
```
## Motivation
Sometimes we want APIs to accept `string` or `[]byte` without having to convert
between those types. That conversion usually allocates!
By implementing with `stringish.Interface`, we can have a single API, and
single implementation for both types: one `Foo` instead of `Foo` and
`FooString`.
We have converted the
[`unicode/utf8` package](https://github.com/clipperhouse/stringish/blob/main/utf8/utf8.go)
as an example -- note the absence of`*InString` funcs. We might look at `x/text`
next.
## Used by
- clipperhouse/uax29: [stringish trie](https://github.com/clipperhouse/uax29/blob/master/graphemes/trie.go#L27), [stringish iterator](https://github.com/clipperhouse/uax29/blob/master/internal/iterators/iterator.go#L9), [stringish SplitFunc](https://github.com/clipperhouse/uax29/blob/master/graphemes/splitfunc.go#L21)
- [clipperhouse/displaywidth](https://github.com/clipperhouse/displaywidth)
## Prior discussion
- [Consideration of similar by the Go team](https://github.com/golang/go/issues/48643)

View File

@@ -1,5 +0,0 @@
package stringish
type Interface interface {
~[]byte | ~string
}

View File

@@ -1,4 +1,4 @@
An implementation of grapheme cluster boundaries from [Unicode text segmentation](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) (UAX 29), for Unicode version 15.0.0.
An implementation of grapheme cluster boundaries from [Unicode text segmentation](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) (UAX 29), for Unicode 17.
[![Documentation](https://pkg.go.dev/badge/github.com/clipperhouse/uax29/v2/graphemes.svg)](https://pkg.go.dev/github.com/clipperhouse/uax29/v2/graphemes)
![Tests](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg)
@@ -26,7 +26,7 @@ _A grapheme is a “single visible character”, which might be a simple as a si
## Conformance
We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-26.html#Tests29).
We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-36.html#Tests29).
![Tests](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg)
![Fuzz](https://github.com/clipperhouse/uax29/actions/workflows/gofuzz.yml/badge.svg)
@@ -76,15 +76,17 @@ for tokens.Next() { // Next() returns true until end of data
### Benchmarks
On a Mac M2 laptop, we see around 200MB/s, or around 100 million graphemes per second, and no allocations.
```
goos: darwin
goarch: arm64
pkg: github.com/clipperhouse/uax29/graphemes/comparative
cpu: Apple M2
BenchmarkGraphemes/clipperhouse/uax29-8 173805 ns/op 201.16 MB/s 0 B/op 0 allocs/op
BenchmarkGraphemes/rivo/uniseg-8 2045128 ns/op 17.10 MB/s 0 B/op 0 allocs/op
BenchmarkGraphemesMixed/clipperhouse/uax29-8 142635 ns/op 245.12 MB/s 0 B/op 0 allocs/op
BenchmarkGraphemesMixed/rivo/uniseg-8 2018284 ns/op 17.32 MB/s 0 B/op 0 allocs/op
BenchmarkGraphemesASCII/clipperhouse/uax29-8 8846 ns/op 508.73 MB/s 0 B/op 0 allocs/op
BenchmarkGraphemesASCII/rivo/uniseg-8 366760 ns/op 12.27 MB/s 0 B/op 0 allocs/op
```
### Invalid inputs

View File

@@ -0,0 +1,119 @@
package graphemes
// ansiEscapeLength returns the byte length of a valid ANSI escape sequence at the
// start of data, or 0 if none. Input is UTF-8; only 7-bit ESC sequences are
// recognized (C1 bytes 0x80-0x9F can be UTF-8 continuation bytes, so they are
// not treated as escape introducers).
//
// Recognized forms (ECMA-48 / ISO 6429):
//   - CSI: ESC [ then parameter bytes (0x30-0x3F), intermediate (0x20-0x2F), final (0x40-0x7E)
//   - OSC: ESC ] then payload until ST (ESC \) or BEL (0x07)
//   - DCS, SOS, PM, APC: ESC P / X / ^ / _ then payload until ST (ESC \)
//   - Two-byte: ESC + Fe (0x40-0x5F excluding above), or Fp (0x30-0x3F), or nF (0x20-0x2F then final)
func ansiEscapeLength[T ~string | ~[]byte](data T) int {
	n := len(data)
	// A sequence needs at least ESC plus one following byte.
	if n < 2 {
		return 0
	}
	if data[0] != esc {
		return 0
	}
	b1 := data[1]
	switch b1 {
	case '[': // CSI
		body := csiLength(data[2:])
		if body == 0 {
			return 0
		}
		return 2 + body
	case ']': // OSC allows BEL or ST as terminator
		body := oscLength(data[2:])
		if body == 0 {
			return 0
		}
		return 2 + body
	case 'P', 'X', '^', '_': // DCS, SOS, PM, APC require ST (ESC \) only
		body := stSequenceLength(data[2:])
		if body == 0 {
			return 0
		}
		return 2 + body
	}
	if b1 >= 0x40 && b1 <= 0x5F {
		// Fe (C1) two-byte; [ ] P X ^ _ handled above
		return 2
	}
	if b1 >= 0x30 && b1 <= 0x3F {
		// Fp (private-use) two-byte
		return 2
	}
	if b1 >= 0x20 && b1 <= 0x2F {
		// nF: one or more intermediates (0x20-0x2F) then one final (0x30-0x7E)
		i := 2
		for i < n && data[i] >= 0x20 && data[i] <= 0x2F {
			i++
		}
		if i < n && data[i] >= 0x30 && data[i] <= 0x7E {
			return i + 1
		}
		return 0
	}
	return 0
}
// csiLength returns the byte length of the CSI body (parameter, intermediate,
// and final bytes), or 0 if the body is malformed or unterminated.
// data is the slice after "ESC [".
//
// Per ECMA-48 the body has the form:
//
//	parameters (0x30-0x3F)*, intermediates (0x20-0x2F)*, final (0x40-0x7E)
//
// A parameter byte appearing after an intermediate byte is invalid.
func csiLength[T ~string | ~[]byte](data T) int {
	sawIntermediate := false
	for i := 0; i < len(data); i++ {
		switch b := data[i]; {
		case b >= 0x40 && b <= 0x7E:
			// Final byte terminates the sequence.
			return i + 1
		case b >= 0x30 && b <= 0x3F:
			// Parameter bytes may not follow an intermediate byte.
			if sawIntermediate {
				return 0
			}
		case b >= 0x20 && b <= 0x2F:
			sawIntermediate = true
		default:
			return 0
		}
	}
	// Ran out of input before a final byte.
	return 0
}
// oscLength returns the byte length of the OSC payload up to and including
// its terminator, or 0 if unterminated. data is the slice after "ESC ]".
//
// OSC accepts either BEL (0x07) or ST (ESC \) as a terminator, per
// widespread terminal convention.
func oscLength[T ~string | ~[]byte](data T) int {
	n := len(data)
	for i := 0; i < n; i++ {
		switch data[i] {
		case bel:
			return i + 1
		case esc:
			if i+1 < n && data[i+1] == '\\' {
				return i + 2
			}
		}
	}
	return 0
}
// stSequenceLength returns the byte length of a control-string body up to and
// including the ST (ESC \) terminator, or 0 if unterminated. Used for DCS,
// SOS, PM, and APC, which per ECMA-48 require ST and do not accept BEL.
// data is the slice after "ESC x".
func stSequenceLength[T ~string | ~[]byte](data T) int {
	for i := 0; i+1 < len(data); i++ {
		if data[i] == esc && data[i+1] == '\\' {
			return i + 2
		}
	}
	return 0
}

View File

@@ -1,12 +1,35 @@
package graphemes
import (
"github.com/clipperhouse/stringish"
"github.com/clipperhouse/uax29/v2/internal/iterators"
)
import "unicode/utf8"
type Iterator[T stringish.Interface] struct {
*iterators.Iterator[T]
// FromString returns an iterator for the grapheme clusters in the input string.
// Iterate while Next() is true, and access the grapheme via Value().
func FromString(s string) *Iterator[string] {
	return &Iterator[string]{
		split: splitFuncString, // string instantiation of the generic split function
		data:  s,
	}
}
// FromBytes returns an iterator for the grapheme clusters in the input bytes.
// Iterate while Next() is true, and access the grapheme via Value().
func FromBytes(b []byte) *Iterator[[]byte] {
	return &Iterator[[]byte]{
		split: splitFuncBytes, // []byte instantiation of the generic split function
		data:  b,
	}
}
// Iterator is a generic iterator for grapheme clusters in strings or byte slices,
// with an ASCII hot path optimization.
type Iterator[T ~string | ~[]byte] struct {
	split func(T, bool) (int, T, error) // bufio.SplitFunc-shaped segmentation function
	data  T                             // input being iterated
	pos   int                           // byte offset just past the current cluster
	start int                           // byte offset of the start of the current cluster
	// AnsiEscapeSequences treats ANSI escape sequences (ECMA-48) as single grapheme
	// clusters when true. Default is false.
	AnsiEscapeSequences bool
}
var (
@@ -14,18 +37,91 @@ var (
splitFuncBytes = splitFunc[[]byte]
)
// FromString returns an iterator for the grapheme clusters in the input string.
// Iterate while Next() is true, and access the grapheme via Value().
func FromString(s string) Iterator[string] {
return Iterator[string]{
iterators.New(splitFuncString, s),
const (
	esc = 0x1B // ESC: introduces ANSI escape sequences
	cr  = 0x0D // CR: excluded from the ASCII hot path so CR+LF can form one cluster
	bel = 0x07 // BEL: alternate OSC terminator
)
// Next advances the iterator to the next grapheme cluster.
// Returns false when there are no more grapheme clusters.
func (iter *Iterator[T]) Next() bool {
	if iter.pos >= len(iter.data) {
		return false
	}
	iter.start = iter.pos
	// When enabled, consume an entire ANSI escape sequence as a single token.
	if iter.AnsiEscapeSequences && iter.data[iter.pos] == esc {
		if a := ansiEscapeLength(iter.data[iter.pos:]); a > 0 {
			iter.pos += a
			return true
		}
		// Not a valid escape sequence; fall through to normal parsing.
	}
	// ASCII hot path: any ASCII is one grapheme when next byte is ASCII or end.
	// Fall through on CR so splitfunc can handle CR+LF as a single cluster.
	b := iter.data[iter.pos]
	if b < utf8.RuneSelf && b != cr {
		if iter.pos+1 >= len(iter.data) || iter.data[iter.pos+1] < utf8.RuneSelf {
			iter.pos++
			return true
		}
	}
	// Fall back to actual grapheme parsing
	remaining := iter.data[iter.pos:]
	advance, _, err := iter.split(remaining, true)
	if err != nil {
		panic(err)
	}
	if advance <= 0 {
		panic("splitFunc returned a zero or negative advance")
	}
	iter.pos += advance
	// Defensive check: a split function must never consume more than it was given.
	if iter.pos > len(iter.data) {
		panic("splitFunc advanced beyond end of data")
	}
	return true
}
// FromBytes returns an iterator for the grapheme clusters in the input bytes.
// Iterate while Next() is true, and access the grapheme via Value().
func FromBytes(b []byte) Iterator[[]byte] {
return Iterator[[]byte]{
iterators.New(splitFuncBytes, b),
}
// Value returns the current grapheme cluster, i.e. the span consumed by the
// most recent successful call to Next.
func (iter *Iterator[T]) Value() T {
	return iter.data[iter.start:iter.pos]
}
// Start returns the byte position of the current grapheme in the original data.
func (iter *Iterator[T]) Start() int {
	return iter.start
}
// End returns the byte position after the current grapheme in the original data.
func (iter *Iterator[T]) End() int {
	return iter.pos
}
// Reset resets the iterator to the beginning of the data, keeping the data.
func (iter *Iterator[T]) Reset() {
	iter.start = 0
	iter.pos = 0
}
// SetText sets the data for the iterator to operate on, and resets all state.
func (iter *Iterator[T]) SetText(data T) {
	iter.data = data
	iter.start = 0
	iter.pos = 0
}
// First returns the first grapheme cluster without advancing the iterator.
func (iter *Iterator[T]) First() T {
	if len(iter.data) == 0 {
		// Empty input: return the (empty) data as-is.
		return iter.data
	}
	// Use a copy to leverage Next()'s ASCII optimization
	// without mutating this iterator's position.
	cp := *iter
	cp.pos = 0
	cp.start = 0
	cp.Next()
	return cp.Value()
}

View File

@@ -2,8 +2,6 @@ package graphemes
import (
"bufio"
"github.com/clipperhouse/stringish"
)
// is determines if lookup intersects propert(ies)
@@ -13,12 +11,22 @@ func (lookup property) is(properties property) bool {
const _Ignore = _Extend
// incbState tracks state for GB9c rule (Indic conjunct clusters)
// Pattern: Consonant (Extend|Linker)* Linker (Extend|Linker)* × Consonant
type incbState int
const (
incbNone incbState = iota // initial/reset
incbConsonant // seen Consonant, awaiting Linker
incbLinker // seen Consonant and Linker (conjunct ready)
)
// SplitFunc is a bufio.SplitFunc implementation of Unicode grapheme cluster segmentation, for use with bufio.Scanner.
//
// See https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries.
var SplitFunc bufio.SplitFunc = splitFunc[[]byte]
func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T, err error) {
func splitFunc[T ~string | ~[]byte](data T, atEOF bool) (advance int, token T, err error) {
var empty T
if len(data) == 0 {
return 0, empty, nil
@@ -30,6 +38,9 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T,
var lastLastExIgnore property = 0 // "last one before that"
var regionalIndicatorCount int
// GB9c state: tracking Indic conjunct clusters
var incb incbState
// Rules are usually of the form Cat1 × Cat2; "current" refers to the first property
// to the right of the ×, from which we look back or forward
@@ -76,6 +87,23 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T,
lastExIgnore = last
}
// Update GB9c state based on what we just advanced past
if last.is(_InCBConsonant | _InCBLinker | _InCBExtend) {
switch {
case last.is(_InCBConsonant):
if incb != incbLinker {
incb = incbConsonant
}
case last.is(_InCBLinker):
if incb >= incbConsonant {
incb = incbLinker
}
// case last.is(_InCBExtend): stay in current state
}
} else {
incb = incbNone
}
current, w = lookup(data[pos:])
if w == 0 {
if atEOF {
@@ -141,11 +169,14 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T,
}
// https://unicode.org/reports/tr29/#GB9c
// TODO(clipperhouse):
// It appears to be added in Unicode 15.1.0:
// https://unicode.org/versions/Unicode15.1.0/#Migration
// This package currently supports Unicode 15.0.0, so
// out of scope for now
// Do not break within certain combinations with Indic_Conjunct_Break (InCB)=Linker.
if incb == incbLinker && current.is(_InCBConsonant) {
// After matching the pattern, reset state to start tracking a new pattern
// The current Consonant becomes the start of the new pattern
incb = incbConsonant
pos += w
continue
}
// https://unicode.org/reports/tr29/#GB11
if current.is(_ExtendedPictographic) && last.is(_ZWJ) && lastLastExIgnore.is(_ExtendedPictographic) {

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,100 +0,0 @@
package iterators
import "github.com/clipperhouse/stringish"
type SplitFunc[T stringish.Interface] func(T, bool) (int, T, error)
// Iterator is a generic iterator for words that are either []byte or string.
// Iterate while Next() is true, and access the word via Value().
type Iterator[T stringish.Interface] struct {
split SplitFunc[T]
data T
start int
pos int
}
// New creates a new Iterator for the given data and SplitFunc.
func New[T stringish.Interface](split SplitFunc[T], data T) *Iterator[T] {
return &Iterator[T]{
split: split,
data: data,
}
}
// SetText sets the text for the iterator to operate on, and resets all state.
func (iter *Iterator[T]) SetText(data T) {
iter.data = data
iter.start = 0
iter.pos = 0
}
// Split sets the SplitFunc for the Iterator.
func (iter *Iterator[T]) Split(split SplitFunc[T]) {
iter.split = split
}
// Next advances the iterator to the next token. It returns false when there
// are no remaining tokens or an error occurred.
func (iter *Iterator[T]) Next() bool {
if iter.pos == len(iter.data) {
return false
}
if iter.pos > len(iter.data) {
panic("SplitFunc advanced beyond the end of the data")
}
iter.start = iter.pos
advance, _, err := iter.split(iter.data[iter.pos:], true)
if err != nil {
panic(err)
}
if advance <= 0 {
panic("SplitFunc returned a zero or negative advance")
}
iter.pos += advance
if iter.pos > len(iter.data) {
panic("SplitFunc advanced beyond the end of the data")
}
return true
}
// Value returns the current token.
func (iter *Iterator[T]) Value() T {
return iter.data[iter.start:iter.pos]
}
// Start returns the byte position of the current token in the original data.
func (iter *Iterator[T]) Start() int {
return iter.start
}
// End returns the byte position after the current token in the original data.
func (iter *Iterator[T]) End() int {
return iter.pos
}
// Reset resets the iterator to the beginning of the data.
func (iter *Iterator[T]) Reset() {
iter.start = 0
iter.pos = 0
}
func (iter *Iterator[T]) First() T {
if len(iter.data) == 0 {
return iter.data
}
advance, _, err := iter.split(iter.data, true)
if err != nil {
panic(err)
}
if advance <= 0 {
panic("SplitFunc returned a zero or negative advance")
}
if advance > len(iter.data) {
panic("SplitFunc advanced beyond the end of the data")
}
return iter.data[:advance]
}

View File

@@ -527,46 +527,34 @@ func (c *Chain) wrapCallable(fn interface{}, args ...interface{}) (func() error,
}
// executeStep runs a single step, applying retries if configured.
// This version is synchronous and avoids the bugs caused by the previous goroutine-based implementation.
func (c *Chain) executeStep(ctx context.Context, step *chainStep) error {
// First, check if the context has already been canceled before starting the step.
// This allows the chain to fail fast.
select {
case <-ctx.Done():
return ctx.Err()
default:
// Context is still active, proceed.
}
// If the step has retry logic configured...
if step.config.retry != nil {
retry := step.config.retry.Transform(WithContext(ctx))
// Wrap step execution to respect context
wrappedFn := func() error {
type result struct {
err error
}
done := make(chan result, 1)
go func() {
done <- result{err: step.execute()}
}()
select {
case res := <-done:
return res.err
case <-ctx.Done():
return ctx.Err()
}
}
return retry.Execute(wrappedFn)
}
// Non-retry case also respects context
type result struct {
err error
}
done := make(chan result, 1)
go func() {
done <- result{err: step.execute()}
}()
select {
case res := <-done:
return res.err
case <-ctx.Done():
return ctx.Err()
// Create a new retry instance that is aware of the chain's context.
// The retry executor will be responsible for checking ctx.Done() between attempts.
retryExecutor := step.config.retry.Transform(WithContext(ctx))
// Execute the step's function directly. The retry mechanism will manage the loop,
// delays, and context cancellation checks. We pass step.execute without any
// extra goroutine wrappers.
return retryExecutor.Execute(step.execute)
}
// For a simple, non-retrying step, execute the function directly and synchronously
// in the current goroutine. This is the simplest, fastest, and most correct approach.
// It ensures that database connections are used and returned to the pool sequentially,
// preventing the deadlock issue.
return step.execute()
}
// enhanceError wraps an error with additional context from the step.

View File

@@ -95,6 +95,10 @@ type contextItem struct {
// context, cause, and metadata like code and category. It is thread-safe and
// supports pooling for performance.
type Error struct {
// Fields used in atomic operations. Place them at the beginning of the
// struct to ensure proper alignment across all architectures.
count uint64 // Occurrence count for tracking frequency.
// Primary fields (frequently accessed).
msg string // The error message displayed by Error().
name string // The error name or type (e.g., "AuthError").
@@ -103,7 +107,6 @@ type Error struct {
// Secondary metadata.
template string // Fallback message template if msg is empty.
category string // Error category (e.g., "network").
count uint64 // Occurrence count for tracking frequency.
code int32 // HTTP-like status code (e.g., 400, 500).
smallCount int32 // Number of items in smallContext.
@@ -172,7 +175,7 @@ func newError() *Error {
//
// err := errors.Empty().With("key", "value").WithCode(400)
func Empty() *Error {
return emptyError
return newError()
}
// Named creates an error with the specified name and captures a stack trace.
@@ -213,10 +216,18 @@ func New(text string) *Error {
// err := errors.Newf("query failed: %w", cause)
// // err.Error() will match fmt.Errorf("query failed: %w", cause).Error()
// // errors.Unwrap(err) == cause
func Newf(format string, args ...interface{}) *Error {
func Newf(f any, args ...interface{}) *Error {
var format string
switch v := f.(type) {
case string:
format = v
case fmt.Stringer:
format = v.String()
default:
panic("Newf: format must be a string or fmt.Stringer")
}
err := newError()
// --- Start: Parsing and Validation (mostly unchanged) ---
var wCount int
var wArgPos = -1
var wArg error
@@ -356,11 +367,10 @@ func Newf(format string, args ...interface{}) *Error {
err.formatWrapped = false
return err
}
// --- End: Parsing and Validation ---
// --- Start: Processing Valid Format String ---
// Start: Processing Valid Format String
if wCount == 1 && wArg != nil {
// --- Handle %w: Simulate for Sprintf and pre-format ---
// Handle %w: Simulate for Sprintf and pre-format
err.cause = wArg // Set the cause for unwrapping
err.formatWrapped = true // Signal that msg is the final formatted string
@@ -397,10 +407,10 @@ func Newf(format string, args ...interface{}) *Error {
// Store the final, fully formatted string, matching fmt.Errorf output
err.msg = result
}
// --- End %w Simulation ---
// End %w Simulation
} else {
// --- No %w or wArg is nil: Format directly (original logic) ---
// No %w or wArg is nil: Format directly (original logic)
result, fmtErr := FmtErrorCheck(format, args...)
if fmtErr != nil {
err.msg = fmt.Sprintf("errors.Newf: formatting error for format %q: %v", format, fmtErr)
@@ -411,7 +421,7 @@ func Newf(format string, args ...interface{}) *Error {
err.formatWrapped = false // Ensure false if no %w was involved
}
}
// --- End: Processing Valid Format String ---
// End: Processing Valid Format String
return err
}
@@ -448,38 +458,6 @@ func FmtErrorCheck(format string, args ...interface{}) (result string, err error
return result, nil
}
// countFmtArgs counts format specifiers that consume arguments in a format string.
// Ignores %% and non-consuming verbs like %n.
// Internal use by Newf for argument validation.
func countFmtArgs(format string) int {
count := 0
runes := []rune(format)
i := 0
for i < len(runes) {
if runes[i] == '%' {
if i+1 < len(runes) && runes[i+1] == '%' {
i += 2 // Skip %%
continue
}
i++ // Move past %
for i < len(runes) && (runes[i] == '+' || runes[i] == '-' || runes[i] == '#' ||
runes[i] == ' ' || runes[i] == '0' ||
(runes[i] >= '1' && runes[i] <= '9') || runes[i] == '.') {
i++
}
if i < len(runes) {
if strings.ContainsRune("vTtbcdoqxXUeEfFgGsp", runes[i]) {
count++
}
i++ // Move past verb
}
} else {
i++
}
}
return count
}
// Std creates a standard error using errors.New for compatibility.
// Does not capture stack traces or add context.
// Example:
@@ -700,8 +678,8 @@ func (e *Error) Error() string {
return e.msg // Return the pre-formatted fmt.Errorf-compatible string
}
// --- Original logic for errors not created via Newf("%w", ...) ---
// --- or errors created via New/Named and then Wrap() called. ---
// Original logic for errors not created via Newf("%w", ...)
// or errors created via New/Named and then Wrap() called.
var buf strings.Builder
// Append primary message part (msg, template, or name)

View File

@@ -430,3 +430,8 @@ func Wrapf(err error, format string, args ...interface{}) *Error {
e.cause = err
return e
}
// Err creates a new Error with the given message and wraps the provided error
// as its cause. It is shorthand for New(msg).Wrap(err).
func Err(msg string, err error) *Error {
	return New(msg).Wrap(err)
}

37
vendor/github.com/olekukonko/ll/.goreleaser.yaml generated vendored Normal file
View File

@@ -0,0 +1,37 @@
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
version: 2
project_name: ll
# For a library repo, publish source archives instead of binaries.
source:
enabled: true
name_template: "{{ .ProjectName }}_{{ .Version }}"
# Optional: include/exclude files in the source archive (defaults are usually fine)
# files:
# - README.md
# - LICENSE
# - go.mod
# - go.sum
# - "**/*.go"
# No binaries to build.
builds: []
## Other Information
checksum:
name_template: "checksums.txt"
snapshot:
version_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- "^docs:"
- "^test:"
- "^chore:"
- "^ci:"

99
vendor/github.com/olekukonko/ll/Makefile generated vendored Normal file
View File

@@ -0,0 +1,99 @@
# Git remote for pushing tags
REMOTE ?= origin
# Version for release tagging (required for tag/release targets)
RELEASE_VERSION ?=
# Convenience
GO ?= go
GOLANGCI ?= golangci-lint
GORELEASER?= goreleaser
.PHONY: help \
test race bench fmt tidy lint check \
ensure-clean ensure-release-version tag tag-delete \
release release-dry
help:
@echo "Targets:"
@echo " fmt - gofmt + go fmt"
@echo " tidy - go mod tidy"
@echo " test - go test ./..."
@echo " race - go test -race ./..."
@echo " bench - go test -bench=. ./..."
@echo " lint - golangci-lint run ./... (if installed)"
@echo " check - fmt + tidy + test + race"
@echo ""
@echo "Release targets:"
@echo " tag - Create annotated tag RELEASE_VERSION and push"
@echo " tag-delete - Delete tag RELEASE_VERSION locally + remote"
@echo " release - tag + goreleaser release --clean (if you use goreleaser)"
@echo " release-dry - tag + goreleaser release --clean --skip=publish"
@echo ""
@echo "Usage:"
@echo " make check"
@echo " make tag RELEASE_VERSION=v0.1.2"
@echo " make release RELEASE_VERSION=v0.1.2"
fmt:
@echo "Formatting..."
gofmt -w -s .
$(GO) fmt ./...
tidy:
@echo "Tidying..."
$(GO) mod tidy
test:
@echo "Testing..."
$(GO) test ./... -count=1
race:
@echo "Race testing..."
$(GO) test ./... -race -count=1
bench:
@echo "Bench..."
$(GO) test ./... -bench=. -run=^$$
lint:
@echo "Linting..."
@command -v $(GOLANGCI) >/dev/null 2>&1 || { echo "golangci-lint not found"; exit 1; }
$(GOLANGCI) run ./...
check: fmt tidy test race
# --------------------------
# Release helpers
# --------------------------
ensure-clean:
@echo "Checking git working tree..."
@git diff --quiet || (echo "Error: tracked changes exist. Commit/stash them."; exit 1)
@test -z "$$(git status --porcelain)" || (echo "Error: uncommitted/untracked files:"; git status --porcelain; exit 1)
@echo "OK: working tree clean"
ensure-release-version:
@test -n "$(RELEASE_VERSION)" || (echo "Error: set RELEASE_VERSION, e.g. make tag RELEASE_VERSION=v0.1.2"; exit 1)
tag: ensure-clean ensure-release-version
@if git rev-parse "$(RELEASE_VERSION)" >/dev/null 2>&1; then \
echo "Error: tag $(RELEASE_VERSION) already exists. Bump version."; \
exit 1; \
fi
@echo "Tagging $(RELEASE_VERSION) at HEAD $$(git rev-parse --short HEAD)"
@git tag -a $(RELEASE_VERSION) -m "$(RELEASE_VERSION)"
@git push $(REMOTE) $(RELEASE_VERSION)
tag-delete: ensure-release-version
@echo "Deleting tag $(RELEASE_VERSION) locally + remote..."
@git tag -d $(RELEASE_VERSION) 2>/dev/null || true
@git push $(REMOTE) :refs/tags/$(RELEASE_VERSION) || true
release: tag
@command -v $(GORELEASER) >/dev/null 2>&1 || { echo "goreleaser not found"; exit 1; }
$(GORELEASER) release --clean
release-dry: tag
@command -v $(GORELEASER) >/dev/null 2>&1 || { echo "goreleaser not found"; exit 1; }
$(GORELEASER) release --clean --skip=publish

View File

@@ -1,17 +1,19 @@
# ll - A Modern Structured Logging Library for Go
`ll` is a high-performance, production-ready logging library for Go, designed to provide **hierarchical namespaces**, **structured logging**, **middleware pipelines**, **conditional logging**, and support for multiple output formats, including text, JSON, colorized logs, and compatibility with Gos `slog`. Its ideal for applications requiring fine-grained log control, extensibility, and scalability.
`ll` is a high-performance, production-ready logging library for Go, designed to provide **hierarchical namespaces**, **structured logging**, **middleware pipelines**, **conditional logging**, and support for multiple output formats, including text, JSON, colorized logs, syslog, VictoriaLogs, and compatibility with Go's `slog`. It's ideal for applications requiring fine-grained log control, extensibility, and scalability.
## Key Features
- **Hierarchical Namespaces**: Organize logs with fine-grained control over subsystems (e.g., "app/db").
- **Structured Logging**: Add key-value metadata for machine-readable logs.
- **Middleware Pipeline**: Customize log processing with error-based rejection.
- **Conditional Logging**: Optimize performance by skipping unnecessary log operations.
- **Multiple Output Formats**: Support for text, JSON, colorized logs, and `slog` integration.
- **Debugging Utilities**: Inspect variables (`Dbg`), binary data (`Dump`), and stack traces (`Stack`).
- **Thread-Safe**: Built for concurrent use with mutex-protected state.
- **Performance Optimized**: Minimal allocations and efficient namespace caching.
- **Logging Enabled by Default** - Zero configuration to start logging
- **Hierarchical Namespaces** - Organize logs with fine-grained control over subsystems (e.g., "app/db")
- **Structured Logging** - Add key-value metadata for machine-readable logs
- **Middleware Pipeline** - Customize log processing with rate limiting, sampling, and deduplication
- **Conditional & Error-Based Logging** - Optimize performance with fluent `If`, `IfErr`, `IfAny`, `IfOne` chains
- **Multiple Output Formats** - Text, JSON, colorized ANSI, syslog, VictoriaLogs, and `slog` integration
- **Advanced Debugging Utilities** - Source-aware `Dbg()`, hex/ASCII `Dump()`, private field `Inspect()`, and stack traces
- **Production Ready** - Buffered batching, log rotation, duplicate suppression, and rate limiting
- **Thread-Safe** - Built for high-concurrency with atomic operations, sharded mutexes, and lock-free fast paths
- **Performance Optimized** - Zero allocations for disabled logs, sync.Pool buffers, LRU caching for source files
## Installation
@@ -21,235 +23,267 @@ Install `ll` using Go modules:
go get github.com/olekukonko/ll
```
Ensure you have Go 1.21 or later for optimal compatibility.
## Getting Started
Here's a quick example to start logging with `ll`:
Requires Go 1.21 or later.
## Quick Start
```go
package main
import (
"github.com/olekukonko/ll"
)
import "github.com/olekukonko/ll"
func main() {
// Create a logger with an empty root namespace ("app" is attached below)
logger := ll.New("")
// enable output
logger.Enable()
// Basic log
logger.Info("Welcome") // Output: INFO: Welcome
logger = logger.Namespace("app")
// Basic log
logger.Info("start at :8080") // Output: [app] INFO: start at :8080
//Output
//INFO: Welcome
//[app] INFO: start at :8080
}
```
```go
package main
import (
"github.com/olekukonko/ll"
"github.com/olekukonko/ll/lh"
"os"
)
func main() {
// Chaining
logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout))
// Basic log
logger.Info("Application started") // Output: [app] INFO: Application started
// Structured log with fields
logger.Fields("user", "alice", "status", 200).Info("User logged in")
// Output: [app] INFO: User logged in [user=alice status=200]
// Conditional log
debugMode := false
logger.If(debugMode).Debug("Debug info") // No output (debugMode is false)
// Logger is ENABLED by default - no .Enable() needed!
logger := ll.New("app")
// Basic logging - works immediately
logger.Info("Server starting") // Output: [app] INFO: Server starting
logger.Warn("Memory high") // Output: [app] WARN: Memory high
logger.Error("Connection failed") // Output: [app] ERROR: Connection failed
// Structured fields
logger.Fields("user", "alice", "status", 200).Info("Login successful")
// Output: [app] INFO: Login successful [user=alice status=200]
}
```
## Core Features
**That's it. No `.Enable()`, no handlers to configure—it just works.**
### 1. Hierarchical Namespaces
## Core Concepts
Namespaces allow you to organize logs hierarchically, enabling precise control over logging for different parts of your application. This is especially useful for large systems with multiple components.
### 1. Enabled by Default, Configurable When Needed
**Benefits**:
- **Granular Control**: Enable/disable logs for specific subsystems (e.g., "app/db" vs. "app/api").
- **Scalability**: Manage log volume in complex applications.
- **Readability**: Clear namespace paths improve traceability.
Unlike many logging libraries that require explicit enabling, `ll` **logs immediately**. This eliminates boilerplate and reduces the chance of missing logs in production.
**Example**:
```go
logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout))
// This works out of the box:
ll.Info("Service started") // Output: [] INFO: Service started
// Child loggers
dbLogger := logger.Namespace("db")
apiLogger := logger.Namespace("api").Style(lx.NestedPath)
// Namespace control
logger.NamespaceEnable("app/db") // Enable DB logs
logger.NamespaceDisable("app/api") // Disable API logs
dbLogger.Info("Query executed") // Output: [app/db] INFO: Query executed
apiLogger.Info("Request received") // No output
// But you still have full control:
ll.Disable() // Global shutdown
ll.Enable() // Reactivate
```
### 2. Structured Logging
### 2. Hierarchical Namespaces
Add key-value metadata to logs for machine-readable output, making it easier to query and analyze logs in tools like ELK or Grafana.
Organize logs hierarchically with precise control over subsystems:
**Example**:
```go
logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout))
// Create a logger hierarchy
root := ll.New("app")
db := root.Namespace("database")
cache := root.Namespace("cache").Style(lx.NestedPath)
// Variadic fields
logger.Fields("user", "bob", "status", 200).Info("Request completed")
// Output: [app] INFO: Request completed [user=bob status=200]
// Control logging per namespace
root.NamespaceEnable("app/database") // Enable database logs
root.NamespaceDisable("app/cache") // Disable cache logs
db.Info("Connected") // Output: [app/database] INFO: Connected
cache.Info("Hit") // No output (disabled)
```
### 3. Structured Logging with Ordered Fields
Fields maintain insertion order and support fluent chaining:
```go
// Fluent key-value pairs
logger.
Fields("request_id", "req-123").
Fields("user", "alice").
Fields("duration_ms", 42).
Info("Request processed")
// Map-based fields
logger.Field(map[string]interface{}{"method": "GET"}).Info("Request")
// Output: [app] INFO: Request [method=GET]
logger.Field(map[string]interface{}{
"method": "POST",
"path": "/api/users",
}).Debug("API call")
// Persistent context (included in ALL subsequent logs)
logger.AddContext("environment", "production", "version", "1.2.3")
logger.Info("Deployed") // Output: ... [environment=production version=1.2.3]
```
### 3. Middleware Pipeline
### 4. Conditional & Error-Based Logging
Customize log processing with a middleware pipeline. Middleware functions can enrich, filter, or transform logs, using an error-based rejection mechanism (non-nil errors stop logging).
Optimize performance with fluent conditional chains that **completely skip processing** when conditions are false:
**Example**:
```go
logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout))
// Boolean conditions
logger.If(debugMode).Debug("Detailed diagnostics") // No overhead when false
logger.If(featureEnabled).Info("Feature used")
// Enrich logs with app metadata
logger.Use(ll.FuncMiddleware(func(e *lx.Entry) error {
if e.Fields == nil {
e.Fields = make(map[string]interface{})
}
e.Fields["app"] = "myapp"
return nil
}))
// Error conditions
err := db.Query()
logger.IfErr(err).Error("Query failed") // Logs only if err != nil
// Filter low-level logs
logger.Use(ll.FuncMiddleware(func(e *lx.Entry) error {
if e.Level < lx.LevelWarn {
return fmt.Errorf("level too low")
// Multiple conditions - ANY true
logger.IfErrAny(err1, err2, err3).Fatal("System failure")
// Multiple conditions - ALL true
logger.IfErrOne(validateErr, authErr).Error("Both checks failed")
// Chain conditions
logger.
If(debugMode).
IfErr(queryErr).
Fields("query", sql).
Debug("Query debug")
```
**Performance**: When conditions are false, the logger returns immediately with zero allocations.
### 5. Powerful Debugging Toolkit
`ll` includes advanced debugging utilities not found in standard logging libraries:
#### Dbg() - Source-Aware Variable Inspection
Captures both variable name AND value from your source code:
```go
x := 42
user := &User{Name: "Alice"}
ll.Dbg(x, user)
// Output: [file.go:123] x = 42, *user = &{Name:Alice}
```
#### Dump() - Hex/ASCII Binary Inspection
Perfect for protocol debugging and binary data:
```go
ll.Handler(lh.NewColorizedHandler(os.Stdout))
ll.Dump([]byte("hello\nworld"))
// Output: Colorized hex/ASCII dump with offset markers
```
#### Inspect() - Private Field Reflection
Reveals unexported fields, embedded structs, and pointer internals:
```go
type secret struct {
password string // unexported!
}
s := secret{password: "hunter2"}
ll.Inspect(s)
// Output: [file.go:123] INSPECT: {
// "(password)": "hunter2" // Note the parentheses
// }
```
#### Stack() - Configurable Stack Traces
```go
ll.StackSize(8192) // Larger buffer for deep stacks
ll.Stack("Critical failure")
// Output: ERROR: Critical failure [stack=goroutine 1 [running]...]
```
#### Mark() - Execution Flow Tracing
```go
func process() {
ll.Mark() // *MARK*: [file.go:123]
ll.Mark("phase1") // *phase1*: [file.go:124]
// ... work ...
}
```
### 6. Production-Ready Handlers
```go
import (
"github.com/olekukonko/ll"
"github.com/olekukonko/ll/lh"
"github.com/olekukonko/ll/l3rd/syslog"
"github.com/olekukonko/ll/l3rd/victoria"
)
// JSON for structured logging
logger.Handler(lh.NewJSONHandler(os.Stdout))
// Colorized for development
logger.Handler(lh.NewColorizedHandler(os.Stdout,
lh.WithColorTheme("dark"),
lh.WithColorIntensity(lh.IntensityVibrant),
))
// Buffered for high throughput (100 entries or 10 seconds)
buffered := lh.NewBuffered(
lh.NewJSONHandler(os.Stdout),
lh.WithBatchSize(100),
lh.WithFlushInterval(10 * time.Second),
)
logger.Handler(buffered)
defer buffered.Close() // Ensures flush on exit
// Syslog integration
syslogHandler, _ := syslog.New(
syslog.WithTag("myapp"),
syslog.WithFacility(syslog.LOG_LOCAL0),
)
logger.Handler(syslogHandler)
// VictoriaLogs (cloud-native)
victoriaHandler, _ := victoria.New(
victoria.WithURL("http://victoria-logs:9428"),
victoria.WithAppName("payment-service"),
victoria.WithEnvironment("production"),
victoria.WithBatching(200, 5*time.Second),
)
logger.Handler(victoriaHandler)
```
### 7. Middleware Pipeline
Transform, filter, or reject logs with a middleware pipeline:
```go
// Rate limiting - 10 logs per second maximum
rateLimiter := lm.NewRateLimiter(lx.LevelInfo, 10, time.Second)
logger.Use(rateLimiter)
// Sampling - 10% of debug logs
sampler := lm.NewSampling(lx.LevelDebug, 0.1)
logger.Use(sampler)
// Deduplication - suppress identical logs for 2 seconds
deduper := lh.NewDedup(logger.GetHandler(), 2*time.Second)
logger.Handler(deduper)
// Custom middleware
logger.Use(ll.Middle(func(e *lx.Entry) error {
if strings.Contains(e.Message, "password") {
return fmt.Errorf("sensitive information redacted")
}
return nil
}))
logger.Info("Ignored") // No output (filtered)
logger.Warn("Warning") // Output: [app] WARN: Warning [app=myapp]
```
### 4. Conditional Logging
### 8. Global Convenience API
Optimize performance by skipping expensive log operations when conditions are false, ideal for production environments.
Use package-level functions for quick logging without creating loggers:
**Example**:
```go
logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout))
import "github.com/olekukonko/ll"
featureEnabled := true
logger.If(featureEnabled).Fields("action", "update").Info("Feature used")
// Output: [app] INFO: Feature used [action=update]
logger.If(false).Info("Ignored") // No output, no processing
func main() {
ll.Info("Server starting") // Global logger
ll.Fields("port", 8080).Info("Listening")
// Conditional logging at package level
ll.If(simulation).Debug("Test mode")
ll.IfErr(err).Error("Startup failed")
// Debug utilities
ll.Dbg(config)
ll.Dump(requestBody)
ll.Inspect(complexStruct)
}
```
### 5. Multiple Output Formats
## Real-World Examples
`ll` supports various output formats, including human-readable text, colorized logs, JSON, and integration with Gos `slog` package.
**Example**:
```go
logger := ll.New("app").Enable()
// Text output
logger.Handler(lh.NewTextHandler(os.Stdout))
logger.Info("Text log") // Output: [app] INFO: Text log
// JSON output
logger.Handler(lh.NewJSONHandler(os.Stdout, time.RFC3339Nano))
logger.Info("JSON log") // Output: {"timestamp":"...","level":"INFO","message":"JSON log","namespace":"app"}
// Slog integration
slogText := slog.NewTextHandler(os.Stdout, nil)
logger.Handler(lh.NewSlogHandler(slogText))
logger.Info("Slog log") // Output: level=INFO msg="Slog log" namespace=app class=Text
```
### 6. Debugging Utilities
`ll` provides powerful tools for debugging, including variable inspection, binary data dumps, and stack traces.
#### Core Debugging Methods
1. **Dbg - Contextual Inspection**
Inspects variables with file and line context, preserving variable names and handling all Go types.
```go
x := 42
user := struct{ Name string }{"Alice"}
ll.Dbg(x) // Output: [file.go:123] x = 42
ll.Dbg(user) // Output: [file.go:124] user = [Name:Alice]
```
2. **Dump - Binary Inspection**
Displays a hex/ASCII view of data, optimized for strings, bytes, and complex types (with JSON fallback).
```go
ll.Handler(lh.NewColorizedHandler(os.Stdout))
ll.Dump("hello\nworld") // Output: Hex/ASCII dump (see example/dump.png)
```
3. **Stack - Stack Inspection**
Logs a stack trace for debugging critical errors.
```go
ll.Handler(lh.NewColorizedHandler(os.Stdout))
ll.Stack("Critical error") // Output: [app] ERROR: Critical error [stack=...] (see example/stack.png)
```
4. **General Output**
Logs output in a structured way for inspection of public and private values.
```go
ll.Handler(lh.NewColorizedHandler(os.Stdout))
ll.Output(&SomeStructWithPrivateValues{})
```
#### Performance Tracking
Measure execution time for performance analysis.
```go
// Automatic measurement
defer ll.Measure(func() { time.Sleep(time.Millisecond) })()
// Output: [app] INFO: function executed [duration=~1ms]
// Explicit benchmarking
start := time.Now()
time.Sleep(time.Millisecond)
ll.Benchmark(start) // Output: [app] INFO: benchmark [start=... end=... duration=...]
```
**Performance Notes**:
- `Dbg` calls are disabled at compile-time when not enabled.
- `Dump` optimizes for primitive types, strings, and bytes with zero-copy paths.
- Stack traces are configurable via `StackSize`.
## Real-World Example: Web Server
A practical example of using `ll` in a web server with structured logging, middleware, and `slog` integration:
### Web Server with Structured Logging
```go
package main
@@ -257,110 +291,127 @@ package main
import (
"github.com/olekukonko/ll"
"github.com/olekukonko/ll/lh"
"log/slog"
"net/http"
"os"
"time"
)
func main() {
// Initialize logger with slog handler
slogHandler := slog.NewJSONHandler(os.Stdout, nil)
logger := ll.New("server").Enable().Handler(lh.NewSlogHandler(slogHandler))
// HTTP child logger
httpLogger := logger.Namespace("http").Style(lx.NestedPath)
// Middleware for request ID
httpLogger.Use(ll.FuncMiddleware(func(e *lx.Entry) error {
if e.Fields == nil {
e.Fields = make(map[string]interface{})
}
e.Fields["request_id"] = "req-" + time.Now().String()
return nil
}))
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
// Root logger - enabled by default
log := ll.New("server")
// JSON output for production
log.Handler(lh.NewJSONHandler(os.Stdout))
// Request logger with context
http.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) {
reqLog := log.Namespace("http").Fields(
"method", r.Method,
"path", r.URL.Path,
"request_id", r.Header.Get("X-Request-ID"),
)
start := time.Now()
httpLogger.Fields("method", r.Method, "path", r.URL.Path).Info("Request received")
w.Write([]byte("Hello, world!"))
httpLogger.Fields("duration_ms", time.Since(start).Milliseconds()).Info("Request completed")
reqLog.Info("request started")
// ... handle request ...
reqLog.Fields(
"status", 200,
"duration_ms", time.Since(start).Milliseconds(),
).Info("request completed")
})
logger.Info("Starting server on :8080")
log.Info("Server listening on :8080")
http.ListenAndServe(":8080", nil)
}
```
**Sample Output (JSON via slog)**:
```json
{"level":"INFO","msg":"Starting server on :8080","namespace":"server"}
{"level":"INFO","msg":"Request received","namespace":"server/http","class":"Text","method":"GET","path":"/","request_id":"req-..."}
{"level":"INFO","msg":"Request completed","namespace":"server/http","class":"Text","duration_ms":1,"request_id":"req-..."}
### Microservice with VictoriaLogs
```go
package main
import (
"github.com/olekukonko/ll"
"github.com/olekukonko/ll/l3rd/victoria"
)
func main() {
// Production setup
vlHandler, _ := victoria.New(
victoria.WithURL("http://logs.internal:9428"),
victoria.WithAppName("payment-api"),
victoria.WithEnvironment("production"),
victoria.WithVersion("1.2.3"),
victoria.WithBatching(500, 2*time.Second),
victoria.WithRetry(3),
)
defer vlHandler.Close()
logger := ll.New("payment").
Handler(vlHandler).
AddContext("region", "us-east-1")
logger.Info("Payment service initialized")
// Conditional error handling
if err := processPayment(); err != nil {
logger.IfErr(err).
Fields("payment_id", paymentID).
Error("Payment processing failed")
}
}
```
## Performance
`ll` is engineered for high-performance environments:
| Operation | Time/op | Allocations |
|-----------|---------|-------------|
| **Disabled log** | **15.9 ns** | **0 allocs** |
| Simple text log | 176 ns | 2 allocs |
| With 2 fields | 383 ns | 4 allocs |
| JSON output | 1006 ns | 13 allocs |
| Namespace lookup (cached) | 550 ns | 6 allocs |
| Deduplication | 214 ns | 2 allocs |
**Key optimizations**:
- Zero allocations when logs are skipped (conditional, disabled)
- Atomic operations for hot paths
- Sync.Pool for buffer reuse
- LRU cache for source file lines (Dbg)
- Sharded mutexes for deduplication
## Why Choose `ll`?
- **Granular Control**: Hierarchical namespaces for precise log management.
- **Performance**: Conditional logging and optimized concatenation reduce overhead.
- **Extensibility**: Middleware pipeline for custom log processing.
- **Structured Output**: Machine-readable logs with key-value metadata.
- **Flexible Formats**: Text, JSON, colorized, and `slog` support.
- **Debugging Power**: Advanced tools like `Dbg`, `Dump`, and `Stack` for deep inspection.
- **Thread-Safe**: Safe for concurrent use in high-throughput applications.
| Feature | `ll` | `slog` | `zap` | `logrus` |
|---------|------|--------|-------|----------|
| **Enabled by default** | ✅ | ❌ | ❌ | ❌ |
| Hierarchical namespaces | ✅ | ❌ | ❌ | ❌ |
| Conditional logging | ✅ | ❌ | ❌ | ❌ |
| Error-based conditions | ✅ | ❌ | ❌ | ❌ |
| Source-aware Dbg() | ✅ | ❌ | ❌ | ❌ |
| Private field inspection | ✅ | ❌ | ❌ | ❌ |
| Hex/ASCII Dump() | ✅ | ❌ | ❌ | ❌ |
| Middleware pipeline | ✅ | ❌ | ✅ (limited) | ❌ |
| Deduplication | ✅ | ❌ | ❌ | ❌ |
| Rate limiting | ✅ | ❌ | ❌ | ❌ |
| VictoriaLogs support | ✅ | ❌ | ❌ | ❌ |
| Syslog support | ✅ | ❌ | ❌ | ✅ |
| Zero-allocs disabled logs | ✅ | ❌ | ❌ | ❌ |
| Thread-safe | ✅ | ✅ | ✅ | ✅ |
## Comparison with Other Libraries
## Documentation
| Feature | `ll` | `log` (stdlib) | `slog` (stdlib) | `zap` |
|--------------------------|--------------------------|----------------|-----------------|-------------------|
| Hierarchical Namespaces | ✅ | ❌ | ❌ | ❌ |
| Structured Logging | ✅ (Fields, Context) | ❌ | ✅ | ✅ |
| Middleware Pipeline | ✅ | ❌ | ❌ | ✅ (limited) |
| Conditional Logging | ✅ (If, IfOne, IfAny) | ❌ | ❌ | ❌ |
| Slog Compatibility | ✅ | ❌ | ✅ (native) | ❌ |
| Debugging (Dbg, Dump) | ✅ | ❌ | ❌ | ❌ |
| Performance (disabled logs) | High (conditional) | Low | Medium | High |
| Output Formats | Text, JSON, Color, Slog | Text | Text, JSON | JSON, Text |
## Benchmarks
`ll` is optimized for performance, particularly for disabled logs and structured logging:
- **Disabled Logs**: 30% faster than `slog` due to efficient conditional checks.
- **Structured Logging**: 2x faster than `log` with minimal allocations.
- **Namespace Caching**: Reduces overhead for hierarchical lookups.
See `ll_bench_test.go` for detailed benchmarks on namespace creation, cloning, and field building.
## Testing and Stability
The `ll` library includes a comprehensive test suite (`ll_test.go`) covering:
- Logger configuration, namespaces, and conditional logging.
- Middleware, rate limiting, and sampling.
- Handler output formats (text, JSON, slog).
- Debugging utilities (`Dbg`, `Dump`, `Stack`).
Recent improvements:
- Fixed sampling middleware for reliable behavior at edge cases (0.0 and 1.0 rates).
- Enhanced documentation across `conditional.go`, `field.go`, `global.go`, `ll.go`, `lx.go`, and `ns.go`.
- Added `slog` compatibility via `lh.SlogHandler`.
- [GoDoc](https://pkg.go.dev/github.com/olekukonko/ll) - Full API documentation
- [Examples](_example/) - Runnable example code
- [Benchmarks](tests/ll_bench_test.go) - Performance benchmarks
## Contributing
Contributions are welcome! To contribute:
1. Fork the repository: `github.com/olekukonko/ll`.
2. Create a feature branch: `git checkout -b feature/your-feature`.
3. Commit changes: `git commit -m "Add your feature"`.
4. Push to the branch: `git push origin feature/your-feature`.
5. Open a pull request with a clear description.
Please include tests in `ll_test.go` and update documentation as needed. Follow the Go coding style and run `go test ./...` before submitting.
Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
## License
`ll` is licensed under the MIT License. See [LICENSE](LICENSE) for details.
## Resources
- **Source Code**: [github.com/olekukonko/ll](https://github.com/olekukonko/ll)
- **Issue Tracker**: [github.com/olekukonko/ll/issues](https://github.com/olekukonko/ll/issues)
- **GoDoc**: [pkg.go.dev/github.com/olekukonko/ll](https://pkg.go.dev/github.com/olekukonko/ll)
MIT License - see [LICENSE](LICENSE) for details.

7
vendor/github.com/olekukonko/ll/comb.hcl generated vendored Normal file
View File

@@ -0,0 +1,7 @@
recursive = true
output_file = "all.txt"
extensions = [".go"]
exclude_dirs = ["_examples", "_lab", "_tmp", "pkg", "lab","bin","dist","assets","oppor"]
exclude_files = [""]
use_gitignore = true
detailed = true

View File

@@ -12,7 +12,7 @@ type Conditional struct {
// If creates a conditional logger that logs only if the condition is true.
// It returns a Conditional struct that wraps the logger, enabling conditional logging methods.
// This method is typically called on a Logger instance to start a conditional chain.
// Thread-safe via the underlying loggers mutex.
// Thread-safe via the underlying logger's mutex.
// Example:
//
// logger := New("app").Enable()
@@ -22,27 +22,6 @@ func (l *Logger) If(condition bool) *Conditional {
return &Conditional{logger: l, condition: condition}
}
// IfOne creates a conditional logger that logs only if all conditions are true.
// It evaluates a variadic list of boolean conditions, producing true only when
// every condition holds (logical AND), and returns a new Conditional carrying
// that result. Thread-safe via the underlying logger.
// Example:
//
//	logger := New("app").Enable()
//	logger.IfOne(true, true).Info("Logged")   // Output: [app] INFO: Logged
//	logger.IfOne(true, false).Info("Ignored") // No output
func (cl *Conditional) IfOne(conditions ...bool) *Conditional {
	// AND-fold the conditions; any false value makes the result false.
	all := true
	for _, c := range conditions {
		all = all && c
	}
	return &Conditional{logger: cl.logger, condition: all}
}
// IfAny creates a conditional logger that logs only if at least one condition is true.
// It evaluates a variadic list of boolean conditions, setting the condition to true if any
// is true (logical OR). Returns a new Conditional with the result. Thread-safe via the
@@ -64,79 +43,117 @@ func (cl *Conditional) IfAny(conditions ...bool) *Conditional {
return &Conditional{logger: cl.logger, condition: result}
}
// Fields starts a fluent chain for adding fields using variadic key-value pairs, if the condition is true.
// It returns a FieldBuilder to attach fields, skipping field processing if the condition is false
// to optimize performance. Thread-safe via the FieldBuilders logger.
// IfErr creates a conditional logger that logs only if the error is non-nil.
// It's designed for the common pattern of checking errors before logging.
// Example:
//
// logger := New("app").Enable()
// logger.If(true).Fields("user", "alice").Info("Logged") // Output: [app] INFO: Logged [user=alice]
// logger.If(false).Fields("user", "alice").Info("Ignored") // No output, no field processing
func (cl *Conditional) Fields(pairs ...any) *FieldBuilder {
// Skip field processing if condition is false
if !cl.condition {
return &FieldBuilder{logger: cl.logger, fields: nil}
}
// Delegate to loggers Fields method
return cl.logger.Fields(pairs...)
// err := doSomething()
// logger.IfErr(err).Error("Operation failed") // Only logs if err != nil
func (l *Logger) IfErr(err error) *Conditional {
	// Translate the error presence into a boolean condition.
	hasErr := err != nil
	return l.If(hasErr)
}
// Field starts a fluent chain for adding fields from a map, if the condition is true.
// It returns a FieldBuilder to attach fields from a map, skipping processing if the condition
// is false. Thread-safe via the FieldBuilders logger.
// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil.
// It evaluates a variadic list of errors, setting the condition to true if any
// is non-nil (logical OR). Useful when any error should trigger logging.
// Example:
//
// logger := New("app").Enable()
// logger.If(true).Field(map[string]interface{}{"user": "alice"}).Info("Logged") // Output: [app] INFO: Logged [user=alice]
// logger.If(false).Field(map[string]interface{}{"user": "alice"}).Info("Ignored") // No output
func (cl *Conditional) Field(fields map[string]interface{}) *FieldBuilder {
// Skip field processing if condition is false
if !cl.condition {
return &FieldBuilder{logger: cl.logger, fields: nil}
// err1 := validate(input)
// err2 := authorize(user)
// logger.IfErrAny(err1, err2).Error("Either check failed") // Logs if EITHER error exists
func (l *Logger) IfErrAny(errs ...error) *Conditional {
for _, err := range errs {
if err != nil {
return l.If(true) // Any non-nil error makes it true
}
}
// Delegate to loggers Field method
return cl.logger.Field(fields)
return l.If(false) // False only if all errors are nil
}
// Info logs a message at Info level with variadic arguments if the condition is true.
// It concatenates the arguments with spaces and delegates to the loggers Info method if the
// condition is true. Skips processing if false, optimizing performance. Thread-safe via the
// loggers log method.
// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil.
// It evaluates a variadic list of errors, setting the condition to true only if
// all are non-nil (logical AND). Useful when you need all errors to be present.
// Example:
//
// logger := New("app").Enable()
// logger.If(true).Info("Action", "started") // Output: [app] INFO: Action started
// logger.If(false).Info("Action", "ignored") // No output
func (cl *Conditional) Info(args ...any) {
// Skip logging if condition is false
if !cl.condition {
return
// err1 := validate(input)
// err2 := authorize(user)
// logger.IfErrOne(err1, err2).Error("Both checks failed") // Logs only if BOTH errors exist
func (l *Logger) IfErrOne(errs ...error) *Conditional {
for _, err := range errs {
if err == nil {
return l.If(false) // Any nil error makes it false
}
}
// Delegate to loggers Info method
cl.logger.Info(args...)
return l.If(len(errs) > 0) // True only if we have at least one error and all are non-nil
}
// Infof logs a message at Info level with a format string if the condition is true.
// It formats the message using the provided format string and arguments, delegating to the
// loggers Infof method if the condition is true. Skips processing if false, optimizing performance.
// Thread-safe via the loggers log method.
// IfErr creates a conditional logger that logs only if the error is non-nil.
// The error check is combined (logical AND) with the receiver's existing
// condition, so a chain like If(false).IfErr(err) never logs.
// Example:
//
//	err := doSomething()
//	logger.If(true).IfErr(err).Error("Failed") // Only logs if condition true AND err != nil
func (cl *Conditional) IfErr(err error) *Conditional {
	// Combine with the existing condition directly: delegating to IfOne would
	// discard cl.condition, unlike the sibling IfErrAny/IfErrOne methods.
	return &Conditional{logger: cl.logger, condition: cl.condition && err != nil}
}
// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil.
// The logical OR of the error checks is combined (AND) with the receiver's
// existing condition; with no non-nil error the result is always false.
// Example:
//
//	err1 := validate(input)
//	err2 := authorize(user)
//	logger.If(true).IfErrAny(err1, err2).Error("Either failed") // Logs if condition true AND either error exists
func (cl *Conditional) IfErrAny(errs ...error) *Conditional {
	for _, err := range errs {
		if err != nil {
			// At least one error present: carry the receiver's condition forward
			// (the original "cl.condition && true" reduced to exactly this).
			return &Conditional{logger: cl.logger, condition: cl.condition}
		}
	}
	// All errors nil (or none supplied): never log.
	return &Conditional{logger: cl.logger, condition: false}
}
// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil.
// It returns a new Conditional whose condition is the logical AND of the
// receiver's condition and the presence of every supplied error; an empty
// list yields false.
// Example:
//
//	err1 := validate(input)
//	err2 := authorize(user)
//	logger.If(true).IfErrOne(err1, err2).Error("Both failed") // Logs if condition true AND both errors exist
func (cl *Conditional) IfErrOne(errs ...error) *Conditional {
	// Start true only when there is at least one error to inspect.
	allPresent := len(errs) > 0
	for _, e := range errs {
		if e == nil {
			allPresent = false
			break
		}
	}
	return &Conditional{logger: cl.logger, condition: cl.condition && allPresent}
}
// IfOne creates a conditional logger that logs only if all conditions are true.
// It evaluates a variadic list of boolean conditions, setting the condition to true only if
// all are true (logical AND). Returns a new Conditional with the result. Thread-safe via the
// underlying logger.
// Example:
//
// logger := New("app").Enable()
// logger.If(true).Infof("Action %s", "started") // Output: [app] INFO: Action started
// logger.If(false).Infof("Action %s", "ignored") // No output
func (cl *Conditional) Infof(format string, args ...any) {
// Skip logging if condition is false
if !cl.condition {
return
// logger.IfOne(true, true).Info("Logged") // Output: [app] INFO: Logged
// logger.IfOne(true, false).Info("Ignored") // No output
func (cl *Conditional) IfOne(conditions ...bool) *Conditional {
result := true
// Check each condition; set result to false if any is false
for _, cond := range conditions {
if !cond {
result = false
break
}
}
// Delegate to loggers Infof method
cl.logger.Infof(format, args...)
return &Conditional{logger: cl.logger, condition: result}
}
// Debug logs a message at Debug level with variadic arguments if the condition is true.
// It concatenates the arguments with spaces and delegates to the loggers Debug method if the
// condition is true. Skips processing if false. Thread-safe via the loggers log method.
// It concatenates the arguments with spaces and delegates to the logger's Debug method if the
// condition is true. Skips processing if false, optimizing performance. Thread-safe via the
// logger's log method.
// Example:
//
// logger := New("app").Enable().Level(lx.LevelDebug)
@@ -147,13 +164,13 @@ func (cl *Conditional) Debug(args ...any) {
if !cl.condition {
return
}
// Delegate to loggers Debug method
// Delegate to logger's Debug method
cl.logger.Debug(args...)
}
// Debugf logs a message at Debug level with a format string if the condition is true.
// It formats the message and delegates to the loggers Debugf method if the condition is true.
// Skips processing if false. Thread-safe via the loggers log method.
// It formats the message and delegates to the logger's Debugf method if the condition is true.
// Skips processing if false. Thread-safe via the logger's log method.
// Example:
//
// logger := New("app").Enable().Level(lx.LevelDebug)
@@ -164,47 +181,13 @@ func (cl *Conditional) Debugf(format string, args ...any) {
if !cl.condition {
return
}
// Delegate to loggers Debugf method
// Delegate to logger's Debugf method
cl.logger.Debugf(format, args...)
}
// Warn logs a message at Warn level with variadic arguments if the condition is true.
// Arguments are concatenated with spaces by the logger's Warn method. When the
// condition is false the call returns immediately with no processing.
// Thread-safe via the logger's log method.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Warn("Warning", "issued")   // Output: [app] WARN: Warning issued
//	logger.If(false).Warn("Warning", "ignored") // No output
func (cl *Conditional) Warn(args ...any) {
	// Only delegate when the condition holds.
	if cl.condition {
		cl.logger.Warn(args...)
	}
}
// Warnf logs a message at Warn level with a format string if the condition is true.
// Formatting is delegated to the logger's Warnf method; when the condition is
// false the call returns immediately with no processing. Thread-safe via the
// logger's log method.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Warnf("Warning %s", "issued")  // Output: [app] WARN: Warning issued
//	logger.If(false).Warnf("Warning %s", "ignored") // No output
func (cl *Conditional) Warnf(format string, args ...any) {
	// Only delegate when the condition holds.
	if cl.condition {
		cl.logger.Warnf(format, args...)
	}
}
// Error logs a message at Error level with variadic arguments if the condition is true.
// It concatenates the arguments with spaces and delegates to the loggers Error method if the
// condition is true. Skips processing if false. Thread-safe via the loggers log method.
// It concatenates the arguments with spaces and delegates to the logger's Error method if the
// condition is true. Skips processing if false. Thread-safe via the logger's log method.
// Example:
//
// logger := New("app").Enable()
@@ -215,13 +198,13 @@ func (cl *Conditional) Error(args ...any) {
if !cl.condition {
return
}
// Delegate to loggers Error method
// Delegate to logger's Error method
cl.logger.Error(args...)
}
// Errorf logs a message at Error level with a format string if the condition is true.
// It formats the message and delegates to the loggers Errorf method if the condition is true.
// Skips processing if false. Thread-safe via the loggers log method.
// It formats the message and delegates to the logger's Errorf method if the condition is true.
// Skips processing if false. Thread-safe via the logger's log method.
// Example:
//
// logger := New("app").Enable()
@@ -232,48 +215,14 @@ func (cl *Conditional) Errorf(format string, args ...any) {
if !cl.condition {
return
}
// Delegate to loggers Errorf method
// Delegate to logger's Errorf method
cl.logger.Errorf(format, args...)
}
// Stack logs a message at Error level with a stack trace and variadic arguments if the condition is true.
// It concatenates the arguments with spaces and delegates to the loggers Stack method if the
// condition is true. Skips processing if false. Thread-safe via the loggers log method.
// Example:
//
// logger := New("app").Enable()
// logger.If(true).Stack("Critical", "error") // Output: [app] ERROR: Critical error [stack=...]
// logger.If(false).Stack("Critical", "ignored") // No output
func (cl *Conditional) Stack(args ...any) {
// Skip logging if condition is false
if !cl.condition {
return
}
// Delegate to loggers Stack method
cl.logger.Stack(args...)
}
// Stackf logs a message at Error level with a stack trace and a format string if the condition is true.
// It formats the message and delegates to the loggers Stackf method if the condition is true.
// Skips processing if false. Thread-safe via the loggers log method.
// Example:
//
// logger := New("app").Enable()
// logger.If(true).Stackf("Critical %s", "error") // Output: [app] ERROR: Critical error [stack=...]
// logger.If(false).Stackf("Critical %s", "ignored") // No output
func (cl *Conditional) Stackf(format string, args ...any) {
// Skip logging if condition is false
if !cl.condition {
return
}
// Delegate to loggers Stackf method
cl.logger.Stackf(format, args...)
}
// Fatal logs a message at Error level with a stack trace and variadic arguments if the condition is true,
// then exits. It concatenates the arguments with spaces and delegates to the loggers Fatal method
// then exits. It concatenates the arguments with spaces and delegates to the logger's Fatal method
// if the condition is true, terminating the program with exit code 1. Skips processing if false.
// Thread-safe via the loggers log method.
// Thread-safe via the logger's log method.
// Example:
//
// logger := New("app").Enable()
@@ -284,13 +233,13 @@ func (cl *Conditional) Fatal(args ...any) {
if !cl.condition {
return
}
// Delegate to loggers Fatal method
// Delegate to logger's Fatal method
cl.logger.Fatal(args...)
}
// Fatalf logs a formatted message at Error level with a stack trace if the condition is true, then exits.
// It formats the message and delegates to the loggers Fatalf method if the condition is true,
// terminating the program with exit code 1. Skips processing if false. Thread-safe via the loggers log method.
// It formats the message and delegates to the logger's Fatalf method if the condition is true,
// terminating the program with exit code 1. Skips processing if false. Thread-safe via the logger's log method.
// Example:
//
// logger := New("app").Enable()
@@ -301,13 +250,83 @@ func (cl *Conditional) Fatalf(format string, args ...any) {
if !cl.condition {
return
}
// Delegate to loggers Fatalf method
// Delegate to logger's Fatalf method
cl.logger.Fatalf(format, args...)
}
// Field starts a fluent chain for adding fields from a map, if the condition is true.
// When the condition holds it delegates to the logger's Field method; otherwise it
// returns an inert FieldBuilder whose nil fields suppress any subsequent logging.
// Thread-safe via the FieldBuilder's logger.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Field(map[string]interface{}{"user": "alice"}).Info("Logged") // Output: [app] INFO: Logged [user=alice]
//	logger.If(false).Field(map[string]interface{}{"user": "alice"}).Info("Ignored") // No output
func (cl *Conditional) Field(fields map[string]interface{}) *FieldBuilder {
	if cl.condition {
		// Condition holds: build fields through the underlying logger.
		return cl.logger.Field(fields)
	}
	// Condition false: nil fields make every FieldBuilder log call a no-op.
	return &FieldBuilder{logger: cl.logger, fields: nil}
}
// Fields starts a fluent chain for adding fields using variadic key-value pairs, if the condition is true.
// When the condition holds it delegates to the logger's Fields method; otherwise it
// returns an inert FieldBuilder whose nil fields skip all field processing for performance.
// Thread-safe via the FieldBuilder's logger.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Fields("user", "alice").Info("Logged") // Output: [app] INFO: Logged [user=alice]
//	logger.If(false).Fields("user", "alice").Info("Ignored") // No output, no field processing
func (cl *Conditional) Fields(pairs ...any) *FieldBuilder {
	if cl.condition {
		// Condition holds: build fields through the underlying logger.
		return cl.logger.Fields(pairs...)
	}
	// Condition false: nil fields make every FieldBuilder log call a no-op.
	return &FieldBuilder{logger: cl.logger, fields: nil}
}
// Info logs a message at Info level with variadic arguments if the condition is true.
// Arguments are concatenated with spaces by the logger's Info method. When the
// condition is false nothing is processed, keeping the disabled path cheap.
// Thread-safe via the logger's log method.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Info("Action", "started") // Output: [app] INFO: Action started
//	logger.If(false).Info("Action", "ignored") // No output
func (cl *Conditional) Info(args ...any) {
	// Only forward to the logger when the guard condition holds.
	if cl.condition {
		cl.logger.Info(args...)
	}
}
// Infof logs a message at Info level with a format string if the condition is true.
// Formatting is performed by the logger's Infof method, so a false condition skips
// all work including formatting. Thread-safe via the logger's log method.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Infof("Action %s", "started") // Output: [app] INFO: Action started
//	logger.If(false).Infof("Action %s", "ignored") // No output
func (cl *Conditional) Infof(format string, args ...any) {
	// Only forward to the logger when the guard condition holds.
	if cl.condition {
		cl.logger.Infof(format, args...)
	}
}
// Panic logs a message at Error level with a stack trace and variadic arguments if the condition is true,
// then panics. It concatenates the arguments with spaces and delegates to the loggers Panic method
// if the condition is true, triggering a panic. Skips processing if false. Thread-safe via the loggers log method.
// then panics. It concatenates the arguments with spaces and delegates to the logger's Panic method
// if the condition is true, triggering a panic. Skips processing if false. Thread-safe via the logger's log method.
// Example:
//
// logger := New("app").Enable()
@@ -318,13 +337,13 @@ func (cl *Conditional) Panic(args ...any) {
if !cl.condition {
return
}
// Delegate to loggers Panic method
// Delegate to logger's Panic method
cl.logger.Panic(args...)
}
// Panicf logs a formatted message at Error level with a stack trace if the condition is true, then panics.
// It formats the message and delegates to the loggers Panicf method if the condition is true,
// triggering a panic. Skips processing if false. Thread-safe via the loggers log method.
// It formats the message and delegates to the logger's Panicf method if the condition is true,
// triggering a panic. Skips processing if false. Thread-safe via the logger's log method.
// Example:
//
// logger := New("app").Enable()
@@ -335,6 +354,74 @@ func (cl *Conditional) Panicf(format string, args ...any) {
if !cl.condition {
return
}
// Delegate to loggers Panicf method
// Delegate to logger's Panicf method
cl.logger.Panicf(format, args...)
}
// Stack logs a message at Error level with a stack trace and variadic arguments if the
// condition is true. Arguments are concatenated with spaces by the logger's Stack method.
// A false condition skips all processing. Thread-safe via the logger's log method.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Stack("Critical", "error") // Output: [app] ERROR: Critical error [stack=...]
//	logger.If(false).Stack("Critical", "ignored") // No output
func (cl *Conditional) Stack(args ...any) {
	// Only forward to the logger when the guard condition holds.
	if cl.condition {
		cl.logger.Stack(args...)
	}
}
// Stackf logs a message at Error level with a stack trace and a format string if the
// condition is true. Formatting is performed by the logger's Stackf method, so a false
// condition skips all work. Thread-safe via the logger's log method.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Stackf("Critical %s", "error") // Output: [app] ERROR: Critical error [stack=...]
//	logger.If(false).Stackf("Critical %s", "ignored") // No output
func (cl *Conditional) Stackf(format string, args ...any) {
	// Only forward to the logger when the guard condition holds.
	if cl.condition {
		cl.logger.Stackf(format, args...)
	}
}
// Warn logs a message at Warn level with variadic arguments if the condition is true.
// Arguments are concatenated with spaces by the logger's Warn method. A false condition
// skips all processing. Thread-safe via the logger's log method.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Warn("Warning", "issued") // Output: [app] WARN: Warning issued
//	logger.If(false).Warn("Warning", "ignored") // No output
func (cl *Conditional) Warn(args ...any) {
	// Only forward to the logger when the guard condition holds.
	if cl.condition {
		cl.logger.Warn(args...)
	}
}
// Warnf logs a message at Warn level with a format string if the condition is true.
// Formatting is performed by the logger's Warnf method, so a false condition skips
// all work including formatting. Thread-safe via the logger's log method.
// Example:
//
//	logger := New("app").Enable()
//	logger.If(true).Warnf("Warning %s", "issued") // Output: [app] WARN: Warning issued
//	logger.If(false).Warnf("Warning %s", "ignored") // No output
func (cl *Conditional) Warnf(format string, args ...any) {
	// Only forward to the logger when the guard condition holds.
	if cl.condition {
		cl.logger.Warnf(format, args...)
	}
}

282
vendor/github.com/olekukonko/ll/dbg.go generated vendored Normal file
View File

@@ -0,0 +1,282 @@
package ll
import (
"container/list"
"fmt"
"os"
"runtime"
"strings"
"sync"
"github.com/olekukonko/ll/lx"
)
// -----------------------------------------------------------------------------
// Global Cache Instance
// -----------------------------------------------------------------------------

// sourceCache caches up to 128 source files using LRU eviction.
// It is shared process-wide and safe for concurrent use because fileLRU
// serializes access with its internal mutex.
var sourceCache = newFileLRU(128)
// -----------------------------------------------------------------------------
// File-Level LRU Cache
// -----------------------------------------------------------------------------

// fileLRU is a mutex-guarded, fixed-capacity LRU cache mapping file paths to
// their split lines. Unreadable files are cached negatively (nil lines) so a
// missing file is not re-read on every lookup.
type fileLRU struct {
	capacity int
	mu       sync.Mutex
	list     *list.List
	items    map[string]*list.Element
}

// fileItem is the per-file cache entry; lines is nil for unreadable files.
type fileItem struct {
	key   string
	lines []string
}

// newFileLRU builds an empty cache holding at most capacity files (minimum 1).
func newFileLRU(capacity int) *fileLRU {
	if capacity < 1 {
		capacity = 1
	}
	return &fileLRU{
		capacity: capacity,
		list:     list.New(),
		items:    make(map[string]*list.Element, capacity),
	}
}

// getLine returns the 1-indexed line of file, loading and caching the file on
// first access. The second return value is false when the file is unreadable
// or the line number is out of range.
func (c *fileLRU) getLine(file string, line int) (string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Fast path: entry already cached (positively or negatively).
	// nthLine on a nil slice reports false, covering the negative case.
	if elem, ok := c.items[file]; ok {
		c.list.MoveToFront(elem)
		return nthLine(elem.Value.(*fileItem).lines, line)
	}

	// Slow path: drop the lock around the read so other loggers aren't
	// blocked behind file I/O.
	c.mu.Unlock()
	data, err := os.ReadFile(file)
	c.mu.Lock()

	// Another goroutine may have populated the entry while we were unlocked;
	// prefer its copy and discard ours.
	if elem, ok := c.items[file]; ok {
		c.list.MoveToFront(elem)
		return nthLine(elem.Value.(*fileItem).lines, line)
	}

	var lines []string
	if err == nil {
		lines = strings.Split(string(data), "\n")
	}

	// Insert; nil lines acts as the negative-cache marker for read failures.
	c.items[file] = c.list.PushFront(&fileItem{key: file, lines: lines})

	// Evict the least recently used entry when over capacity.
	if c.list.Len() > c.capacity {
		if back := c.list.Back(); back != nil {
			c.list.Remove(back)
			delete(c.items, back.Value.(*fileItem).key)
		}
	}

	return nthLine(lines, line)
}

// nthLine returns the 1-indexed line n from lines, stripping a trailing '\r'
// left over from CRLF files. Reports false when n is out of range.
func nthLine(lines []string, n int) (string, bool) {
	if n < 1 || n > len(lines) {
		return "", false
	}
	return strings.TrimSuffix(lines[n-1], "\r"), true
}
// -----------------------------------------------------------------------------
// Logger Debug Implementation
// -----------------------------------------------------------------------------

// Dbg logs debug information including the caller's source file, line number,
// and a best-effort extraction of the argument expressions from the source.
// Note: despite the name, output is gated and emitted at Info level
// (lx.LevelInfo), not Debug.
//
// Example:
//
//	x := 42
//	logger.Dbg("val", x)
//	Output: [file.go:123] "val" = "val", x = 42
func (l *Logger) Dbg(values ...interface{}) {
	// Cheap early-out when Info-level logging is suppressed.
	if !l.shouldLog(lx.LevelInfo) {
		return
	}
	// skip=2 steps over Dbg and dbg so the reported frame is the user's call site.
	l.dbg(2, values...)
}
// dbg resolves the caller's frame, extracts the Dbg(...) argument expressions
// from the source line (best effort, via the file cache), and emits
// "[file:line] expr = value, ..." at Info level. skip is the number of frames
// between the user's call site and this function.
func (l *Logger) dbg(skip int, values ...interface{}) {
	file, line, ok := callerFrame(skip)
	if !ok {
		// No usable frame: emit values with a placeholder location.
		var b strings.Builder
		b.WriteString("[?:?] ")
		for i, v := range values {
			if i > 0 {
				b.WriteString(", ")
			}
			fmt.Fprintf(&b, "%+v", v)
		}
		l.log(lx.LevelInfo, lx.ClassText, b.String(), nil, false)
		return
	}

	// Trim the directory so only the base filename is shown.
	short := file
	if i := strings.LastIndex(file, "/"); i >= 0 {
		short = file[i+1:]
	}

	// Best-effort extraction of the text inside Dbg(...).
	var expr string
	if src, hit := sourceCache.getLine(file, line); hit && src != "" {
		if start := strings.Index(src, "Dbg("); start >= 0 {
			tail := src[start+len("Dbg("):]
			if end := strings.LastIndex(tail, ")"); end >= 0 {
				expr = strings.TrimSpace(tail[:end])
			}
		} else if lp, rp := strings.Index(src, "("), strings.LastIndex(src, ")"); lp >= 0 && rp > lp {
			// Fallback: take the outermost (...) group when "Dbg(" isn't literal.
			expr = strings.TrimSpace(src[lp+1 : rp])
		}
	}

	var out strings.Builder
	fmt.Fprintf(&out, "[%s:%d] ", short, line)

	// Split the expression list so each piece can pair with one value.
	var exprs []string
	if expr != "" {
		exprs = splitExpressions(expr)
	}

	if len(exprs) == len(values) {
		// 1:1 mapping: render "expr = value" per argument.
		for i, v := range values {
			if i > 0 {
				out.WriteString(", ")
			}
			fmt.Fprintf(&out, "%s = %+v", exprs[i], v)
		}
	} else {
		// Mismatch: render the whole expression once, then the raw values.
		if expr != "" {
			out.WriteString(expr)
			out.WriteString(" = ")
		}
		for i, v := range values {
			if i > 0 {
				out.WriteString(", ")
			}
			fmt.Fprintf(&out, "%+v", v)
		}
	}

	l.log(lx.LevelInfo, lx.ClassDbg, out.String(), nil, false)
}
// splitExpressions splits a comma-separated string of expressions,
// respecting nested parentheses, brackets, braces, and quotes.
// Quote handling is deliberately simple (no escape processing), which is
// sufficient for typical Dbg call sites.
// Example: "a, fn(b, c), d" -> ["a", "fn(b, c)", "d"]
func splitExpressions(s string) []string {
	var (
		out     []string
		buf     strings.Builder
		nesting int  // depth of unmatched (, [, {
		quote   rune // active quote character, 0 when outside a literal
	)
	for _, r := range s {
		if quote != 0 {
			// Inside a string/char literal: copy verbatim until the closing quote.
			buf.WriteRune(r)
			if r == quote {
				quote = 0
			}
			continue
		}
		switch r {
		case '"', '\'':
			quote = r
			buf.WriteRune(r)
		case '(', '{', '[':
			nesting++
			buf.WriteRune(r)
		case ')', '}', ']':
			nesting--
			buf.WriteRune(r)
		case ',':
			if nesting == 0 {
				// Top-level comma: close out the current expression.
				out = append(out, strings.TrimSpace(buf.String()))
				buf.Reset()
				continue
			}
			buf.WriteRune(r)
		default:
			buf.WriteRune(r)
		}
	}
	if buf.Len() > 0 {
		out = append(out, strings.TrimSpace(buf.String()))
	}
	return out
}
// -----------------------------------------------------------------------------
// Caller Resolution
// -----------------------------------------------------------------------------

// callerFrame walks the stack until it finds the first frame outside the ll
// package, returning that frame's file and line. skip counts frames between
// the caller of interest and this function.
func callerFrame(skip int) (file string, line int, ok bool) {
	// +2 accounts for runtime.Callers and callerFrame itself.
	var pcs [32]uintptr
	n := runtime.Callers(skip+2, pcs[:])
	if n == 0 {
		return "", 0, false
	}
	it := runtime.CallersFrames(pcs[:n])
	for {
		fr, more := it.Next()
		// fr.Function looks like "github.com/you/mod/ll.(*Logger).Dbg";
		// treat a frame as internal only when its function name marks package ll.
		internal := fr.Function != "" &&
			(strings.Contains(fr.Function, "/ll.") || strings.Contains(fr.Function, ".ll."))
		if !internal {
			return fr.File, fr.Line, true
		}
		if !more {
			// Ran out of frames: fall back to the last one we saw.
			return fr.File, fr.Line, fr.File != ""
		}
	}
}

View File

@@ -1,11 +1,13 @@
// field.go
package ll
import (
"fmt"
"github.com/olekukonko/cat"
"github.com/olekukonko/ll/lx"
"os"
"strings"
"github.com/olekukonko/cat"
"github.com/olekukonko/ll/lx"
)
// FieldBuilder enables fluent addition of fields before logging.
@@ -13,12 +15,12 @@ import (
// supporting structured logging with metadata. The builder allows chaining to add fields
// and log messages at various levels (Info, Debug, Warn, Error, etc.) in a single expression.
type FieldBuilder struct {
logger *Logger // Associated logger instance for logging operations
fields map[string]interface{} // Fields to include in the log entry as key-value pairs
logger *Logger // Associated logger instance for logging operations
fields lx.Fields // Fields to include in the log entry as ordered key-value pairs
}
// Logger creates a new logger with the builders fields embedded in its context.
// It clones the parent logger and copies the builders fields into the new loggers context,
// Logger creates a new logger with the builder's fields embedded in its context.
// It clones the parent logger and copies the builder's fields into the new logger's context,
// enabling persistent field inclusion in subsequent logs. This method supports fluent chaining
// after Fields or Field calls.
// Example:
@@ -29,17 +31,14 @@ type FieldBuilder struct {
func (fb *FieldBuilder) Logger() *Logger {
// Clone the parent logger to preserve its configuration
newLogger := fb.logger.Clone()
// Initialize a new context map to avoid modifying the parents context
newLogger.context = make(map[string]interface{})
// Copy builders fields into the new loggers context
for k, v := range fb.fields {
newLogger.context[k] = v
}
// Copy builder's fields into the new logger's context
newLogger.context = make(lx.Fields, len(fb.fields))
copy(newLogger.context, fb.fields)
return newLogger
}
// Info logs a message at Info level with the builders fields.
// It concatenates the arguments with spaces and delegates to the loggers log method,
// Info logs a message at Info level with the builder's fields.
// It concatenates the arguments with spaces and delegates to the logger's log method,
// returning early if fields are nil. This method is used for informational messages.
// Example:
//
@@ -50,13 +49,13 @@ func (fb *FieldBuilder) Info(args ...any) {
if fb.fields == nil {
return
}
// Log at Info level with the builders fields, no stack trace
// Log at Info level with the builder's fields, no stack trace
fb.logger.log(lx.LevelInfo, lx.ClassText, cat.Space(args...), fb.fields, false)
}
// Infof logs a message at Info level with the builders fields.
// Infof logs a message at Info level with the builder's fields.
// It formats the message using the provided format string and arguments, then delegates
// to the loggers internal log method. If fields are nil, it returns early to avoid logging.
// to the logger's internal log method. If fields are nil, it returns early to avoid logging.
// This method is part of the fluent API, typically called after adding fields.
// Example:
//
@@ -69,12 +68,12 @@ func (fb *FieldBuilder) Infof(format string, args ...any) {
}
// Format the message using the provided arguments
msg := fmt.Sprintf(format, args...)
// Log at Info level with the builders fields, no stack trace
// Log at Info level with the builder's fields, no stack trace
fb.logger.log(lx.LevelInfo, lx.ClassText, msg, fb.fields, false)
}
// Debug logs a message at Debug level with the builders fields.
// It concatenates the arguments with spaces and delegates to the loggers log method,
// Debug logs a message at Debug level with the builder's fields.
// It concatenates the arguments with spaces and delegates to the logger's log method,
// returning early if fields are nil. This method is used for debugging information.
// Example:
//
@@ -85,12 +84,12 @@ func (fb *FieldBuilder) Debug(args ...any) {
if fb.fields == nil {
return
}
// Log at Debug level with the builders fields, no stack trace
// Log at Debug level with the builder's fields, no stack trace
fb.logger.log(lx.LevelDebug, lx.ClassText, cat.Space(args...), fb.fields, false)
}
// Debugf logs a message at Debug level with the builders fields.
// It formats the message and delegates to the loggers log method, returning early if
// Debugf logs a message at Debug level with the builder's fields.
// It formats the message and delegates to the logger's log method, returning early if
// fields are nil. This method is used for debugging information that may be disabled in
// production environments.
// Example:
@@ -104,12 +103,12 @@ func (fb *FieldBuilder) Debugf(format string, args ...any) {
}
// Format the message
msg := fmt.Sprintf(format, args...)
// Log at Debug level with the builders fields, no stack trace
// Log at Debug level with the builder's fields, no stack trace
fb.logger.log(lx.LevelDebug, lx.ClassText, msg, fb.fields, false)
}
// Warn logs a message at Warn level with the builders fields.
// It concatenates the arguments with spaces and delegates to the loggers log method,
// Warn logs a message at Warn level with the builder's fields.
// It concatenates the arguments with spaces and delegates to the logger's log method,
// returning early if fields are nil. This method is used for warning conditions.
// Example:
//
@@ -120,12 +119,12 @@ func (fb *FieldBuilder) Warn(args ...any) {
if fb.fields == nil {
return
}
// Log at Warn level with the builders fields, no stack trace
// Log at Warn level with the builder's fields, no stack trace
fb.logger.log(lx.LevelWarn, lx.ClassText, cat.Space(args...), fb.fields, false)
}
// Warnf logs a message at Warn level with the builders fields.
// It formats the message and delegates to the loggers log method, returning early if
// Warnf logs a message at Warn level with the builder's fields.
// It formats the message and delegates to the logger's log method, returning early if
// fields are nil. This method is used for warning conditions that do not halt execution.
// Example:
//
@@ -138,12 +137,12 @@ func (fb *FieldBuilder) Warnf(format string, args ...any) {
}
// Format the message
msg := fmt.Sprintf(format, args...)
// Log at Warn level with the builders fields, no stack trace
// Log at Warn level with the builder's fields, no stack trace
fb.logger.log(lx.LevelWarn, lx.ClassText, msg, fb.fields, false)
}
// Error logs a message at Error level with the builders fields.
// It concatenates the arguments with spaces and delegates to the loggers log method,
// Error logs a message at Error level with the builder's fields.
// It concatenates the arguments with spaces and delegates to the logger's log method,
// returning early if fields are nil. This method is used for error conditions.
// Example:
//
@@ -154,12 +153,12 @@ func (fb *FieldBuilder) Error(args ...any) {
if fb.fields == nil {
return
}
// Log at Error level with the builders fields, no stack trace
// Log at Error level with the builder's fields, no stack trace
fb.logger.log(lx.LevelError, lx.ClassText, cat.Space(args...), fb.fields, false)
}
// Errorf logs a message at Error level with the builders fields.
// It formats the message and delegates to the loggers log method, returning early if
// Errorf logs a message at Error level with the builder's fields.
// It formats the message and delegates to the logger's log method, returning early if
// fields are nil. This method is used for error conditions that may require attention.
// Example:
//
@@ -172,12 +171,12 @@ func (fb *FieldBuilder) Errorf(format string, args ...any) {
}
// Format the message
msg := fmt.Sprintf(format, args...)
// Log at Error level with the builders fields, no stack trace
// Log at Error level with the builder's fields, no stack trace
fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, false)
}
// Stack logs a message at Error level with a stack trace and the builders fields.
// It concatenates the arguments with spaces and delegates to the loggers log method,
// Stack logs a message at Error level with a stack trace and the builder's fields.
// It concatenates the arguments with spaces and delegates to the logger's log method,
// returning early if fields are nil. This method is useful for debugging critical errors.
// Example:
//
@@ -188,12 +187,12 @@ func (fb *FieldBuilder) Stack(args ...any) {
if fb.fields == nil {
return
}
// Log at Error level with the builders fields and a stack trace
// Log at Error level with the builder's fields and a stack trace
fb.logger.log(lx.LevelError, lx.ClassText, cat.Space(args...), fb.fields, true)
}
// Stackf logs a message at Error level with a stack trace and the builders fields.
// It formats the message and delegates to the loggers log method, returning early if
// Stackf logs a message at Error level with a stack trace and the builder's fields.
// It formats the message and delegates to the logger's log method, returning early if
// fields are nil. This method is useful for debugging critical errors.
// Example:
//
@@ -206,11 +205,11 @@ func (fb *FieldBuilder) Stackf(format string, args ...any) {
}
// Format the message
msg := fmt.Sprintf(format, args...)
// Log at Error level with the builders fields and a stack trace
// Log at Error level with the builder's fields and a stack trace
fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, true)
}
// Fatal logs a message at Error level with a stack trace and the builders fields, then exits.
// Fatal logs a message at Error level with a stack trace and the builder's fields, then exits.
// It constructs the message from variadic arguments, logs it with a stack trace, and terminates
// the program with exit code 1. Returns early if fields are nil. This method is used for
// unrecoverable errors.
@@ -231,13 +230,16 @@ func (fb *FieldBuilder) Fatal(args ...any) {
}
builder.WriteString(fmt.Sprint(arg))
}
// Log at Error level with the builders fields and a stack trace
fb.logger.log(lx.LevelError, lx.ClassText, builder.String(), fb.fields, true)
// Log at Error level with the builder's fields and a stack trace
fb.logger.log(lx.LevelFatal, lx.ClassText, builder.String(), fb.fields, fb.logger.fatalStack)
// Exit the program with status code 1
os.Exit(1)
if fb.logger.fatalExits {
os.Exit(1)
}
}
// Fatalf logs a formatted message at Error level with a stack trace and the builders fields,
// Fatalf logs a formatted message at Error level with a stack trace and the builder's fields,
// then exits. It delegates to Fatal and returns early if fields are nil. This method is used
// for unrecoverable errors.
// Example:
@@ -253,7 +255,7 @@ func (fb *FieldBuilder) Fatalf(format string, args ...any) {
fb.Fatal(fmt.Sprintf(format, args...))
}
// Panic logs a message at Error level with a stack trace and the builders fields, then panics.
// Panic logs a message at Error level with a stack trace and the builder's fields, then panics.
// It constructs the message from variadic arguments, logs it with a stack trace, and triggers
// a panic with the message. Returns early if fields are nil. This method is used for critical
// errors that require immediate program termination with a panic.
@@ -275,13 +277,13 @@ func (fb *FieldBuilder) Panic(args ...any) {
builder.WriteString(fmt.Sprint(arg))
}
msg := builder.String()
// Log at Error level with the builders fields and a stack trace
// Log at Error level with the builder's fields and a stack trace
fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, true)
// Trigger a panic with the formatted message
panic(msg)
}
// Panicf logs a formatted message at Error level with a stack trace and the builders fields,
// Panicf logs a formatted message at Error level with a stack trace and the builder's fields,
// then panics. It delegates to Panic and returns early if fields are nil. This method is used
// for critical errors that require immediate program termination with a panic.
// Example:
@@ -301,7 +303,7 @@ func (fb *FieldBuilder) Panicf(format string, args ...any) {
// It stores non-nil errors in the "error" field: a single error if only one is non-nil,
// or a slice of errors if multiple are non-nil. It logs the concatenated string representations
// of non-nil errors (e.g., "failed 1; failed 2") at the Error level. Returns the FieldBuilder
// for chaining, allowing further field additions or logging. Thread-safe via the loggers mutex.
// for chaining, allowing further field additions or logging. Thread-safe via the logger's mutex.
// Example:
//
// logger := New("app").Enable()
@@ -311,9 +313,9 @@ func (fb *FieldBuilder) Panicf(format string, args ...any) {
// // Output: [app] ERROR: failed 1; failed 2
// // [app] INFO: Error occurred [error=[failed 1 failed 2] k=v]
func (fb *FieldBuilder) Err(errs ...error) *FieldBuilder {
// Initialize fields map if nil
// Initialize fields slice if nil
if fb.fields == nil {
fb.fields = make(map[string]interface{})
fb.fields = make(lx.Fields, 0, 4)
}
// Collect non-nil errors and build log message
@@ -335,10 +337,10 @@ func (fb *FieldBuilder) Err(errs ...error) *FieldBuilder {
if count > 0 {
if count == 1 {
// Store single error directly
fb.fields["error"] = nonNilErrors[0]
fb.fields = append(fb.fields, lx.Field{Key: "error", Value: nonNilErrors[0]})
} else {
// Store slice of errors
fb.fields["error"] = nonNilErrors
fb.fields = append(fb.fields, lx.Field{Key: "error", Value: nonNilErrors})
}
// Log concatenated error messages at Error level
fb.logger.log(lx.LevelError, lx.ClassText, builder.String(), nil, false)
@@ -357,19 +359,30 @@ func (fb *FieldBuilder) Err(errs ...error) *FieldBuilder {
// logger := New("app").Enable()
// logger.Fields("k1", "v1").Merge("k2", "v2").Info("Action") // Output: [app] INFO: Action [k1=v1 k2=v2]
func (fb *FieldBuilder) Merge(pairs ...any) *FieldBuilder {
// Initialize fields slice if nil
if fb.fields == nil {
fb.fields = make(lx.Fields, 0, len(pairs)/2)
}
// Process pairs as key-value, advancing by 2
for i := 0; i < len(pairs)-1; i += 2 {
// Ensure the key is a string
if key, ok := pairs[i].(string); ok {
fb.fields[key] = pairs[i+1]
fb.fields = append(fb.fields, lx.Field{Key: key, Value: pairs[i+1]})
} else {
// Log an error field for non-string keys
fb.fields["error"] = fmt.Errorf("non-string key in Merge: %v", pairs[i])
fb.fields = append(fb.fields, lx.Field{
Key: "error",
Value: fmt.Errorf("non-string key in Merge: %v", pairs[i]),
})
}
}
// Check for uneven pairs (missing value)
if len(pairs)%2 != 0 {
fb.fields["error"] = fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1])
fb.fields = append(fb.fields, lx.Field{
Key: "error",
Value: fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1]),
})
}
return fb
}

View File

@@ -1,11 +1,9 @@
package ll
import (
"os"
"sync/atomic"
"time"
"github.com/olekukonko/ll/lh"
"github.com/olekukonko/ll/lx"
)
@@ -14,16 +12,7 @@ import (
// a logger instance. The logger is initialized with default settings: enabled, Debug level,
// flat namespace style, and a text handler to os.Stdout. It is thread-safe due to the Logger
// structs mutex.
var defaultLogger = &Logger{
enabled: true, // Initially enabled
level: lx.LevelDebug, // Minimum log level set to Debug
namespaces: defaultStore, // Shared namespace store for enable/disable states
context: make(map[string]interface{}), // Empty context for global fields
style: lx.FlatPath, // Flat namespace style (e.g., [parent/child])
handler: lh.NewTextHandler(os.Stdout), // Default text handler to os.Stdout
middleware: make([]Middleware, 0), // Empty middleware chain
stackBufferSize: 4096, // Buffer size for stack traces
}
var defaultLogger = New("")
// Handler sets the handler for the default logger.
// It configures the output destination and format (e.g., text, JSON) for logs emitted by
@@ -233,16 +222,25 @@ func Panicf(format string, args ...any) {
}
// If creates a conditional logger that logs only if the condition is true using the default logger.
// It returns a Conditional struct that wraps the default logger, enabling conditional logging methods.
// Thread-safe via the Loggers mutex.
// Example:
//
// ll.If(true).Info("Logged") // Output: [] INFO: Logged
// ll.If(false).Info("Ignored") // No output
func If(condition bool) *Conditional {
return defaultLogger.If(condition)
}
// IfErr creates a conditional logger that logs only if the error is non-nil using the default logger.
func IfErr(err error) *Conditional {
return defaultLogger.IfErr(err)
}
// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil using the default logger.
func IfErrAny(errs ...error) *Conditional {
return defaultLogger.IfErrAny(errs...)
}
// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil using the default logger.
func IfErrOne(errs ...error) *Conditional {
return defaultLogger.IfErrOne(errs...)
}
// Context creates a new logger with additional contextual fields using the default logger.
// It preserves existing context fields and adds new ones, returning a new logger instance
// to avoid mutating the default logger. Thread-safe with write lock.
@@ -260,8 +258,8 @@ func Context(fields map[string]interface{}) *Logger {
//
// ll.AddContext("user", "alice")
// ll.Info("Action") // Output: [] INFO: Action [user=alice]
func AddContext(key string, value interface{}) *Logger {
return defaultLogger.AddContext(key, value)
func AddContext(pairs ...any) *Logger {
return defaultLogger.AddContext(pairs...)
}
// GetContext returns the default loggers context map of persistent key-value fields.
@@ -269,7 +267,7 @@ func AddContext(key string, value interface{}) *Logger {
// Example:
//
// ll.AddContext("user", "alice")
// ctx := ll.GetContext() // Returns map[string]interface{}{"user": "alice"}
// ctx := ll.GetContext() // Returns map[string]interface{}{"user": "alice"}
func GetContext() map[string]interface{} {
return defaultLogger.GetContext()
}
@@ -472,6 +470,37 @@ func Measure(fns ...func()) time.Duration {
return defaultLogger.Measure(fns...)
}
// Labels temporarily attaches one or more label names to the logger for the next log entry.
// Labels are typically used for metrics, benchmarking, tracing, or categorizing logs in a structured way.
//
// The labels are stored atomically and intended to be short-lived, applying only to the next
// log operation (or until overwritten by a subsequent call to Labels). Multiple labels can
// be provided as separate string arguments.
//
// Example usage:
//
// logger := New("app").Enable()
//
// // Add labels for a specific operation
// logger.Labels("load_users", "process_orders").Measure(func() {
// // ... perform work ...
// }, func() {
// // ... optional callback ...
// })
func Labels(names ...string) *Logger {
return defaultLogger.Labels(names...)
}
// Since creates a timer that will log the duration when completed
// If startTime is provided, uses that as the start time; otherwise uses time.Now()
//
// defer logger.Since().Info("request") // Auto-start
// logger.Since(start).Info("request") // Manual timing
// logger.Since().If(debug).Debug("timing") // Conditional
func Since(start ...time.Time) *SinceBuilder {
return defaultLogger.Since(start...)
}
// Benchmark logs the duration since a start time at Info level using the default logger.
// It calculates the time elapsed since the provided start time and logs it with "start",
// "end", and "duration" fields. Thread-safe via the Loggers mutex.
@@ -586,8 +615,8 @@ func Dbg(any ...interface{}) {
// Example:
//
// ll.Dump([]byte{0x41, 0x42}) // Outputs hex/ASCII dump
func Dump(any interface{}) {
defaultLogger.Dump(any)
func Dump(values ...interface{}) {
defaultLogger.Dump(values...)
}
// Enabled returns whether the default logger is enabled for logging.
@@ -672,3 +701,7 @@ func Apply(opts ...Option) *Logger {
return defaultLogger.Apply(opts...)
}
func Toggle(v bool) *Logger {
return defaultLogger.Toggle(v)
}

View File

@@ -31,7 +31,7 @@ func NewInspector(logger *Logger) *Inspector {
// Example usage within a Logger method:
//
// o := NewInspector(l)
// o.Log(2, someStruct) // Logs JSON representation with caller info
// o.Log(2, someStruct)
func (o *Inspector) Log(skip int, values ...interface{}) {
// Skip if logger is suspended or Info level is disabled
if o.logger.suspend.Load() || !o.logger.shouldLog(lx.LevelInfo) {
@@ -74,13 +74,13 @@ func (o *Inspector) Log(skip int, values ...interface{}) {
}
if err != nil {
o.logger.log(lx.LevelError, lx.ClassText, fmt.Sprintf("Inspector: JSON encoding error: %v", err), nil, false)
o.logger.log(lx.LevelError, lx.ClassInspect, fmt.Sprintf("Inspector: JSON encoding error: %v", err), nil, false)
continue
}
// Construct log message with file, line, and JSON data
msg := fmt.Sprintf("[%s:%d] INSPECT: %s", shortFile, line, string(jsonData))
o.logger.log(lx.LevelInfo, lx.ClassText, msg, nil, false)
msg := fmt.Sprintf("[%s:%d] %s", shortFile, line, string(jsonData))
o.logger.log(lx.LevelInfo, lx.ClassInspect, msg, nil, false)
}
}

View File

@@ -17,6 +17,7 @@ type Buffering struct {
FlushInterval time.Duration // Maximum time between flushes (default: 10s)
MaxBuffer int // Maximum buffer size before applying backpressure (default: 1000)
OnOverflow func(int) // Called when buffer reaches MaxBuffer (default: logs warning)
ErrorOutput io.Writer // Destination for internal errors like flush failures (default: os.Stderr)
}
// BufferingOpt configures Buffered handler.
@@ -66,6 +67,18 @@ func WithOverflowHandler(fn func(int)) BufferingOpt {
}
}
// WithErrorOutput sets the destination for internal errors (e.g., downstream handler failures).
// Defaults to os.Stderr if not set.
// Example:
//
// // Redirect internal errors to a file or discard them
// handler := NewBuffered(textHandler, WithErrorOutput(os.Stdout))
func WithErrorOutput(w io.Writer) BufferingOpt {
return func(c *Buffering) {
c.ErrorOutput = w
}
}
// Buffered wraps any Handler to provide buffering capabilities.
// It buffers log entries in a channel and flushes them based on batch size, time interval, or explicit flush.
// The generic type H ensures compatibility with any lx.Handler implementation.
@@ -93,7 +106,8 @@ func NewBuffered[H lx.Handler](handler H, opts ...BufferingOpt) *Buffered[H] {
BatchSize: 100, // Default: flush every 100 entries
FlushInterval: 10 * time.Second, // Default: flush every 10 seconds
MaxBuffer: 1000, // Default: max 1000 entries in buffer
OnOverflow: func(count int) { // Default: log overflow to io.Discard
ErrorOutput: os.Stderr, // Default: report errors to stderr
OnOverflow: func(count int) { // Default: log overflow to io.Discard (silent by default for overflow)
fmt.Fprintf(io.Discard, "log buffer overflow: %d entries\n", count)
},
}
@@ -113,6 +127,9 @@ func NewBuffered[H lx.Handler](handler H, opts ...BufferingOpt) *Buffered[H] {
if config.FlushInterval <= 0 {
config.FlushInterval = 10 * time.Second // Minimum flush interval is 10s
}
if config.ErrorOutput == nil {
config.ErrorOutput = os.Stderr
}
// Initialize Buffered handler
b := &Buffered[H]{
@@ -173,18 +190,25 @@ func (b *Buffered[H]) Flush() {
// Close flushes any remaining entries and stops the worker.
// It ensures shutdown is performed only once and waits for the worker to finish.
// If the underlying handler implements a Close() error method, it will be called to release resources.
// Thread-safe via sync.Once and WaitGroup.
// Returns nil as it does not produce errors.
// Returns any error from the underlying handler's Close, or nil.
// Example:
//
// buffered.Close() // Flushes entries and stops worker
func (b *Buffered[H]) Close() error {
var closeErr error
b.shutdownOnce.Do(func() {
close(b.shutdown) // Signal worker to shut down
b.wg.Wait() // Wait for worker to finish
runtime.SetFinalizer(b, nil) // Remove finalizer
// Check if underlying handler has a Close method and call it
if closer, ok := any(b.handler).(interface{ Close() error }); ok {
closeErr = closer.Close()
}
})
return nil
return closeErr
}
// Final ensures remaining entries are flushed during garbage collection.
@@ -246,7 +270,7 @@ func (b *Buffered[H]) worker() {
}
// flushBatch processes a batch of entries through the wrapped handler.
// It writes each entry to the underlying handler, logging any errors to stderr.
// It writes each entry to the underlying handler, logging any errors to the configured ErrorOutput.
// Example (internal usage):
//
// b.flushBatch([]*lx.Entry{entry1, entry2})
@@ -254,14 +278,16 @@ func (b *Buffered[H]) flushBatch(batch []*lx.Entry) {
for _, entry := range batch {
// Process each entry through the handler
if err := b.handler.Handle(entry); err != nil {
fmt.Fprintf(os.Stderr, "log flush error: %v\n", err) // Log errors to stderr
if b.config.ErrorOutput != nil {
fmt.Fprintf(b.config.ErrorOutput, "log flush error: %v\n", err)
}
}
}
}
// drainRemaining processes any remaining entries in the channel.
// It flushes all entries from the entries channel to the underlying handler,
// logging any errors to stderr. Used during flush or shutdown.
// logging any errors to the configured ErrorOutput. Used during flush or shutdown.
// Example (internal usage):
//
// b.drainRemaining() // Flushes all pending entries
@@ -270,7 +296,9 @@ func (b *Buffered[H]) drainRemaining() {
select {
case entry := <-b.entries: // Process next entry
if err := b.handler.Handle(entry); err != nil {
fmt.Fprintf(os.Stderr, "log drain error: %v\n", err) // Log errors to stderr
if b.config.ErrorOutput != nil {
fmt.Fprintf(b.config.ErrorOutput, "log drain error: %v\n", err)
}
}
default: // Exit when channel is empty
return

View File

File diff suppressed because it is too large Load Diff

163
vendor/github.com/olekukonko/ll/lh/dedup.go generated vendored Normal file
View File

@@ -0,0 +1,163 @@
package lh
import (
"sync"
"time"
"github.com/olekukonko/ll/lx"
)
// Dedup is a log handler that suppresses duplicate entries within a TTL window.
// It wraps another handler (H) and filters out repeated log entries that match
// within the deduplication period.
type Dedup[H lx.Handler] struct {
	next         H                      // wrapped handler that receives non-suppressed entries
	ttl          time.Duration          // suppression window for a repeated entry
	cleanupEvery time.Duration          // how often the background sweep purges expired keys
	keyFn        func(*lx.Entry) uint64 // computes the deduplication key for an entry
	maxKeys      int                    // soft cap on tracked keys across all shards (0 = unbounded)

	// shards reduce lock contention by partitioning the key space
	shards [32]dedupShard
	done   chan struct{}  // closed by Close to stop the cleanup goroutine
	wg     sync.WaitGroup // lets Close wait for the cleanup goroutine to exit
	once   sync.Once      // guards one-time shutdown in Close
}

// dedupShard holds one partition of the deduplication key space.
// seen maps a key to its expiry time (UnixNano); mu guards the map.
type dedupShard struct {
	mu   sync.Mutex
	seen map[uint64]int64
}
// DedupOpt configures a Dedup handler.
type DedupOpt[H lx.Handler] func(*Dedup[H])

// WithDedupKeyFunc customizes how deduplication keys are generated.
// The supplied function must return a stable 64-bit key for an entry.
func WithDedupKeyFunc[H lx.Handler](fn func(*lx.Entry) uint64) DedupOpt[H] {
	return func(d *Dedup[H]) {
		d.keyFn = fn
	}
}
// WithDedupCleanupInterval sets how often expired deduplication keys are purged.
// Non-positive intervals are ignored and the default is kept.
func WithDedupCleanupInterval[H lx.Handler](every time.Duration) DedupOpt[H] {
	return func(d *Dedup[H]) {
		if every <= 0 {
			return // keep the existing interval
		}
		d.cleanupEvery = every
	}
}
// WithDedupMaxKeys sets a soft limit on tracked deduplication keys.
// Non-positive values are ignored, leaving key tracking unbounded.
// The parameter is named limit rather than max so it does not shadow
// the built-in max function (Go 1.21+); the call-site API is unchanged.
func WithDedupMaxKeys[H lx.Handler](limit int) DedupOpt[H] {
	return func(d *Dedup[H]) {
		if limit > 0 {
			d.maxKeys = limit
		}
	}
}
// NewDedup creates a deduplicating handler wrapper.
// next receives only entries that are not suppressed; ttl is the suppression
// window (values <= 0 fall back to 2 seconds). Options may override the key
// function, cleanup interval, and key cap. A background goroutine purges
// expired keys until Close is called.
func NewDedup[H lx.Handler](next H, ttl time.Duration, opts ...DedupOpt[H]) *Dedup[H] {
	if ttl <= 0 {
		ttl = 2 * time.Second // sane default when caller passes zero/negative TTL
	}
	d := &Dedup[H]{
		next:         next,
		ttl:          ttl,
		cleanupEvery: time.Minute,
		keyFn:        defaultDedupKey,
		done:         make(chan struct{}),
	}
	// Initialize shards
	for i := 0; i < len(d.shards); i++ {
		d.shards[i].seen = make(map[uint64]int64, 64)
	}
	// Apply options after defaults so callers can override any of the above.
	for _, opt := range opts {
		opt(d)
	}
	// Start the background sweeper; stopped via the done channel in Close.
	d.wg.Add(1)
	go d.cleanupLoop()
	return d
}
// Handle processes a log entry, suppressing duplicates within the TTL window.
// It returns nil without forwarding when the entry's key was seen and has not
// yet expired; otherwise it records the key and forwards to the next handler.
func (d *Dedup[H]) Handle(e *lx.Entry) error {
	now := time.Now().UnixNano()
	key := d.keyFn(e)

	// Select shard based on key hash
	shardIdx := key % uint64(len(d.shards))
	shard := &d.shards[shardIdx]

	shard.mu.Lock()
	exp, ok := shard.seen[key]
	if ok && now < exp {
		shard.mu.Unlock()
		return nil // duplicate within TTL: suppress
	}

	// Basic guard against unbounded growth per shard.
	// Using strict limits per shard avoids global atomic counters.
	if d.maxKeys > 0 {
		limitPerShard := d.maxKeys / len(d.shards)
		if limitPerShard < 1 {
			// A cap smaller than the shard count would otherwise compute a
			// zero limit and trigger an opportunistic cleanup on every single
			// insert; clamp to one key per shard instead.
			limitPerShard = 1
		}
		if len(shard.seen) >= limitPerShard {
			// Opportunistic cleanup of current shard
			d.cleanupShard(shard, now)
		}
	}

	shard.seen[key] = now + d.ttl.Nanoseconds()
	shard.mu.Unlock()

	return d.next.Handle(e)
}
// Close stops the cleanup goroutine and closes the underlying handler.
// Shutdown runs at most once; later calls do nothing and return nil.
func (d *Dedup[H]) Close() error {
	var closeErr error
	d.once.Do(func() {
		close(d.done) // signal cleanupLoop to exit
		d.wg.Wait()   // wait until it actually has
		if closer, ok := any(d.next).(interface{ Close() error }); ok {
			closeErr = closer.Close()
		}
	})
	return closeErr
}
// cleanupLoop runs periodically to purge expired deduplication keys.
// It exits when the done channel is closed by Close.
func (d *Dedup[H]) cleanupLoop() {
	defer d.wg.Done()

	ticker := time.NewTicker(d.cleanupEvery)
	defer ticker.Stop()

	for {
		select {
		case <-d.done:
			return
		case <-ticker.C:
			now := time.Now().UnixNano()
			// Sweep shards one at a time to avoid a large CPU spike.
			for i := range d.shards {
				shard := &d.shards[i]
				shard.mu.Lock()
				d.cleanupShard(shard, now)
				shard.mu.Unlock()
			}
		}
	}
}
// cleanupShard removes expired keys from a specific shard.
// The caller must already hold shard.mu.
func (d *Dedup[H]) cleanupShard(shard *dedupShard, now int64) {
	for key, expiry := range shard.seen {
		if expiry < now {
			delete(shard.seen, key)
		}
	}
}

View File

@@ -1,26 +1,34 @@
package lh
import (
"bytes"
"encoding/json"
"fmt"
"github.com/olekukonko/ll/lx"
"io"
"os"
"strings"
"sync"
"time"
"github.com/olekukonko/ll/lx"
)
var jsonBufPool = sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
// JSONHandler is a handler that outputs log entries as JSON objects.
// It formats log entries with timestamp, level, message, namespace, fields, and optional
// stack traces or dump segments, writing the result to the provided writer.
// Thread-safe with a mutex to protect concurrent writes.
type JSONHandler struct {
writer io.Writer // Destination for JSON output
timeFmt string // Format for timestamp (default: RFC3339Nano)
pretty bool // Enable pretty printing with indentation if true
fieldMap map[string]string // Optional mapping for field names (not used in provided code)
mu sync.Mutex // Protects concurrent access to writer
writer io.Writer // Destination for JSON output
timeFmt string // Format for timestamp (default: RFC3339Nano)
pretty bool // Enable pretty printing with indentation if true
//fieldMap map[string]string // Optional mapping for field names (not used in provided code)
mu sync.Mutex // Protects concurrent access to writer
}
// JsonOutput represents the JSON structure for a log entry.
@@ -84,6 +92,13 @@ func (h *JSONHandler) Handle(e *lx.Entry) error {
return h.handleRegular(e)
}
// Output replaces the JSONHandler's destination writer, ensuring thread
// safety with a mutex lock.
func (h *JSONHandler) Output(w io.Writer) {
	h.mu.Lock()
	h.writer = w
	h.mu.Unlock()
}
// handleRegular handles standard log entries (non-dump).
// It converts the entry to a JsonOutput struct and encodes it as JSON,
// applying pretty printing if enabled. Logs encoding errors to stderr for debugging.
@@ -92,6 +107,12 @@ func (h *JSONHandler) Handle(e *lx.Entry) error {
//
// h.handleRegular(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes JSON object
func (h *JSONHandler) handleRegular(e *lx.Entry) error {
// Convert ordered fields to map for JSON output
fieldsMap := make(map[string]interface{}, len(e.Fields))
for _, pair := range e.Fields {
fieldsMap[pair.Key] = pair.Value
}
// Create JSON output structure
entry := JsonOutput{
Time: e.Timestamp.Format(h.timeFmt), // Format timestamp
@@ -100,23 +121,32 @@ func (h *JSONHandler) handleRegular(e *lx.Entry) error {
Msg: e.Message, // Set message
Namespace: e.Namespace, // Set namespace
Dump: nil, // No dump for regular entries
Fields: e.Fields, // Copy fields
Fields: fieldsMap, // Copy fields as map
Stack: e.Stack, // Include stack trace if present
}
// Create JSON encoder
enc := json.NewEncoder(h.writer)
// Acquire buffer from pool to avoid allocation and reduce syscalls
buf := jsonBufPool.Get().(*bytes.Buffer)
buf.Reset()
defer jsonBufPool.Put(buf)
// Create JSON encoder writing to buffer
enc := json.NewEncoder(buf)
if h.pretty {
// Enable indentation for pretty printing
enc.SetIndent("", " ")
}
// Log encoding attempt for debugging
fmt.Fprintf(os.Stderr, "Encoding JSON entry: %v\n", e.Message)
// Encode and write JSON
// Encode JSON to buffer
err := enc.Encode(entry)
if err != nil {
// Log encoding error for debugging
fmt.Fprintf(os.Stderr, "JSON encode error: %v\n", err)
return err
}
// Write buffer to underlying writer in one go
_, err = h.writer.Write(buf.Bytes())
return err
}
@@ -156,15 +186,40 @@ func (h *JSONHandler) handleDump(e *lx.Entry) error {
})
}
// Encode JSON output with dump segments
return json.NewEncoder(h.writer).Encode(JsonOutput{
// Convert ordered fields to map for JSON output
fieldsMap := make(map[string]interface{}, len(e.Fields))
for _, pair := range e.Fields {
fieldsMap[pair.Key] = pair.Value
}
// Acquire buffer from pool
buf := jsonBufPool.Get().(*bytes.Buffer)
buf.Reset()
defer jsonBufPool.Put(buf)
// Encode JSON output with dump segments to buffer
enc := json.NewEncoder(buf)
if h.pretty {
enc.SetIndent("", " ")
}
err := enc.Encode(JsonOutput{
Time: e.Timestamp.Format(h.timeFmt), // Format timestamp
Level: e.Level.String(), // Convert level to string
Class: e.Class.String(), // Convert class to string
Msg: "dumping segments", // Fixed message for dumps
Namespace: e.Namespace, // Set namespace
Dump: segments, // Include parsed segments
Fields: e.Fields, // Copy fields
Fields: fieldsMap, // Copy fields as map
Stack: e.Stack, // Include stack trace if present
})
if err != nil {
fmt.Fprintf(os.Stderr, "JSON dump encode error: %v\n", err)
return err
}
// Write buffer to underlying writer
_, err = h.writer.Write(buf.Bytes())
return err
}

67
vendor/github.com/olekukonko/ll/lh/lh.go generated vendored Normal file
View File

@@ -0,0 +1,67 @@
package lh
import (
"bytes"
"fmt"
"sort"
"strings"
"sync"
"github.com/cespare/xxhash/v2"
"github.com/olekukonko/ll/lx"
)
// rightPad returns str followed by enough spaces to reach the requested
// length. Strings already at or beyond the target length are returned
// unchanged. Length is measured in bytes, not display columns.
func rightPad(str string, length int) string {
	pad := length - len(str)
	if pad <= 0 {
		return str
	}
	return str + strings.Repeat(" ", pad)
}
// dedupBufPool recycles scratch buffers used by defaultDedupKey to serialize
// field key/value pairs without a per-call allocation.
var dedupBufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}
// defaultDedupKey generates a deduplication key from the log level, message,
// namespace, and (when present) the sorted structured fields.
// Uses xxhash for speed and good distribution. Override with WithDedupKeyFunc
// to customize which parts of the entry participate in deduplication.
func defaultDedupKey(e *lx.Entry) uint64 {
	h := xxhash.New()
	// NUL separators prevent ambiguous concatenations (e.g. "ab"+"c" vs
	// "a"+"bc") from producing identical hashes.
	_, _ = h.Write([]byte(e.Level.String()))
	_, _ = h.Write([]byte{0})
	_, _ = h.Write([]byte(e.Message))
	_, _ = h.Write([]byte{0})
	_, _ = h.Write([]byte(e.Namespace))
	_, _ = h.Write([]byte{0})
	if len(e.Fields) > 0 {
		// Sort field keys so logically-equal entries hash identically
		// regardless of field insertion order.
		m := e.Fields.Map()
		keys := make([]string, 0, len(m))
		for k := range m {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		// Serialize "key=value\0" pairs into a pooled buffer, then hash once.
		buf := dedupBufPool.Get().(*bytes.Buffer)
		buf.Reset()
		defer dedupBufPool.Put(buf)
		for _, k := range keys {
			fmt.Fprint(buf, k)
			buf.WriteByte('=')
			fmt.Fprint(buf, m[k])
			buf.WriteByte(0)
		}
		_, _ = h.Write(buf.Bytes())
	}
	return h.Sum64()
}

View File

@@ -2,9 +2,10 @@ package lh
import (
"fmt"
"github.com/olekukonko/ll/lx"
"io"
"sync"
"github.com/olekukonko/ll/lx"
)
// MemoryHandler is an lx.Handler that stores log entries in memory.
@@ -106,7 +107,7 @@ func (h *MemoryHandler) Dump(w io.Writer) error {
// Process each entry through the TextHandler
for _, entry := range h.entries {
if err := tempHandler.Handle(entry); err != nil {
return fmt.Errorf("failed to dump entry: %w", err) // Wrap and return write errors
return fmt.Errorf("failed to dump entry: %writer", err) // Wrap and return write errors
}
}
return nil

View File

@@ -32,15 +32,31 @@ func NewMultiHandler(h ...lx.Handler) *MultiHandler {
}
// Len returns the number of handlers in the MultiHandler.
// Useful for monitoring or debugging handler composition.
//
// Example:
//
// multi := &MultiHandler{}
// multi.Append(h1, h2, h3)
// count := multi.Len() // Returns 3
func (h *MultiHandler) Len() int {
return len(h.Handlers)
}
// Append adds one or more lx.Handler instances to the MultiHandler's list of handlers.
// Append adds one or more handlers to the MultiHandler.
// Handlers will receive log entries in the order they were appended.
// This method modifies the MultiHandler in place.
//
// Example:
//
// multi := &MultiHandler{}
// multi.Append(
// lx.NewJSONHandler(os.Stdout),
// lx.NewTextHandler(logFile),
// )
// // Now multi broadcasts to both stdout and file
func (h *MultiHandler) Append(handlers ...lx.Handler) {
for _, e := range handlers {
h.Handlers = append(h.Handlers, e)
}
h.Handlers = append(h.Handlers, handlers...)
}
// Handle implements the Handler interface, calling Handle on each handler in sequence.
@@ -56,7 +72,7 @@ func (h *MultiHandler) Handle(e *lx.Entry) error {
if err := handler.Handle(e); err != nil {
// fmt.Fprintf(os.Stderr, "MultiHandler error for handler %d: %v\n", i, err)
// Wrap error with handler index for context
errs = append(errs, fmt.Errorf("handler %d: %w", i, err))
errs = append(errs, fmt.Errorf("handler %d: %writer", i, err))
}
}
// Combine errors into a single error, or return nil if no errors

76
vendor/github.com/olekukonko/ll/lh/pipe.go generated vendored Normal file
View File

@@ -0,0 +1,76 @@
package lh
import (
"fmt"
"os"
"time"
"github.com/olekukonko/ll/lx"
)
// Pipe chains multiple handler wrappers together, applying them from left to right.
// The wrappers are composed such that the first wrapper in the list becomes
// the innermost layer, and the last wrapper becomes the outermost layer.
//
// Usage pattern: Pipe(baseHandler, wrapper1, wrapper2, wrapper3)
// Result: wrapper3(wrapper2(wrapper1(baseHandler)))
//
// This enables clean, declarative construction of handler middleware chains.
// Nil wrappers are skipped.
//
// Example - building a processing pipeline:
//
//	base := lx.NewJSONHandler(os.Stdout)
//	handler := lh.Pipe(base,
//	    lh.PipeDedup(2*time.Second), // innermost: closest to the base handler
//	    lh.PipeBuffer(),             // outermost: sees each entry first
//	)
//	logger := lx.NewLogger(handler)
//
// In this example, each log entry flows: Buffered → Dedup → JSONHandler
// (the outermost/last wrapper receives the entry first).
func Pipe(h lx.Handler, wraps ...lx.Wrap) lx.Handler {
	for _, w := range wraps {
		if w != nil {
			h = w(h)
		}
	}
	return h
}
// PipeDedup returns a wrapper that applies deduplication to the handler.
// The ttl and options are forwarded to NewDedup when the wrapper is applied.
func PipeDedup(ttl time.Duration, opts ...DedupOpt[lx.Handler]) lx.Wrap {
	wrap := func(next lx.Handler) lx.Handler {
		return NewDedup(next, ttl, opts...)
	}
	return wrap
}
// PipeBuffer returns a wrapper that applies buffering to the handler.
// The options are forwarded to NewBuffered when the wrapper is applied.
func PipeBuffer(opts ...BufferingOpt) lx.Wrap {
	wrap := func(next lx.Handler) lx.Handler {
		return NewBuffered(next, opts...)
	}
	return wrap
}
// PipeRotate returns a wrapper that applies log rotation.
// Ideally, the 'next' handler should be one that writes to a file (like TextHandler or JSONHandler).
//
// If the underlying handler does not implement lx.HandlerOutputter (cannot change output destination),
// or if rotation initialization fails, this will log a warning to stderr and return the
// original handler unmodified to prevent application crashes.
func PipeRotate(maxSizeBytes int64, src RotateSource) lx.Wrap {
	return func(next lx.Handler) lx.Handler {
		// Attempt to cast to HandlerOutputter (Handler + Outputter interface).
		// Rotation works by swapping the handler's output destination, so a
		// handler without Output support cannot be wrapped.
		h, ok := next.(lx.HandlerOutputter)
		if !ok {
			fmt.Fprintf(os.Stderr, "ll/lh: PipeRotate skipped - handler does not implement SetOutput(io.Writer)\n")
			return next
		}

		// Initialize the rotating handler; NewRotating opens the first
		// destination via src.Open, which may fail (e.g. permissions).
		r, err := NewRotating(h, maxSizeBytes, src)
		if err != nil {
			fmt.Fprintf(os.Stderr, "ll/lh: PipeRotate initialization failed: %v\n", err)
			return next
		}
		return r
	}
}

176
vendor/github.com/olekukonko/ll/lh/rotate.go generated vendored Normal file
View File

@@ -0,0 +1,176 @@
package lh
import (
"io"
"sync"
"github.com/olekukonko/ll/lx"
)
// RotateSource defines the callbacks needed to implement log rotation.
// It abstracts the destination lifecycle: opening, sizing, and rotating.
// All callbacks are invoked by the Rotating wrapper while it holds its
// internal mutex, so they never run concurrently with each other.
//
// Example for file rotation:
//
//	src := lh.RotateSource{
//	    Open: func() (io.WriteCloser, error) {
//	        return os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
//	    },
//	    Size: func() (int64, error) {
//	        if fi, err := os.Stat("app.log"); err == nil {
//	            return fi.Size(), nil
//	        }
//	        return 0, nil // File doesn't exist yet
//	    },
//	    Rotate: func() error {
//	        // Rename current log before creating new one
//	        return os.Rename("app.log", "app.log."+time.Now().Format("20060102-150405"))
//	    },
//	}
type RotateSource struct {
	// Open returns a fresh destination for log output.
	// Called on initialization and after rotation.
	Open func() (io.WriteCloser, error)

	// Size returns the current size in bytes of the active destination.
	// Return an error if size cannot be determined (rotation will be skipped).
	Size func() (int64, error)

	// Rotate performs cleanup/rotation actions before opening a new destination.
	// For files: rename or move the current log. Optional for other destinations.
	Rotate func() error
}
// Rotating wraps a handler to rotate its output when maxSize is exceeded.
// The wrapped handler must implement both Handler and Outputter interfaces.
// Rotation is triggered on each Handle call if the current size >= maxSize.
//
// Example:
//
//	handler := lx.NewJSONHandler(os.Stdout)
//	src := lh.RotateSource{...} // see RotateSource example
//	rotator, err := lh.NewRotating(handler, 10*1024*1024, src) // 10 MB
//	logger := lx.NewLogger(rotator)
//	logger.Info("This log may trigger rotation when file reaches 10MB")
type Rotating[H interface {
	lx.Handler
	lx.Outputter
}] struct {
	mu      sync.Mutex     // guards out and all rotation decisions
	maxSize int64          // rotation threshold in bytes; <= 0 disables rotation
	src     RotateSource   // destination lifecycle callbacks
	out     io.WriteCloser // currently open destination
	handler H              // wrapped handler whose output is swapped on rotation
}
// NewRotating creates a rotating wrapper around handler.
// Handler's output will be replaced with destinations from src.Open.
// If maxSizeBytes <= 0, rotation is disabled.
// src.Rotate may be nil if no pre-open actions are needed.
// The first destination is opened immediately; if src.Open fails, the error
// is returned and no wrapper is created.
//
// Example:
//
//	// Create a JSON handler that rotates at 5MB
//	handler := lx.NewJSONHandler(os.Stdout)
//	rotator, err := lh.NewRotating(handler, 5*1024*1024, src)
//	if err != nil {
//	    log.Fatal(err)
//	}
//	// Use rotator as your logger's handler
//	logger := lx.NewLogger(rotator)
func NewRotating[H interface {
	lx.Handler
	lx.Outputter
}](handler H, maxSizeBytes int64, src RotateSource) (*Rotating[H], error) {
	r := &Rotating[H]{
		maxSize: maxSizeBytes,
		src:     src,
		handler: handler,
	}
	// Open the initial destination and point the wrapped handler at it.
	if err := r.reopenLocked(); err != nil {
		return nil, err
	}
	return r, nil
}
// Handle processes a log entry, rotating the output first if the size
// threshold has been reached. Thread-safe: can be called concurrently.
//
// Example:
//
//	rotator.Handle(&lx.Entry{
//	    Level:     lx.InfoLevel,
//	    Message:   "Processing request",
//	    Namespace: "api",
//	})
func (r *Rotating[H]) Handle(e *lx.Entry) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Rotate first so the entry lands in the fresh destination.
	err := r.rotateIfNeededLocked()
	if err == nil {
		err = r.handler.Handle(e)
	}
	return err
}
// Close releases resources by closing the current output destination.
// Safe to call multiple times: the first call closes the output and reports
// its error; subsequent calls are no-ops and return nil.
//
// Example:
//
//	defer rotator.Close()
func (r *Rotating[H]) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.out == nil {
		return nil
	}
	err := r.out.Close()
	// Drop the reference so a repeated Close does not close the destination
	// twice (many io.WriteCloser implementations error on double close).
	r.out = nil
	return err
}
// rotateIfNeededLocked checks current size and rotates if maxSize exceeded.
// Called with mu already held.
// The rotation sequence is strictly ordered: close the current output, run
// the optional Rotate hook (e.g. rename the file), then open a fresh
// destination. Rotation is a no-op when disabled (maxSize <= 0) or when the
// source is missing its Size/Open callbacks.
func (r *Rotating[H]) rotateIfNeededLocked() error {
	if r.maxSize <= 0 || r.src.Size == nil || r.src.Open == nil {
		return nil // rotation disabled or not fully configured
	}
	size, err := r.src.Size()
	if err != nil {
		// Size unknown - skip rotation rather than fail the log write
		return nil
	}
	if size < r.maxSize {
		return nil // still under the threshold
	}
	// Close current output before the rotation hook touches the destination
	if r.out != nil {
		_ = r.out.Close() // best-effort: a close error must not block rotation
		r.out = nil
	}
	// Run rotation hook (rename/move/commit)
	if r.src.Rotate != nil {
		if err := r.src.Rotate(); err != nil {
			return err
		}
	}
	// Open fresh output
	return r.reopenLocked()
}
// reopenLocked opens a new destination via src.Open and points the wrapped
// handler's output at it. Called with mu already held.
func (r *Rotating[H]) reopenLocked() error {
	dst, err := r.src.Open()
	if err != nil {
		return err
	}
	r.out = dst
	r.handler.Output(dst)
	return nil
}

View File

@@ -28,6 +28,15 @@ func NewSlogHandler(h slog.Handler) *SlogHandler {
return &SlogHandler{slogHandler: h}
}
// Handle converts an lx.Entry to slog.Record and delegates to the slog.Handler.
// It maps the entry's fields, level, namespace, class, and stack trace to slog attributes,
// passing the resulting record to the underlying slog.Handler.
// Returns an error if the slog.Handler fails to process the record.
// Thread-safe if the underlying slog.Handler is thread-safe.
// Example:
//
// handler.Handle(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Processes as slog record
//
// Handle converts an lx.Entry to slog.Record and delegates to the slog.Handler.
// It maps the entry's fields, level, namespace, class, and stack trace to slog attributes,
// passing the resulting record to the underlying slog.Handler.
@@ -59,9 +68,9 @@ func (h *SlogHandler) Handle(e *lx.Entry) error {
record.AddAttrs(slog.String("stack", string(e.Stack))) // Add stack trace as string
}
// Add custom fields
for k, v := range e.Fields {
record.AddAttrs(slog.Any(k, v)) // Add each field as a key-value attribute
// Add custom fields in order (preserving insertion order)
for _, pair := range e.Fields {
record.AddAttrs(slog.Any(pair.Key, pair.Value)) // Add each field as a key-value attribute
}
// Handle the record with the underlying slog.Handler

View File

@@ -1,9 +1,9 @@
package lh
import (
"bytes"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
@@ -11,12 +11,40 @@ import (
"github.com/olekukonko/ll/lx"
)
type TextOption func(*TextHandler)
var textBufPool = sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
// WithTextTimeFormat enables timestamp display and optionally sets a custom time format.
// It configures the TextHandler to include temporal information in each log entry,
// allowing for precise tracking of when log events occur.
// If the format string is empty, it defaults to time.RFC3339.
func WithTextTimeFormat(format string) TextOption {
	return func(t *TextHandler) {
		// Delegate to Timestamped so an empty format falls back to its default.
		t.Timestamped(true, format)
	}
}
// WithTextShowTime enables or disables timestamp display in log entries.
// This option provides direct control over the visibility of the time prefix
// without altering the underlying time format configured in the handler.
// Setting show to true will prepend timestamps to all subsequent regular log outputs.
func WithTextShowTime(show bool) TextOption {
return func(t *TextHandler) {
t.showTime = show
}
}
// TextHandler is a handler that outputs log entries as plain text.
// It formats log entries with namespace, level, message, fields, and optional stack traces,
// writing the result to the provided writer.
// Thread-safe if the underlying writer is thread-safe.
type TextHandler struct {
w io.Writer // Destination for formatted log output
writer io.Writer // Destination for formatted log output
showTime bool // Whether to display timestamps
timeFormat string // Format for timestamps (defaults to time.RFC3339)
mu sync.Mutex
@@ -29,12 +57,18 @@ type TextHandler struct {
// handler := NewTextHandler(os.Stdout)
// logger := ll.New("app").Enable().Handler(handler)
// logger.Info("Test") // Output: [app] INFO: Test
func NewTextHandler(w io.Writer) *TextHandler {
return &TextHandler{
w: w,
func NewTextHandler(w io.Writer, opts ...TextOption) *TextHandler {
t := &TextHandler{
writer: w,
showTime: false,
timeFormat: time.RFC3339,
}
for _, opt := range opts {
opt(t)
}
return t
}
// Timestamped enables or disables timestamp display and optionally sets a custom time format.
@@ -50,6 +84,14 @@ func (h *TextHandler) Timestamped(enable bool, format ...string) {
}
}
// Output sets a new writer for the TextHandler.
// Thread-safe - safe for concurrent use.
func (h *TextHandler) Output(w io.Writer) {
h.mu.Lock()
defer h.mu.Unlock()
h.writer = w
}
// Handle processes a log entry and writes it as plain text.
// It delegates to specialized methods based on the entry's class (Dump, Raw, or regular).
// Returns an error if writing to the underlying writer fails.
@@ -61,18 +103,15 @@ func (h *TextHandler) Handle(e *lx.Entry) error {
h.mu.Lock()
defer h.mu.Unlock()
// Special handling for dump output
if e.Class == lx.ClassDump {
return h.handleDumpOutput(e)
}
// Raw entries are written directly without formatting
if e.Class == lx.ClassRaw {
_, err := h.w.Write([]byte(e.Message))
_, err := h.writer.Write([]byte(e.Message))
return err
}
// Handle standard log entries
return h.handleRegularOutput(e)
}
@@ -84,81 +123,68 @@ func (h *TextHandler) Handle(e *lx.Entry) error {
//
// h.handleRegularOutput(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes "INFO: test"
func (h *TextHandler) handleRegularOutput(e *lx.Entry) error {
var builder strings.Builder // Buffer for building formatted output
buf := textBufPool.Get().(*bytes.Buffer)
buf.Reset()
defer textBufPool.Put(buf)
// Add timestamp if enabled
if h.showTime {
builder.WriteString(e.Timestamp.Format(h.timeFormat))
builder.WriteString(lx.Space)
buf.WriteString(e.Timestamp.Format(h.timeFormat))
buf.WriteString(lx.Space)
}
// Format namespace based on style
switch e.Style {
case lx.NestedPath:
if e.Namespace != "" {
// Split namespace into parts and format as [parent]→[child]
parts := strings.Split(e.Namespace, lx.Slash)
for i, part := range parts {
builder.WriteString(lx.LeftBracket)
builder.WriteString(part)
builder.WriteString(lx.RightBracket)
buf.WriteString(lx.LeftBracket)
buf.WriteString(part)
buf.WriteString(lx.RightBracket)
if i < len(parts)-1 {
builder.WriteString(lx.Arrow)
buf.WriteString(lx.Arrow)
}
}
builder.WriteString(lx.Colon)
builder.WriteString(lx.Space)
buf.WriteString(lx.Colon)
buf.WriteString(lx.Space)
}
default: // FlatPath
if e.Namespace != "" {
// Format namespace as [parent/child]
builder.WriteString(lx.LeftBracket)
builder.WriteString(e.Namespace)
builder.WriteString(lx.RightBracket)
builder.WriteString(lx.Space)
buf.WriteString(lx.LeftBracket)
buf.WriteString(e.Namespace)
buf.WriteString(lx.RightBracket)
buf.WriteString(lx.Space)
}
}
// Add level and message
builder.WriteString(e.Level.String())
builder.WriteString(lx.Colon)
builder.WriteString(lx.Space)
builder.WriteString(e.Message)
buf.WriteString(e.Level.Name(e.Class))
// buf.WriteString(lx.Space)
buf.WriteString(lx.Colon)
buf.WriteString(lx.Space)
buf.WriteString(e.Message)
// Add fields in sorted order
if len(e.Fields) > 0 {
var keys []string
for k := range e.Fields {
keys = append(keys, k)
}
// Sort keys for consistent output
sort.Strings(keys)
builder.WriteString(lx.Space)
builder.WriteString(lx.LeftBracket)
for i, k := range keys {
buf.WriteString(lx.Space)
buf.WriteString(lx.LeftBracket)
for i, pair := range e.Fields {
if i > 0 {
builder.WriteString(lx.Space)
buf.WriteString(lx.Space)
}
// Format field as key=value
builder.WriteString(k)
builder.WriteString("=")
builder.WriteString(fmt.Sprint(e.Fields[k]))
buf.WriteString(pair.Key)
buf.WriteString("=")
fmt.Fprint(buf, pair.Value)
}
builder.WriteString(lx.RightBracket)
buf.WriteString(lx.RightBracket)
}
// Add stack trace if present
if len(e.Stack) > 0 {
h.formatStack(&builder, e.Stack)
h.formatStack(buf, e.Stack)
}
// Append newline for non-None levels
if e.Level != lx.LevelNone {
builder.WriteString(lx.Newline)
buf.WriteString(lx.Newline)
}
// Write formatted output to writer
_, err := h.w.Write([]byte(builder.String()))
_, err := h.writer.Write(buf.Bytes())
return err
}
@@ -169,22 +195,20 @@ func (h *TextHandler) handleRegularOutput(e *lx.Entry) error {
//
// h.handleDumpOutput(&lx.Entry{Class: lx.ClassDump, Message: "pos 00 hex: 61"}) // Writes "---- BEGIN DUMP ----\npos 00 hex: 61\n---- END DUMP ----\n"
func (h *TextHandler) handleDumpOutput(e *lx.Entry) error {
// For text handler, we just add a newline before dump output
var builder strings.Builder // Buffer for building formatted output
buf := textBufPool.Get().(*bytes.Buffer)
buf.Reset()
defer textBufPool.Put(buf)
// Add timestamp if enabled
if h.showTime {
builder.WriteString(e.Timestamp.Format(h.timeFormat))
builder.WriteString(lx.Newline)
buf.WriteString(e.Timestamp.Format(h.timeFormat))
buf.WriteString(lx.Newline)
}
// Add separator lines and dump content
builder.WriteString("---- BEGIN DUMP ----\n")
builder.WriteString(e.Message)
builder.WriteString("---- END DUMP ----\n")
buf.WriteString("---- BEGIN DUMP ----\n")
buf.WriteString(e.Message)
buf.WriteString("---- END DUMP ----\n\n")
// Write formatted output to writer
_, err := h.w.Write([]byte(builder.String()))
_, err := h.writer.Write(buf.Bytes())
return err
}
@@ -194,21 +218,18 @@ func (h *TextHandler) handleDumpOutput(e *lx.Entry) error {
// Example (internal usage):
//
// h.formatStack(&builder, []byte("goroutine 1 [running]:\nmain.main()\n\tmain.go:10")) // Appends formatted stack trace
func (h *TextHandler) formatStack(b *strings.Builder, stack []byte) {
func (h *TextHandler) formatStack(b *bytes.Buffer, stack []byte) {
lines := strings.Split(string(stack), "\n")
if len(lines) == 0 {
return
}
// Start stack trace section
b.WriteString("\n[stack]\n")
// First line: goroutine
b.WriteString(" ┌─ ")
b.WriteString(lines[0])
b.WriteString("\n")
// Iterate through remaining lines
for i := 1; i < len(lines); i++ {
line := strings.TrimSpace(lines[i])
if line == "" {
@@ -216,16 +237,13 @@ func (h *TextHandler) formatStack(b *strings.Builder, stack []byte) {
}
if strings.Contains(line, ".go") {
// File path lines get extra indent
b.WriteString(" ├ ")
} else {
// Function names
b.WriteString(" │ ")
}
b.WriteString(line)
b.WriteString("\n")
}
// End stack trace section
b.WriteString(" └\n")
}

313
vendor/github.com/olekukonko/ll/ll.go generated vendored
View File

@@ -1,7 +1,6 @@
package ll
import (
"bufio"
"encoding/binary"
"encoding/json"
"fmt"
@@ -24,23 +23,25 @@ import (
// log level, namespaces, context fields, output style, handler, middleware, and formatting.
// It is thread-safe, using a read-write mutex to protect concurrent access to its fields.
type Logger struct {
mu sync.RWMutex // Guards concurrent access to fields
enabled bool // Determines if logging is enabled
suspend atomic.Bool // uses suspend path for most actions eg. skipping namespace checks
level lx.LevelType // Minimum log level (e.g., Debug, Info, Warn, Error)
namespaces *lx.Namespace // Manages namespace enable/disable states
currentPath string // Current namespace path (e.g., "parent/child")
context map[string]interface{} // Contextual fields included in all logs
style lx.StyleType // Namespace formatting style (FlatPath or NestedPath)
handler lx.Handler // Output handler for logs (e.g., text, JSON)
middleware []Middleware // Middleware functions to process log entries
prefix string // Prefix prepended to log messages
indent int // Number of double spaces for message indentation
stackBufferSize int // Buffer size for capturing stack traces
separator string // Separator for namespace paths (e.g., "/")
entries atomic.Int64 // Tracks total log entries sent to handler
mu sync.RWMutex // Guards concurrent access to fields
enabled bool // Determines if logging is enabled
suspend atomic.Bool // uses suspend path for most actions eg. skipping namespace checks
level lx.LevelType // Minimum log level (e.g., Debug, Info, Warn, Error)
atomicLevel int32 // Shadow copy of level for lock-free checks
namespaces *lx.Namespace // Manages namespace enable/disable states
currentPath string // Current namespace path (e.g., "parent/child")
context lx.Fields // Contextual fields included in all logs
style lx.StyleType // Namespace formatting style (FlatPath or NestedPath)
handler lx.Handler // Output handler for logs (e.g., text, JSON)
middleware []Middleware // Middleware functions to process log entries
prefix string // Prefix prepended to log messages
indent int // Number of double spaces for message indentation
stackBufferSize int // Buffer size for capturing stack traces
separator string // Separator for namespace paths (e.g., "/")
entries atomic.Int64 // Tracks total log entries sent to handler
fatalExits bool
fatalStack bool
labels atomic.Pointer[[]string]
}
// New creates a new Logger with the given namespace and optional configurations.
@@ -55,9 +56,10 @@ func New(namespace string, opts ...Option) *Logger {
logger := &Logger{
enabled: lx.DefaultEnabled, // Defaults to disabled (false)
level: lx.LevelDebug, // Default minimum log level
atomicLevel: int32(lx.LevelDebug), // Initialize atomic level
namespaces: defaultStore, // Shared namespace store
currentPath: namespace, // Initial namespace path
context: make(map[string]interface{}), // Empty context for fields
context: make(lx.Fields, 0, 10), // Empty context for fields
style: lx.FlatPath, // Default namespace style ([parent/child])
handler: lh.NewTextHandler(os.Stdout), // Default text output to stdout
middleware: make([]Middleware, 0), // Empty middleware chain
@@ -116,28 +118,15 @@ func (l *Logger) AddContext(pairs ...any) *Logger {
l.mu.Lock()
defer l.mu.Unlock()
// Lazy initialization of context map
if l.context == nil {
l.context = make(map[string]interface{})
l.context = make(lx.Fields, 0, len(pairs)/2)
}
// Process key-value pairs
for i := 0; i < len(pairs)-1; i += 2 {
key, ok := pairs[i].(string)
if !ok {
l.Warnf("AddContext: non-string key at index %d: %v", i, pairs[i])
continue
if key, ok := pairs[i].(string); ok {
l.context = append(l.context, lx.Field{Key: key, Value: pairs[i+1]})
}
value := pairs[i+1]
l.context[key] = value
}
// Optional: warn about uneven number of arguments
if len(pairs)%2 != 0 {
l.Warn("AddContext: uneven number of arguments, last value ignored")
}
return l
}
@@ -195,18 +184,19 @@ func (l *Logger) Clone() *Logger {
defer l.mu.RUnlock()
return &Logger{
enabled: l.enabled, // Copy enablement state
level: l.level, // Copy log level
namespaces: l.namespaces, // Share namespace store
currentPath: l.currentPath, // Copy namespace path
context: make(map[string]interface{}), // Fresh context map
style: l.style, // Copy namespace style
handler: l.handler, // Copy output handler
middleware: l.middleware, // Copy middleware chain
prefix: l.prefix, // Copy message prefix
indent: l.indent, // Copy indentation level
stackBufferSize: l.stackBufferSize, // Copy stack trace buffer size
separator: l.separator, // Default separator ("/")
enabled: l.enabled, // Copy enablement state
level: l.level, // Copy log level
atomicLevel: l.atomicLevel, // Copy atomic level
namespaces: l.namespaces, // Share namespace store
currentPath: l.currentPath, // Copy namespace path
context: make(lx.Fields, 0, 10), // Fresh context map
style: l.style, // Copy namespace style
handler: l.handler, // Copy output handler
middleware: l.middleware, // Copy middleware chain
prefix: l.prefix, // Copy message prefix
indent: l.indent, // Copy indentation level
stackBufferSize: l.stackBufferSize, // Copy stack trace buffer size
separator: l.separator, // Default separator ("/")
suspend: l.suspend,
}
}
@@ -227,9 +217,10 @@ func (l *Logger) Context(fields map[string]interface{}) *Logger {
newLogger := &Logger{
enabled: l.enabled,
level: l.level,
atomicLevel: l.atomicLevel,
namespaces: l.namespaces,
currentPath: l.currentPath,
context: make(map[string]interface{}),
context: make(lx.Fields, 0, len(l.context)+len(fields)),
style: l.style,
handler: l.handler,
middleware: l.middleware,
@@ -238,37 +229,21 @@ func (l *Logger) Context(fields map[string]interface{}) *Logger {
stackBufferSize: l.stackBufferSize,
separator: l.separator,
suspend: l.suspend,
fatalExits: l.fatalExits,
fatalStack: l.fatalStack,
}
// Copy parent's context fields
for k, v := range l.context {
newLogger.context[k] = v
}
// Copy parent's context fields (in order)
newLogger.context = append(newLogger.context, l.context...)
// Add new fields
// Add new fields from map
for k, v := range fields {
newLogger.context[k] = v
newLogger.context = append(newLogger.context, lx.Field{Key: k, Value: v})
}
return newLogger
}
// Dbg logs debug information, including the source file, line number, and expression
// value, capturing the calling line of code. It is useful for debugging without temporary
// print statements.
// Example:
//
// x := 42
// logger.Dbg(x) // Output: [file.go:123] x = 42
func (l *Logger) Dbg(values ...interface{}) {
// Skip logging if Info level is not enabled
if !l.shouldLog(lx.LevelInfo) {
return
}
l.dbg(2, values...)
}
// Debug logs a message at Debug level, formatting it and delegating to the internal
// log method. It is thread-safe.
// Example:
@@ -434,7 +409,7 @@ func (l *Logger) output(skip int, values ...interface{}) {
"error": err.Error(),
}, " ", " ")
}
l.log(lx.LevelInfo, lx.ClassText, header+string(b), nil, false)
l.log(lx.LevelInfo, lx.ClassJSON, header+string(b), nil, false)
}
}
@@ -503,10 +478,11 @@ func (l *Logger) Err(errs ...error) {
}
l.mu.Lock()
defer l.mu.Unlock()
// Initialize context map if nil
// Initialize context slice if nil
if l.context == nil {
l.context = make(map[string]interface{})
l.context = make(lx.Fields, 0, 4)
}
// Collect non-nil errors and build log message
@@ -527,15 +503,14 @@ func (l *Logger) Err(errs ...error) {
if count > 0 {
if count == 1 {
// Store single error directly
l.context["error"] = nonNilErrors[0]
l.context = append(l.context, lx.Field{Key: "error", Value: nonNilErrors[0]})
} else {
// Store slice of errors
l.context["error"] = nonNilErrors
l.context = append(l.context, lx.Field{Key: "error", Value: nonNilErrors})
}
// Log concatenated error messages
l.log(lx.LevelError, lx.ClassText, builder.String(), nil, false)
}
l.mu.Unlock()
}
// Error logs a message at Error level, formatting it and delegating to the internal
@@ -616,29 +591,27 @@ func (l *Logger) Fatalf(format string, args ...any) {
// logger := New("app").Enable()
// logger.Field(map[string]interface{}{"user": "alice"}).Info("Action") // Output: [app] INFO: Action [user=alice]
func (l *Logger) Field(fields map[string]interface{}) *FieldBuilder {
fb := &FieldBuilder{logger: l, fields: make(map[string]interface{})}
fb := &FieldBuilder{logger: l, fields: make(lx.Fields, 0, len(fields))}
// check if suspended
if l.suspend.Load() {
return fb
}
// Copy fields from input map to FieldBuilder
// Copy fields from input map to FieldBuilder (note: Go map iteration order is randomized, not insertion order)
for k, v := range fields {
fb.fields[k] = v
fb.fields = append(fb.fields, lx.Field{Key: k, Value: v})
}
return fb
}
// Fields starts a fluent chain for adding fields using variadic key-value pairs,
// creating a FieldBuilder. Non-string keys or uneven pairs add an error field. It is
// thread-safe via the FieldBuilders logger.
// Fields starts a fluent chain for adding fields using variadic key-value pairs.
// It creates a FieldBuilder to attach fields, handling non-string keys or uneven pairs by
// adding an error field. Thread-safe via the FieldBuilder's logger.
// Example:
//
// logger := New("app").Enable()
// logger.Fields("user", "alice").Info("Action") // Output: [app] INFO: Action [user=alice]
func (l *Logger) Fields(pairs ...any) *FieldBuilder {
fb := &FieldBuilder{logger: l, fields: make(map[string]interface{})}
fb := &FieldBuilder{logger: l, fields: make(lx.Fields, 0, len(pairs)/2)}
if l.suspend.Load() {
return fb
@@ -647,15 +620,21 @@ func (l *Logger) Fields(pairs ...any) *FieldBuilder {
// Process key-value pairs
for i := 0; i < len(pairs)-1; i += 2 {
if key, ok := pairs[i].(string); ok {
fb.fields[key] = pairs[i+1]
fb.fields = append(fb.fields, lx.Field{Key: key, Value: pairs[i+1]})
} else {
// Log error for non-string keys
fb.fields["error"] = fmt.Errorf("non-string key in Fields: %v", pairs[i])
fb.fields = append(fb.fields, lx.Field{
Key: "error",
Value: fmt.Errorf("non-string key in Fields: %v", pairs[i]),
})
}
}
// Log error for uneven pairs
if len(pairs)%2 != 0 {
fb.fields["error"] = fmt.Errorf("uneven key-value pairs in Fields: [%v]", pairs[len(pairs)-1])
fb.fields = append(fb.fields, lx.Field{
Key: "error",
Value: fmt.Errorf("uneven key-value pairs in Fields: [%v]", pairs[len(pairs)-1]),
})
}
return fb
}
@@ -669,7 +648,12 @@ func (l *Logger) Fields(pairs ...any) *FieldBuilder {
func (l *Logger) GetContext() map[string]interface{} {
l.mu.RLock()
defer l.mu.RUnlock()
return l.context
// Convert slice to map for backward compatibility
contextMap := make(map[string]interface{}, len(l.context))
for _, pair := range l.context {
contextMap[pair.Key] = pair.Value
}
return contextMap
}
// GetHandler returns the logger's current handler for customization or inspection.
@@ -801,6 +785,28 @@ func (l *Logger) Len() int64 {
return l.entries.Load()
}
// Labels temporarily attaches one or more label names to the logger for the next log entry.
// Labels are typically used for metrics, benchmarking, tracing, or categorizing logs in a structured way.
//
// The labels are stored atomically and intended to be short-lived, applying only to the next
// log operation (or until overwritten by a subsequent call to Labels). Multiple labels can
// be provided as separate string arguments.
//
// Example usage:
//
// logger := New("app").Enable()
//
// // Add labels for a specific operation
// logger.Labels("load_users", "process_orders").Measure(func() {
// // ... perform work ...
// }, func() {
// // ... optional callback ...
// })
func (l *Logger) Labels(names ...string) *Logger {
l.labels.Store(&names) // store temporarily
return l
}
// Level sets the minimum log level, ignoring messages below it. It is thread-safe using
// a write lock and returns the logger for chaining.
// Example:
@@ -812,6 +818,7 @@ func (l *Logger) Level(level lx.LevelType) *Logger {
l.mu.Lock()
defer l.mu.Unlock()
l.level = level
atomic.StoreInt32(&l.atomicLevel, int32(level))
return l
}
@@ -879,32 +886,6 @@ func (l *Logger) mark(skip int, names ...string) {
l.log(lx.LevelInfo, lx.ClassRaw, out, nil, false)
}
// Measure benchmarks function execution, logging the duration at Info level with a
// "duration" field. It is thread-safe via Fields and log methods.
// Example:
//
// logger := New("app").Enable()
// duration := logger.Measure(func() { time.Sleep(time.Millisecond) })
// // Output: [app] INFO: function executed [duration=~1ms]
func (l *Logger) Measure(fns ...func()) time.Duration {
start := time.Now()
for _, fn := range fns {
if fn != nil {
fn()
}
}
duration := time.Since(start)
l.Fields(
"duration_ns", duration.Nanoseconds(),
"duration", duration.String(),
"duration_ms", fmt.Sprintf("%.3fms", float64(duration.Nanoseconds())/1e6),
).Infof("execution completed")
return duration
}
// Namespace creates a child logger with a sub-namespace appended to the current path,
// inheriting the parents configuration but with an independent context. It is thread-safe
// using a read lock.
@@ -931,9 +912,10 @@ func (l *Logger) Namespace(name string) *Logger {
return &Logger{
enabled: l.enabled,
level: l.level,
atomicLevel: l.atomicLevel,
namespaces: l.namespaces,
currentPath: fullPath,
context: make(map[string]interface{}),
context: make(lx.Fields, 0, 10),
style: l.style,
handler: l.handler,
middleware: l.middleware,
@@ -1253,6 +1235,17 @@ func (l *Logger) Timestamped(enable bool, format ...string) *Logger {
return l
}
// Toggle enables or disables the logger based on the provided boolean value and returns the updated logger instance.
func (l *Logger) Toggle(v bool) *Logger {
if v {
l.Resume()
return l.Enable()
}
l.Suspend()
return l.Disable()
}
// Use adds a middleware function to process log entries before they are handled, returning
// a Middleware handle for removal. Middleware returning a non-nil error stops the log.
// It is thread-safe using a write lock.
@@ -1315,58 +1308,6 @@ func (l *Logger) Warnf(format string, args ...any) {
l.Warn(fmt.Sprintf(format, args...))
}
// dbg is an internal helper for Dbg, logging debug information with source file and line
// number, extracting the calling line of code. It is thread-safe via the log method.
// Example (internal usage):
//
// logger.Dbg(x) // Calls dbg(2, x)
func (l *Logger) dbg(skip int, values ...interface{}) {
for _, exp := range values {
// Get caller information (file, line)
_, file, line, ok := runtime.Caller(skip)
if !ok {
l.log(lx.LevelError, lx.ClassText, "Dbg: Unable to parse runtime caller", nil, false)
return
}
// Open source file
f, err := os.Open(file)
if err != nil {
l.log(lx.LevelError, lx.ClassText, "Dbg: Unable to open expected file", nil, false)
return
}
// Scan file to find the line
scanner := bufio.NewScanner(f)
scanner.Split(bufio.ScanLines)
var out string
i := 1
for scanner.Scan() {
if i == line {
// Extract expression between parentheses
v := scanner.Text()[strings.Index(scanner.Text(), "(")+1 : len(scanner.Text())-strings.Index(reverseString(scanner.Text()), ")")-1]
// Format output with file, line, expression, and value
out = fmt.Sprintf("[%s:%d] %s = %+v", file[len(file)-strings.Index(reverseString(file), "/"):], line, v, exp)
break
}
i++
}
if err := scanner.Err(); err != nil {
l.log(lx.LevelError, lx.ClassText, err.Error(), nil, false)
return
}
// Log based on value type
switch exp.(type) {
case error:
l.log(lx.LevelError, lx.ClassText, out, nil, false)
default:
l.log(lx.LevelInfo, lx.ClassText, out, nil, false)
}
f.Close()
}
}
// joinPath joins a base path and a relative path using the logger's separator, handling
// empty base or relative paths. It is used internally for namespace path construction.
// Example (internal usage):
@@ -1394,7 +1335,7 @@ func (l *Logger) joinPath(base, relative string) string {
//
// logger := New("app").Enable()
// logger.Info("Test") // Calls log(lx.LevelInfo, "Test", nil, false)
func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields map[string]interface{}, withStack bool) {
func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields lx.Fields, withStack bool) {
// Skip logging if level is not enabled
if !l.shouldLog(level) {
return
@@ -1408,9 +1349,6 @@ func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields
buf := make([]byte, l.stackBufferSize)
l.mu.RUnlock()
n := runtime.Stack(buf, false)
if fields == nil {
fields = make(map[string]interface{})
}
stack = buf[:n]
}
@@ -1428,30 +1366,33 @@ func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields
builder.WriteString(msg)
finalMsg := builder.String()
// Create log entry
// Create combined fields slice - THIS PRESERVES ORDER!
// Optimized slice allocation
var combinedFields lx.Fields
if len(l.context) == 0 {
combinedFields = fields
} else if len(fields) == 0 {
combinedFields = l.context
} else {
combinedFields = make(lx.Fields, 0, len(l.context)+len(fields))
// Add context fields first (in order)
combinedFields = append(combinedFields, l.context...)
// Add immediate fields
combinedFields = append(combinedFields, fields...)
}
// Create log entry with ordered fields
entry := &lx.Entry{
Timestamp: time.Now(),
Level: level,
Message: finalMsg,
Namespace: l.currentPath,
Fields: fields,
Fields: combinedFields, // Already ordered!
Style: l.style,
Class: class,
Stack: stack,
}
// Merge context fields, avoiding overwrites
if len(l.context) > 0 {
if entry.Fields == nil {
entry.Fields = make(map[string]interface{})
}
for k, v := range l.context {
if _, exists := entry.Fields[k]; !exists {
entry.Fields[k] = v
}
}
}
// Apply middleware, stopping if any returns an error
for _, mw := range l.middleware {
if err := mw.fn.Handle(entry); err != nil {
@@ -1486,8 +1427,8 @@ func (l *Logger) shouldLog(level lx.LevelType) bool {
return false
}
// Skip if log level is below minimum
if level > l.level {
// Atomic fast path: read level without lock
if level > lx.LevelType(atomic.LoadInt32(&l.atomicLevel)) {
return false
}

140
vendor/github.com/olekukonko/ll/lx/field.go generated vendored Normal file
View File

@@ -0,0 +1,140 @@
package lx
import (
"fmt"
"strings"
)
// Field represents a single key-value pair attached to a log entry.
// The key is always a string; the value may be of any type.
type Field struct {
	Key   string
	Value interface{}
}

// Fields is an ordered collection of key-value pairs.
// Unlike a map, a Fields slice preserves insertion order, which keeps
// formatted log output deterministic.
type Fields []Field

// Map converts the Fields slice to a map[string]interface{}.
// This is useful for backward compatibility or when map operations are needed.
// If the slice contains duplicate keys, the last occurrence wins.
// Example:
//
//	fields := lx.Fields{{"user", "alice"}, {"age", 30}}
//	m := fields.Map() // Returns map[string]interface{}{"user": "alice", "age": 30}
func (f Fields) Map() map[string]interface{} {
	m := make(map[string]interface{}, len(f))
	for _, pair := range f {
		m[pair.Key] = pair.Value
	}
	return m
}

// Get returns the value for a given key and a boolean indicating if the key was found.
// Lookup is O(n), which is fine for the small field counts typical of log entries.
// If the key occurs more than once, the first occurrence is returned.
// Example:
//
//	fields := lx.Fields{{"user", "alice"}, {"age", 30}}
//	value, found := fields.Get("user") // Returns "alice", true
func (f Fields) Get(key string) (interface{}, bool) {
	for _, pair := range f {
		if pair.Key == key {
			return pair.Value, true
		}
	}
	return nil, false
}

// Filter returns a new Fields slice containing only pairs where the predicate returns true.
// The receiver is not modified and the relative order of kept pairs is preserved.
// Example:
//
//	fields := lx.Fields{{"user", "alice"}, {"password", "secret"}, {"age", 30}}
//	filtered := fields.Filter(func(key string, value interface{}) bool {
//		return key != "password" // Remove sensitive fields
//	})
func (f Fields) Filter(predicate func(key string, value interface{}) bool) Fields {
	result := make(Fields, 0, len(f))
	for _, pair := range f {
		if predicate(pair.Key, pair.Value) {
			result = append(result, pair)
		}
	}
	return result
}

// Translate returns a new Fields slice with keys translated according to the provided mapping.
// Keys not in the mapping are passed through unchanged. This is useful for adapters that
// need to rename fields for a specific backend (e.g. Victoria).
// Example:
//
//	fields := lx.Fields{{"user", "alice"}, {"timestamp", time.Now()}}
//	translated := fields.Translate(map[string]string{
//		"user":      "username",
//		"timestamp": "ts",
//	})
//	// Returns: {{"username", "alice"}, {"ts", time.Now()}}
func (f Fields) Translate(mapping map[string]string) Fields {
	result := make(Fields, len(f))
	for i, pair := range f {
		if newKey, ok := mapping[pair.Key]; ok {
			result[i] = Field{Key: newKey, Value: pair.Value}
		} else {
			result[i] = pair
		}
	}
	return result
}

// Merge merges another Fields slice into this one, with the other slice's fields taking
// precedence for duplicate keys (the first occurrence of an existing key is overwritten
// in place; new keys are appended in order). Neither receiver nor argument is modified.
// Example:
//
//	base := lx.Fields{{"user", "alice"}, {"age", 30}}
//	additional := lx.Fields{{"age", 31}, {"city", "NYC"}}
//	merged := base.Merge(additional)
//	// Returns: {{"user", "alice"}, {"age", 31}, {"city", "NYC"}}
func (f Fields) Merge(other Fields) Fields {
	result := make(Fields, 0, len(f)+len(other))
	result = append(result, f...)
	// Index of the first occurrence of each key currently in result.
	// Replaces the previous O(n^2) rescan per merged key (and the dead,
	// write-only `seen` map the original allocated but never read).
	idx := make(map[string]int, len(result)+len(other))
	for i, pair := range result {
		if _, ok := idx[pair.Key]; !ok {
			idx[pair.Key] = i
		}
	}
	for _, pair := range other {
		if i, ok := idx[pair.Key]; ok {
			result[i] = pair // Overwrite first occurrence
		} else {
			idx[pair.Key] = len(result)
			result = append(result, pair)
		}
	}
	return result
}
// String returns a human-readable string representation of the fields.
// Pairs are rendered in insertion order as key=value, separated by spaces
// and wrapped in brackets.
// Example:
//
//	fields := lx.Fields{{"user", "alice"}, {"age", 30}}
//	str := fields.String() // Returns: "[user=alice age=30]"
func (f Fields) String() string {
	rendered := make([]string, len(f))
	for i, pair := range f {
		rendered[i] = pair.Key + "=" + fmt.Sprint(pair.Value)
	}
	return LeftBracket + strings.Join(rendered, Space) + RightBracket
}

67
vendor/github.com/olekukonko/ll/lx/interface.go generated vendored Normal file
View File

@@ -0,0 +1,67 @@
package lx
import "io"
// Handler defines the interface for processing log entries.
// Implementations (e.g., TextHandler, JSONHandler) format and output log entries to various
// destinations (e.g., stdout, files). The Handle method returns an error if processing fails,
// allowing the logger to handle output failures gracefully.
//
// Example (simplified handler implementation):
//
//	type MyHandler struct{}
//	func (h *MyHandler) Handle(e *Entry) error {
//		fmt.Printf("[%s] %s: %s\n", e.Namespace, e.Level.String(), e.Message)
//		return nil
//	}
type Handler interface {
	Handle(e *Entry) error // Processes a log entry; a non-nil error signals a processing/output failure
}
// Outputter defines the interface for handlers that support dynamic output
// destination changes. Implementations can switch their output writer at runtime,
// e.g. for log rotation or redirecting logs to a new destination.
//
// Example usage:
//
//	h := &JSONHandler{}
//	h.Output(os.Stderr) // Switch to stderr
//	h.Output(file)      // Switch to file
type Outputter interface {
	Output(w io.Writer) // Replaces the handler's current output writer
}
// HandlerOutputter combines the Handler and Outputter interfaces.
// Types implementing this interface can both process log entries and
// dynamically change their output destination at runtime.
//
// This is useful for creating flexible logging handlers that support
// features like log rotation, output redirection, or runtime configuration.
//
// Example usage:
//
//	var ho HandlerOutputter = &TextHandler{}
//	// Handle log entries
//	ho.Handle(&Entry{...})
//	// Switch output destination
//	ho.Output(os.Stderr)
//
// Common implementations include TextHandler and JSONHandler when they
// support output destination changes.
type HandlerOutputter interface {
	Handler   // Can process log entries (Handle(e *Entry) error)
	Outputter // Can change output destination (Output(w io.Writer))
}
// Timestamper defines an interface for handlers that support timestamp configuration.
// It includes a method to enable or disable timestamp logging and optionally set the
// timestamp format.
type Timestamper interface {
	// Timestamped enables or disables timestamp logging and allows specifying an
	// optional format string (e.g. a time layout such as time.RFC3339).
	//
	// Parameters:
	//   enable: Boolean to enable or disable timestamp logging
	//   format: Optional string(s) to specify the timestamp format
	Timestamped(enable bool, format ...string)
}
// Wrap is a handler decorator function that transforms a log handler.
// It takes an existing handler as input and returns a new, wrapped handler
// that adds functionality (like filtering, transformation, or routing)
// before delegating to the next handler in the chain.
//
// Example:
//
//	var passthrough Wrap = func(next Handler) Handler {
//		return next // add filtering/transformation here
//	}
type Wrap func(next Handler) Handler

View File

@@ -1,10 +1,5 @@
package lx
import (
"strings"
"time"
)
// Formatting constants for log output.
// These constants define the characters used to format log messages, ensuring consistency
// across handlers (e.g., text, JSON, colorized). They are used to construct namespace paths,
@@ -16,7 +11,7 @@ const (
Arrow = "→" // Arrow for NestedPath style namespaces (e.g., [parent]→[child])
LeftBracket = "[" // Opening bracket for namespaces and fields (e.g., [app])
RightBracket = "]" // Closing bracket for namespaces and fields (e.g., [app])
Colon = ":" // Separator after namespace or level (e.g., [app]: INFO:)
Colon = ":" // Separator after namespace or level (e.g., [app]: INFO:) can also be "|"
Dot = "." // Separator for namespace paths (e.g., "parent.child")
Newline = "\n" // Newline for separating log entries or stack trace lines
)
@@ -25,7 +20,7 @@ const (
// It specifies whether logging is enabled by default for new Logger instances in the ll package.
// Set to false to prevent logging until explicitly enabled.
const (
DefaultEnabled = false // Default state for new loggers (disabled)
	DefaultEnabled = true // Default state for new loggers (enabled)
)
// Log level constants, ordered by increasing severity.
@@ -57,6 +52,9 @@ const (
DumpString = "DUMP"
SpecialString = "SPECIAL"
RawString = "RAW"
InspectString = "INSPECT"
DbgString = "DBG"
TimedString = "TIMED"
)
// Log class constants, defining the type of log entry.
@@ -68,7 +66,10 @@ const (
ClassDump // Dump entries for hex/ASCII dumps
ClassSpecial // Special entries for custom or non-standard logs
ClassRaw // Raw entries for unformatted output
ClassUnknown // Raw entries for unformatted output
ClassInspect // Inspect entries for debugging
	ClassDbg    // Dbg entries for debug-style output
	ClassTimed  // Timed entries for duration logging
ClassUnknown // Unknown output
)
// Namespace style constants.
@@ -78,151 +79,3 @@ const (
FlatPath StyleType = iota // Formats namespaces as [parent/child]
NestedPath // Formats namespaces as [parent]→[child]
)
// LevelType represents the severity of a log message.
// It is an integer type used to define log levels (Debug, Info, Warn, Error, None), with associated
// string representations for display in log output.
type LevelType int
// String converts a LevelType to its string representation.
// It maps each level constant to a human-readable string, returning "UNKNOWN" for invalid levels.
// Used by handlers to display the log level in output.
// Example:
//
// var level lx.LevelType = lx.LevelInfo
// fmt.Println(level.String()) // Output: INFO
func (l LevelType) String() string {
switch l {
case LevelDebug:
return DebugString
case LevelInfo:
return InfoString
case LevelWarn:
return WarnString
case LevelError:
return ErrorString
case LevelFatal:
return FatalString
case LevelNone:
return NoneString
default:
return UnknownString
}
}
// LevelParse converts a string to its corresponding LevelType.
// It parses a string (case-insensitive) and returns the corresponding LevelType, defaulting to
// LevelUnknown for unrecognized strings. Supports "WARNING" as an alias for "WARN".
func LevelParse(s string) LevelType {
switch strings.ToUpper(s) {
case DebugString:
return LevelDebug
case InfoString:
return LevelInfo
case WarnString, WarningString: // Allow both "WARN" and "WARNING"
return LevelWarn
case ErrorString:
return LevelError
case NoneString:
return LevelNone
default:
return LevelUnknown
}
}
// StyleType defines how namespace paths are formatted in log output.
// It is an integer type used to select between FlatPath ([parent/child]) and NestedPath
// ([parent]→[child]) styles, affecting how handlers render namespace hierarchies.
type StyleType int
// Entry represents a single log entry passed to handlers.
// It encapsulates all information about a log message, including its timestamp, severity,
// content, namespace, metadata, and formatting style. Handlers process Entry instances
// to produce formatted output (e.g., text, JSON). The struct is immutable once created,
// ensuring thread-safety in handler processing.
type Entry struct {
Timestamp time.Time // Time the log was created
Level LevelType // Severity level of the log (Debug, Info, Warn, Error, None)
Message string // Log message content
Namespace string // Namespace path (e.g., "parent/child")
Fields map[string]interface{} // Additional key-value metadata (e.g., {"user": "alice"})
Style StyleType // Namespace formatting style (FlatPath or NestedPath)
Error error // Associated error, if any (e.g., for error logs)
Class ClassType // Type of log entry (Text, JSON, Dump, Special, Raw)
Stack []byte // Stack trace data (if present)
Id int `json:"-"` // Unique ID for the entry, ignored in JSON output
}
// Handler defines the interface for processing log entries.
// Implementations (e.g., TextHandler, JSONHandler) format and output log entries to various
// destinations (e.g., stdout, files). The Handle method returns an error if processing fails,
// allowing the logger to handle output failures gracefully.
// Example (simplified handler implementation):
//
// type MyHandler struct{}
// func (h *MyHandler) Handle(e *Entry) error {
// fmt.Printf("[%s] %s: %s\n", e.Namespace, e.Level.String(), e.Message)
// return nil
// }
type Handler interface {
Handle(e *Entry) error // Processes a log entry, returning any error
}
// Timestamper defines an interface for handlers that support timestamp configuration.
// It includes a method to enable or disable timestamp logging and optionally set the timestamp format.
type Timestamper interface {
// Timestamped enables or disables timestamp logging and allows specifying an optional format.
// Parameters:
// enable: Boolean to enable or disable timestamp logging
// format: Optional string(s) to specify the timestamp format
Timestamped(enable bool, format ...string)
}
// ClassType represents the type of a log entry.
// It is an integer type used to categorize log entries (Text, JSON, Dump, Special, Raw),
// influencing how handlers process and format them.
type ClassType int
// String converts a ClassType to its string representation.
// It maps each class constant to a human-readable string, returning "UNKNOWN" for invalid classes.
// Used by handlers to indicate the entry type in output (e.g., JSON fields).
// Example:
//
// var class lx.ClassType = lx.ClassText
// fmt.Println(class.String()) // Output: TEST
func (t ClassType) String() string {
switch t {
case ClassText:
return TextString
case ClassJSON:
return JSONString
case ClassDump:
return DumpString
case ClassSpecial:
return SpecialString
case ClassRaw:
return RawString
default:
return UnknownString
}
}
// ParseClass converts a string to its corresponding ClassType.
// It parses a string (case-insensitive) and returns the corresponding ClassType, defaulting to
// ClassUnknown for unrecognized strings.
func ParseClass(s string) ClassType {
switch strings.ToUpper(s) {
case TextString:
return ClassText
case JSONString:
return ClassJSON
case DumpString:
return ClassDump
case SpecialString:
return ClassSpecial
case RawString:
return ClassRaw
default:
return ClassUnknown
}
}

144
vendor/github.com/olekukonko/ll/lx/types.go generated vendored Normal file
View File

@@ -0,0 +1,144 @@
package lx
import (
"strings"
"time"
)
// LevelType represents the severity of a log message.
// It is an integer type used to define log levels (Debug, Info, Warn, Error, None), with associated
// string representations for display in log output.
type LevelType int
// String converts a LevelType to its string representation.
// It maps each level constant to a human-readable string, returning "UNKNOWN"
// for any unrecognized level. Handlers use it to display the log level in output.
// Example:
//
//	var level lx.LevelType = lx.LevelInfo
//	fmt.Println(level.String()) // Output: INFO
func (l LevelType) String() string {
	names := map[LevelType]string{
		LevelDebug: DebugString,
		LevelInfo:  InfoString,
		LevelWarn:  WarnString,
		LevelError: ErrorString,
		LevelFatal: FatalString,
		LevelNone:  NoneString,
	}
	if name, ok := names[l]; ok {
		return name
	}
	return UnknownString
}
// Name returns the display label for a log entry: for class-driven entries
// (Raw, Dump, Inspect, Dbg, Timed) it returns the class name; for all other
// classes it falls back to the level's own string representation.
func (l LevelType) Name(class ClassType) string {
	switch class {
	case ClassRaw, ClassDump, ClassInspect, ClassDbg, ClassTimed:
		return class.String()
	}
	return l.String()
}
// LevelParse converts a string to its corresponding LevelType.
// Matching is case-insensitive; unrecognized strings yield LevelUnknown.
// "WARNING" is accepted as an alias for "WARN".
func LevelParse(s string) LevelType {
	lookup := map[string]LevelType{
		DebugString:   LevelDebug,
		InfoString:    LevelInfo,
		WarnString:    LevelWarn,
		WarningString: LevelWarn, // alias for WARN
		ErrorString:   LevelError,
		NoneString:    LevelNone,
	}
	if lvl, ok := lookup[strings.ToUpper(s)]; ok {
		return lvl
	}
	return LevelUnknown
}
// Entry represents a single log entry passed to handlers.
// It encapsulates all information about a log message, including its timestamp,
// severity, content, namespace, metadata, and formatting style. Handlers process
// Entry instances to produce formatted output (e.g., text, JSON). The struct is
// immutable once created, ensuring thread-safety in handler processing.
type Entry struct {
	Timestamp time.Time // Time the log was created
	Level     LevelType // Severity level of the log (Debug, Info, Warn, Error, None)
	Message   string    // Log message content
	Namespace string    // Namespace path (e.g., "parent/child")
	Fields    Fields    // Additional key-value metadata (e.g., {"user": "alice"})
	Style     StyleType // Namespace formatting style (FlatPath or NestedPath)
	Error     error     // Associated error, if any (e.g., for error logs)
	Class     ClassType // Type of log entry (see the ClassType constants)
	Stack     []byte    // Stack trace data (if present)
	Id        int       `json:"-"` // Unique ID for the entry, ignored in JSON output
}
// StyleType defines how namespace paths are formatted in log output.
// It is an integer type used to select between FlatPath ([parent/child]) and NestedPath
// ([parent]→[child]) styles, affecting how handlers render namespace hierarchies.
type StyleType int
// ClassType represents the type of a log entry.
// It is an integer type used to categorize log entries (Text, JSON, Dump, Special, Raw),
// influencing how handlers process and format them.
type ClassType int
// String converts a ClassType to its string representation.
// It maps each class constant to a human-readable string, returning "UNKNOWN"
// for any unrecognized class. Handlers use it to indicate the entry type in
// output (e.g., JSON fields).
// Example:
//
//	var class lx.ClassType = lx.ClassText
//	fmt.Println(class.String()) // Output: TEXT
func (t ClassType) String() string {
	names := map[ClassType]string{
		ClassText:    TextString,
		ClassJSON:    JSONString,
		ClassDump:    DumpString,
		ClassSpecial: SpecialString,
		ClassInspect: InspectString,
		ClassDbg:     DbgString,
		ClassRaw:     RawString,
		ClassTimed:   TimedString,
	}
	if name, ok := names[t]; ok {
		return name
	}
	return UnknownString
}
// ParseClass converts a string to its corresponding ClassType.
// Matching is case-insensitive; unrecognized strings yield ClassUnknown.
// It recognizes every name produced by ClassType.String, so
// ParseClass(c.String()) round-trips for all known classes.
func ParseClass(s string) ClassType {
	switch strings.ToUpper(s) {
	case TextString:
		return ClassText
	case JSONString:
		return ClassJSON
	case DumpString:
		return ClassDump
	case SpecialString:
		return ClassSpecial
	case RawString:
		return ClassRaw
	// The three cases below restore round-trip symmetry with ClassType.String,
	// which emits these names but the previous ParseClass could not parse them.
	case InspectString:
		return ClassInspect
	case DbgString:
		return ClassDbg
	case TimedString:
		return ClassTimed
	default:
		return ClassUnknown
	}
}

388
vendor/github.com/olekukonko/ll/since.go generated vendored Normal file
View File

@@ -0,0 +1,388 @@
package ll
import (
"fmt"
"strings"
"time"
"github.com/olekukonko/ll/lx"
)
// Measure executes one or more functions and logs the duration of each.
// It returns the total cumulative duration across all functions.
//
// Each function in `fns` is run sequentially. If a function is `nil`, it is skipped.
//
// Optional labels previously set via `Labels(...)` are applied to the corresponding function
// by position. If there are fewer labels than functions, missing labels are replaced with
// default names like "fn_0", "fn_1", etc. Labels are cleared after the call to prevent reuse.
//
// Example usage:
//
// logger := New("app").Enable()
//
// // Optional: add labels for functions
// logger.Labels("load_users", "process_orders")
//
// total := logger.Measure(
// func() {
// // simulate work 1
// time.Sleep(100 * time.Millisecond)
// },
// func() {
// // simulate work 2
// time.Sleep(200 * time.Millisecond)
// },
// func() {
// // simulate work 3
// time.Sleep(50 * time.Millisecond)
// },
// )
//
// // Logs something like:
// // [load_users] completed duration=100ms
// // [process_orders] completed duration=200ms
// // [fn_2] completed duration=50ms
//
// Returns the sum of durations of all executed functions.
func (l *Logger) Measure(fns ...func()) time.Duration {
	if len(fns) == 0 {
		return 0
	}

	// Take ownership of any labels set via Labels(...). Swapping in nil clears
	// them atomically so they cannot leak into a later Measure call.
	var labels []string
	if p := l.labels.Swap(nil); p != nil {
		labels = *p
	}

	var total time.Duration
	for i, fn := range fns {
		if fn == nil {
			continue
		}
		// Resolve the display label: the positional label when present and
		// non-empty, otherwise a generated "fn_<index>" name.
		name := fmt.Sprintf("fn_%d", i)
		if i < len(labels) && labels[i] != "" {
			name = labels[i]
		}
		// SinceBuilder starts its timer on creation and logs on Info,
		// returning the measured duration.
		timer := l.Since()
		fn()
		total += timer.Fields("index", i).Info(fmt.Sprintf("[%s] completed", name))
	}
	return total
}
// Since creates a timer that will log the duration when completed.
// If a non-zero startTime is provided, it is used as the start time;
// otherwise the timer starts now.
//
//	defer logger.Since().Info("request")      // Auto-start
//	logger.Since(start).Info("request")       // Manual timing
//	logger.Since().If(debug).Debug("timing")  // Conditional
func (l *Logger) Since(startTime ...time.Time) *SinceBuilder {
	sb := &SinceBuilder{
		logger:    l,
		start:     time.Now(),
		condition: true,
		// fields stays nil; it is lazily initialized on first use.
	}
	if len(startTime) > 0 && !startTime[0].IsZero() {
		sb.start = startTime[0]
	}
	return sb
}
// SinceBuilder provides a fluent API for logging timed operations.
// It mirrors FieldBuilder for field operations; create one via Logger.Since.
type SinceBuilder struct {
	logger    *Logger   // owning logger; supplies level checks, context fields, and output
	start     time.Time // when the timed operation began
	condition bool      // false suppresses logging (set via If/IfErr/IfAny/IfOne)
	fields    lx.Fields // builder-local fields; lazily allocated on first use
}
// ---------------------------------------------------------------------
// Conditional Methods (match conditional.go pattern)
// ---------------------------------------------------------------------
// If adds a condition to this timer - only logs if condition is true.
// Conditions accumulate: once false, the builder stays suppressed.
func (sb *SinceBuilder) If(condition bool) *SinceBuilder {
	if !condition {
		sb.condition = false
	}
	return sb
}
// IfErr adds an error condition - only logs if err != nil.
// Conditions accumulate: once false, the builder stays suppressed.
func (sb *SinceBuilder) IfErr(err error) *SinceBuilder {
	if err == nil {
		sb.condition = false
	}
	return sb
}
// IfAny logs if ANY of the given conditions is true (OR semantics).
// Calling it with no conditions suppresses logging. A builder that is
// already suppressed stays suppressed.
func (sb *SinceBuilder) IfAny(conditions ...bool) *SinceBuilder {
	if !sb.condition {
		return sb
	}
	matched := false
	for _, cond := range conditions {
		if cond {
			matched = true
			break
		}
	}
	sb.condition = matched
	return sb
}
// IfOne logs only if ALL of the given conditions are true (AND semantics).
// With no conditions, the current state is left unchanged. A builder that is
// already suppressed stays suppressed.
func (sb *SinceBuilder) IfOne(conditions ...bool) *SinceBuilder {
	if !sb.condition {
		return sb
	}
	for _, cond := range conditions {
		if !cond {
			sb.condition = false
			break
		}
	}
	return sb
}
// ---------------------------------------------------------------------
// Field Methods - EXACT MATCH with FieldBuilder API
// ---------------------------------------------------------------------
// Fields adds key-value pairs as fields (variadic).
// EXACT match to FieldBuilder.Fields(): pairs are consumed as (key, value)
// with string keys. A non-string key produces an "error" field recording the
// offending value (the paired value is dropped), and a trailing unpaired
// element likewise produces an "error" field. No-op while the logger is
// suspended or the builder's condition is false.
func (sb *SinceBuilder) Fields(pairs ...any) *SinceBuilder {
	if sb.logger.suspend.Load() || !sb.condition {
		return sb
	}
	// Lazy initialization; capacity assumes well-formed (key, value) pairs.
	if sb.fields == nil {
		sb.fields = make(lx.Fields, 0, len(pairs)/2)
	}
	// Process key-value pairs two at a time.
	for i := 0; i < len(pairs)-1; i += 2 {
		if key, ok := pairs[i].(string); ok {
			sb.fields = append(sb.fields, lx.Field{Key: key, Value: pairs[i+1]})
		} else {
			// Non-string key: record a sentinel error field instead
			// (matches FieldBuilder.Fields behavior).
			sb.fields = append(sb.fields, lx.Field{
				Key:   "error",
				Value: fmt.Errorf("missing key '%v'", pairs[i]),
			})
		}
	}
	// Odd number of arguments: the last element has no value
	// (matches FieldBuilder.Fields behavior).
	if len(pairs)%2 != 0 {
		sb.fields = append(sb.fields, lx.Field{
			Key:   "error",
			Value: fmt.Errorf("missing key '%v'", pairs[len(pairs)-1]),
		})
	}
	return sb
}
// Field adds fields from a map.
// EXACT match to FieldBuilder.Field(). No-op while the logger is suspended,
// the builder's condition is false, or the map is empty.
func (sb *SinceBuilder) Field(fields map[string]interface{}) *SinceBuilder {
	if sb.logger.suspend.Load() || !sb.condition || len(fields) == 0 {
		return sb
	}
	// Lazy initialization
	if sb.fields == nil {
		sb.fields = make(lx.Fields, 0, len(fields))
	}
	// Copy fields from the input map. NOTE(review): Go map iteration order is
	// randomized, so the resulting field order is NOT deterministic here.
	for k, v := range fields {
		sb.fields = append(sb.fields, lx.Field{Key: k, Value: v})
	}
	return sb
}
// Err adds one or more errors as a field.
// EXACT match to FieldBuilder.Err(): nil errors are skipped; a single non-nil
// error is stored under key "error" as-is, multiple non-nil errors are stored
// as a slice under the same key. No-op while the logger is suspended or the
// builder's condition is false.
func (sb *SinceBuilder) Err(errs ...error) *SinceBuilder {
	if sb.logger.suspend.Load() || !sb.condition {
		return sb
	}
	// Lazy initialization
	if sb.fields == nil {
		sb.fields = make(lx.Fields, 0, 2)
	}
	// Collect non-nil errors.
	// NOTE(review): `builder` is written below but its result is never read —
	// dead code. Removing it would also orphan the `strings` import in this
	// file; left in place pending an upstream fix.
	var nonNilErrors []error
	var builder strings.Builder
	count := 0
	for i, err := range errs {
		if err != nil {
			if i > 0 && count > 0 {
				builder.WriteString("; ")
			}
			builder.WriteString(err.Error())
			nonNilErrors = append(nonNilErrors, err)
			count++
		}
	}
	if count > 0 {
		if count == 1 {
			sb.fields = append(sb.fields, lx.Field{Key: "error", Value: nonNilErrors[0]})
		} else {
			sb.fields = append(sb.fields, lx.Field{Key: "error", Value: nonNilErrors})
		}
		// Note: Unlike FieldBuilder.Err(), we DON'T log immediately —
		// the error will be included in the timing log.
	}
	return sb
}
// Merge adds additional key-value pairs to the fields.
// EXACT match to FieldBuilder.Merge(): pairs are consumed as (key, value)
// with string keys; a non-string key or a trailing unpaired element is
// recorded as an "error" field. No-op while the logger is suspended or the
// builder's condition is false.
func (sb *SinceBuilder) Merge(pairs ...any) *SinceBuilder {
	if sb.logger.suspend.Load() || !sb.condition {
		return sb
	}
	// Lazily allocate backing storage sized for well-formed pairs.
	if sb.fields == nil {
		sb.fields = make(lx.Fields, 0, len(pairs)/2)
	}
	i := 0
	for ; i+1 < len(pairs); i += 2 {
		key, ok := pairs[i].(string)
		if !ok {
			sb.fields = append(sb.fields, lx.Field{
				Key:   "error",
				Value: fmt.Errorf("non-string key in Merge: %v", pairs[i]),
			})
			continue
		}
		sb.fields = append(sb.fields, lx.Field{Key: key, Value: pairs[i+1]})
	}
	// A leftover element means the argument list had an odd length.
	if i < len(pairs) {
		sb.fields = append(sb.fields, lx.Field{
			Key:   "error",
			Value: fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1]),
		})
	}
	return sb
}
// ---------------------------------------------------------------------
// Logging Methods (match logger pattern)
// ---------------------------------------------------------------------
// Debug logs the elapsed duration at Debug level with the given message
// and returns the duration (returned even when the entry is suppressed).
func (sb *SinceBuilder) Debug(msg string) time.Duration {
	return sb.logAtLevel(lx.LevelDebug, msg)
}

// Info logs the elapsed duration at Info level with the given message
// and returns the duration (returned even when the entry is suppressed).
func (sb *SinceBuilder) Info(msg string) time.Duration {
	return sb.logAtLevel(lx.LevelInfo, msg)
}

// Warn logs the elapsed duration at Warn level with the given message
// and returns the duration (returned even when the entry is suppressed).
func (sb *SinceBuilder) Warn(msg string) time.Duration {
	return sb.logAtLevel(lx.LevelWarn, msg)
}

// Error logs the elapsed duration at Error level with the given message
// and returns the duration (returned even when the entry is suppressed).
func (sb *SinceBuilder) Error(msg string) time.Duration {
	return sb.logAtLevel(lx.LevelError, msg)
}

// Log is an alias for Info (kept for backward compatibility).
func (sb *SinceBuilder) Log(msg string) time.Duration {
	return sb.Info(msg)
}
// logAtLevel handles the actual logging for the public level methods.
// It always returns the elapsed time since sb.start, even when the entry is
// suppressed by condition, suspension, or the logger's level filter.
func (sb *SinceBuilder) logAtLevel(level lx.LevelType, msg string) time.Duration {
	// Fast path - skip field assembly entirely if the entry would be dropped.
	if !sb.condition || sb.logger.suspend.Load() || !sb.logger.shouldLog(level) {
		return time.Since(sb.start)
	}
	duration := time.Since(sb.start)
	// Build final fields in this order (order is part of the output contract):
	//   1. Logger context fields (from logger.context)
	//   2. Builder fields (from sb.fields)
	//   3. Duration fields (always last)
	// Pre-allocate with exact capacity to avoid growth copies.
	totalFields := 0
	if sb.logger.context != nil {
		totalFields += len(sb.logger.context)
	}
	if sb.fields != nil {
		totalFields += len(sb.fields)
	}
	totalFields += 2 // duration_ms, duration
	fields := make(lx.Fields, 0, totalFields)
	// Add logger context fields first (preserves their order).
	if sb.logger.context != nil {
		fields = append(fields, sb.logger.context...)
	}
	// Add builder fields next.
	if sb.fields != nil {
		fields = append(fields, sb.fields...)
	}
	// Add duration fields last so they appear at the end of the entry.
	fields = append(fields,
		lx.Field{Key: "duration_ms", Value: duration.Milliseconds()},
		lx.Field{Key: "duration", Value: duration.String()},
	)
	sb.logger.log(level, lx.ClassTimed, msg, fields, false)
	return duration
}
// ---------------------------------------------------------------------
// Utility Methods
// ---------------------------------------------------------------------
// Reset allows reusing the builder with a new start time.
// Zero-allocation: the fields slice keeps its capacity, and the condition
// is re-armed. An optional non-zero start time overrides time.Now().
func (sb *SinceBuilder) Reset(startTime ...time.Time) *SinceBuilder {
	if len(startTime) > 0 && !startTime[0].IsZero() {
		sb.start = startTime[0]
	} else {
		sb.start = time.Now()
	}
	sb.condition = true
	if sb.fields != nil {
		// Truncate in place: zero length, retained capacity.
		sb.fields = sb.fields[:0]
	}
	return sb
}
// Elapsed returns the duration since the builder's start time without logging
// and without consuming the builder.
func (sb *SinceBuilder) Elapsed() time.Duration {
	return time.Since(sb.start)
}

View File

@@ -448,7 +448,7 @@ func NewInvoiceRenderer() *InvoiceRenderer {
Settings: tw.Settings{Separators: tw.SeparatorsNone, Lines: tw.LinesNone},
Streaming: false,
}
defaultLogger := ll.New("simple-invoice-renderer")
defaultLogger := ll.New("simple-invoice-renderer").Disable()
return &InvoiceRenderer{logger: defaultLogger, rendition: rendition}
}

View File

@@ -1,194 +0,0 @@
goos: darwin
goarch: arm64
pkg: github.com/olekukonko/tablewriter/pkg/twwarp
cpu: Apple M2
│ old.txt │ new.txt │
│ sec/op │ sec/op vs base │
WrapString-8 112.8µ ± 1% 112.9µ ± 2% ~ (p=0.589 n=6)
WrapStringWithSpaces-8 113.4µ ± 1% 113.7µ ± 1% ~ (p=0.310 n=6)
geomean 113.1µ 113.3µ +0.15%
│ old.txt │ new.txt │
│ B/s │ B/s vs base │
WrapString-8 84.92Mi ± 1% 84.82Mi ± 2% ~ (p=0.589 n=6)
WrapStringWithSpaces-8 84.43Mi ± 1% 84.27Mi ± 1% ~ (p=0.310 n=6)
geomean 84.68Mi 84.55Mi -0.15%
│ old.txt │ new.txt │
│ B/op │ B/op vs base │
WrapString-8 47.35Ki ± 0% 47.35Ki ± 0% ~ (p=1.000 n=6) ¹
WrapStringWithSpaces-8 52.76Ki ± 0% 52.76Ki ± 0% ~ (p=1.000 n=6) ¹
geomean 49.98Ki 49.98Ki +0.00%
¹ all samples are equal
│ old.txt │ new.txt │
│ allocs/op │ allocs/op vs base │
WrapString-8 33.00 ± 0% 33.00 ± 0% ~ (p=1.000 n=6) ¹
WrapStringWithSpaces-8 51.00 ± 0% 51.00 ± 0% ~ (p=1.000 n=6) ¹
geomean 41.02 41.02 +0.00%
¹ all samples are equal
pkg: github.com/olekukonko/tablewriter/pkg/twwidth
│ old.txt │ new.txt │
│ sec/op │ sec/op vs base │
WidthFunction/SimpleASCII_EAfalse_NoCache-8 387.6n ± 1% 368.4n ± 2% -4.97% (p=0.002 n=6)
WidthFunction/SimpleASCII_EAfalse_CacheMiss-8 219.0n ± 127% 217.5n ± 119% ~ (p=0.372 n=6)
WidthFunction/SimpleASCII_EAfalse_CacheHit-8 14.78n ± 1% 14.54n ± 3% ~ (p=0.061 n=6)
WidthFunction/SimpleASCII_EAtrue_NoCache-8 676.4n ± 1% 366.8n ± 2% -45.77% (p=0.002 n=6)
WidthFunction/SimpleASCII_EAtrue_CacheMiss-8 216.1n ± 375% 216.0n ± 128% ~ (p=0.937 n=6)
WidthFunction/SimpleASCII_EAtrue_CacheHit-8 14.71n ± 0% 14.49n ± 0% -1.53% (p=0.002 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1.027µ ± 3% 1.007µ ± 1% -2.00% (p=0.002 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 219.5n ± 516% 221.4n ± 502% ~ (p=0.515 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 14.81n ± 1% 14.61n ± 1% -1.35% (p=0.009 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1.313µ ± 2% 1.009µ ± 2% -23.15% (p=0.002 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 653.2n ± 150% 218.2n ± 524% ~ (p=0.331 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 14.73n ± 2% 14.50n ± 0% -1.60% (p=0.002 n=6)
WidthFunction/EastAsian_EAfalse_NoCache-8 747.3n ± 1% 336.2n ± 1% -55.02% (p=0.002 n=6)
WidthFunction/EastAsian_EAfalse_CacheMiss-8 226.3n ± 384% 227.4n ± 113% ~ (p=0.937 n=6)
WidthFunction/EastAsian_EAfalse_CacheHit-8 14.74n ± 1% 14.58n ± 1% -1.09% (p=0.011 n=6)
WidthFunction/EastAsian_EAtrue_NoCache-8 965.4n ± 2% 348.7n ± 0% -63.88% (p=0.002 n=6)
WidthFunction/EastAsian_EAtrue_CacheMiss-8 225.4n ± 511% 225.8n ± 111% ~ (p=1.000 n=6)
WidthFunction/EastAsian_EAtrue_CacheHit-8 14.72n ± 1% 14.54n ± 3% ~ (p=0.056 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1376.0n ± 2% 983.8n ± 2% -28.50% (p=0.002 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 633.6n ± 170% 222.4n ± 513% ~ (p=0.974 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 15.73n ± 1% 15.64n ± 1% ~ (p=0.227 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1589.5n ± 1% 996.9n ± 2% -37.29% (p=0.002 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 484.8n ± 309% 221.3n ± 516% ~ (p=0.240 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 15.74n ± 1% 15.73n ± 1% ~ (p=0.485 n=6)
WidthFunction/LongSimpleASCII_EAfalse_NoCache-8 4.916µ ± 3% 4.512µ ± 4% -8.22% (p=0.002 n=6)
WidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 2.430µ ± 114% 2.182µ ± 123% ~ (p=0.699 n=6)
WidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 23.75n ± 3% 23.24n ± 3% ~ (p=0.065 n=6)
WidthFunction/LongSimpleASCII_EAtrue_NoCache-8 9.273µ ± 1% 4.519µ ± 1% -51.27% (p=0.002 n=6)
WidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 4.021µ ± 131% 2.127µ ± 128% ~ (p=0.240 n=6)
WidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 23.50n ± 2% 23.48n ± 1% ~ (p=0.589 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 57.36µ ± 1% 57.33µ ± 2% ~ (p=0.818 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 22.18µ ± 135% 14.55µ ± 299% ~ (p=0.589 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 44.21n ± 1% 44.20n ± 2% ~ (p=0.818 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 60.25µ ± 2% 57.90µ ± 2% -3.90% (p=0.002 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 16.11µ ± 263% 20.02µ ± 183% ~ (p=0.699 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 44.57n ± 1% 44.18n ± 2% ~ (p=0.461 n=6)
geomean 358.5n 283.9n -20.82%
│ old.txt │ new.txt │
│ B/s │ B/s vs base │
WidthFunction/SimpleASCII_EAfalse_NoCache-8 86.11Mi ± 1% 90.63Mi ± 2% +5.24% (p=0.002 n=6)
WidthFunction/SimpleASCII_EAfalse_CacheMiss-8 152.4Mi ± 56% 153.5Mi ± 54% ~ (p=0.394 n=6)
WidthFunction/SimpleASCII_EAfalse_CacheHit-8 2.205Gi ± 1% 2.242Gi ± 3% ~ (p=0.065 n=6)
WidthFunction/SimpleASCII_EAtrue_NoCache-8 49.35Mi ± 1% 91.00Mi ± 2% +84.40% (p=0.002 n=6)
WidthFunction/SimpleASCII_EAtrue_CacheMiss-8 154.5Mi ± 79% 154.5Mi ± 56% ~ (p=0.937 n=6)
WidthFunction/SimpleASCII_EAtrue_CacheHit-8 2.215Gi ± 0% 2.250Gi ± 0% +1.58% (p=0.002 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 56.66Mi ± 2% 57.78Mi ± 1% +1.99% (p=0.002 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 265.1Mi ± 84% 262.7Mi ± 83% ~ (p=0.485 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 3.836Gi ± 1% 3.888Gi ± 1% +1.34% (p=0.009 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 44.30Mi ± 2% 57.65Mi ± 2% +30.14% (p=0.002 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 147.3Mi ± 81% 266.7Mi ± 84% ~ (p=0.310 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 3.856Gi ± 2% 3.919Gi ± 0% +1.63% (p=0.002 n=6)
WidthFunction/EastAsian_EAfalse_NoCache-8 76.58Mi ± 1% 170.21Mi ± 1% +122.28% (p=0.002 n=6)
WidthFunction/EastAsian_EAfalse_CacheMiss-8 252.8Mi ± 79% 251.6Mi ± 53% ~ (p=0.937 n=6)
WidthFunction/EastAsian_EAfalse_CacheHit-8 3.791Gi ± 1% 3.832Gi ± 1% +1.08% (p=0.009 n=6)
WidthFunction/EastAsian_EAtrue_NoCache-8 59.27Mi ± 2% 164.10Mi ± 0% +176.87% (p=0.002 n=6)
WidthFunction/EastAsian_EAtrue_CacheMiss-8 253.9Mi ± 84% 253.4Mi ± 53% ~ (p=1.000 n=6)
WidthFunction/EastAsian_EAtrue_CacheHit-8 3.796Gi ± 1% 3.841Gi ± 3% ~ (p=0.065 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 60.29Mi ± 1% 84.33Mi ± 2% +39.88% (p=0.002 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 227.1Mi ± 79% 373.2Mi ± 84% ~ (p=1.000 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 5.154Gi ± 1% 5.181Gi ± 1% ~ (p=0.240 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 52.19Mi ± 1% 83.23Mi ± 2% +59.47% (p=0.002 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 230.9Mi ± 82% 374.9Mi ± 84% ~ (p=0.240 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 5.147Gi ± 1% 5.152Gi ± 1% ~ (p=0.485 n=6)
WidthFunction/LongSimpleASCII_EAfalse_NoCache-8 104.8Mi ± 3% 114.1Mi ± 4% +8.95% (p=0.002 n=6)
WidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 368.0Mi ± 293% 474.3Mi ± 211% ~ (p=0.699 n=6)
WidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 21.17Gi ± 3% 21.64Gi ± 2% ~ (p=0.065 n=6)
WidthFunction/LongSimpleASCII_EAtrue_NoCache-8 55.54Mi ± 1% 113.97Mi ± 1% +105.21% (p=0.002 n=6)
WidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 399.8Mi ± 232% 577.5Mi ± 149% ~ (p=0.240 n=6)
WidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 21.40Gi ± 2% 21.41Gi ± 1% ~ (p=0.589 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 34.08Mi ± 1% 34.10Mi ± 2% ~ (p=0.784 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 101.5Mi ± 1396% 643.9Mi ± 320% ~ (p=0.589 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 43.18Gi ± 1% 43.20Gi ± 2% ~ (p=0.818 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 32.45Mi ± 2% 33.76Mi ± 2% +4.06% (p=0.002 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 393.0Mi ± 296% 122.4Mi ± 1610% ~ (p=0.699 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 42.83Gi ± 1% 43.21Gi ± 2% ~ (p=0.485 n=6)
geomean 456.4Mi 560.6Mi +22.83%
│ old.txt │ new.txt │
│ B/op │ B/op vs base │
WidthFunction/SimpleASCII_EAfalse_NoCache-8 112.0 ± 1% 113.0 ± 0% ~ (p=0.061 n=6)
WidthFunction/SimpleASCII_EAfalse_CacheMiss-8 55.00 ± 200% 55.00 ± 202% ~ (p=1.000 n=6)
WidthFunction/SimpleASCII_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/SimpleASCII_EAtrue_NoCache-8 113.0 ± 1% 113.0 ± 0% ~ (p=1.000 n=6)
WidthFunction/SimpleASCII_EAtrue_CacheMiss-8 55.00 ± 505% 55.00 ± 205% ~ (p=0.697 n=6)
WidthFunction/SimpleASCII_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 185.0 ± 0% 185.0 ± 1% ~ (p=0.455 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 87.00 ± 402% 87.00 ± 401% ~ (p=1.000 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 185.0 ± 0% 185.0 ± 1% ~ (p=1.000 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 174.00 ± 115% 87.00 ± 401% ~ (p=0.621 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsian_EAfalse_NoCache-8 145.0 ± 0% 146.0 ± 0% +0.69% (p=0.002 n=6)
WidthFunction/EastAsian_EAfalse_CacheMiss-8 87.00 ± 392% 87.00 ± 167% ~ (p=0.697 n=6)
WidthFunction/EastAsian_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsian_EAtrue_NoCache-8 145.0 ± 1% 146.0 ± 1% +0.69% (p=0.013 n=6)
WidthFunction/EastAsian_EAtrue_CacheMiss-8 87.00 ± 392% 87.00 ± 164% ~ (p=0.697 n=6)
WidthFunction/EastAsian_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 193.0 ± 1% 193.0 ± 0% ~ (p=1.000 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 232.0 ± 134% 103.0 ± 485% ~ (p=0.924 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 193.0 ± 0% 193.0 ± 1% ~ (p=1.000 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 185.0 ± 203% 103.0 ± 485% ~ (p=0.621 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongSimpleASCII_EAfalse_NoCache-8 1.153Ki ± 0% 1.150Ki ± 0% ~ (p=0.126 n=6)
WidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 1.050Ki ± 72% 1.047Ki ± 74% ~ (p=0.939 n=6)
WidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongSimpleASCII_EAtrue_NoCache-8 1.152Ki ± 0% 1.155Ki ± 0% +0.30% (p=0.015 n=6)
WidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 1.036Ki ± 71% 1.039Ki ± 76% ~ (p=0.981 n=6)
WidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 1.355Ki ± 0% 1.358Ki ± 0% ~ (p=0.065 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 2.787Ki ± 31% 2.613Ki ± 43% ~ (p=0.805 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 1.358Ki ± 0% 1.361Ki ± 0% ~ (p=0.158 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 2.625Ki ± 43% 2.741Ki ± 37% ~ (p=0.987 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
geomean ² -5.62% ²
¹ all samples are equal
² summaries must be >0 to compute geomean
│ old.txt │ new.txt │
│ allocs/op │ allocs/op vs base │
WidthFunction/SimpleASCII_EAfalse_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/SimpleASCII_EAfalse_CacheMiss-8 1.000 ± 200% 1.000 ± 200% ~ (p=1.000 n=6)
WidthFunction/SimpleASCII_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/SimpleASCII_EAtrue_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/SimpleASCII_EAtrue_CacheMiss-8 1.000 ± 300% 1.000 ± 200% ~ (p=0.697 n=6)
WidthFunction/SimpleASCII_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 6.000 ± 0% 6.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 1.000 ± 600% 1.000 ± 600% ~ (p=1.000 n=6)
WidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 6.000 ± 0% 6.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 3.500 ± 100% 1.000 ± 600% ~ (p=0.610 n=6)
WidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsian_EAfalse_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsian_EAfalse_CacheMiss-8 1.000 ± 300% 1.000 ± 200% ~ (p=0.697 n=6)
WidthFunction/EastAsian_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsian_EAtrue_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsian_EAtrue_CacheMiss-8 1.000 ± 300% 1.000 ± 200% ~ (p=0.697 n=6)
WidthFunction/EastAsian_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 5.000 ± 0% 5.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 3.000 ± 133% 1.000 ± 600% ~ (p=1.000 n=6)
WidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 5.000 ± 0% 5.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 2.500 ± 180% 1.000 ± 600% ~ (p=0.610 n=6)
WidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongSimpleASCII_EAfalse_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3.000 ± 67% 3.000 ± 67% ~ (p=1.000 n=6)
WidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongSimpleASCII_EAtrue_NoCache-8 3.000 ± 0% 3.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3.000 ± 67% 3.000 ± 67% ~ (p=1.000 n=6)
WidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 9.000 ± 0% 9.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 5.000 ± 100% 3.500 ± 186% ~ (p=0.978 n=6)
WidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 9.000 ± 0% 9.000 ± 0% ~ (p=1.000 n=6) ¹
WidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 4.000 ± 150% 4.500 ± 122% ~ (p=0.952 n=6)
WidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=6) ¹
geomean ² -9.28% ²
¹ all samples are equal
² summaries must be >0 to compute geomean

10
vendor/github.com/olekukonko/tablewriter/comb.hcl generated vendored Normal file
View File

@@ -0,0 +1,10 @@
recursive = true
output_file = "all.txt"
extensions = [".go"]
exclude_dirs = [
"_examples", "_readme", "_lab","_tmp","pkg","lab","cmd","test.txt","tmp",
"_readme","pkg","renderer"
]
exclude_files = ["README.md","README_LEGACY.md","MIGRATION.md","test.hcl","csv.go"]
use_gitignore = true
detailed = true

View File

@@ -81,6 +81,13 @@ func (b *ConfigBuilder) WithTrimSpace(state tw.State) *ConfigBuilder {
return b
}
// WithTrimTab enables or disables automatic trimming of leading/trailing tabs.
// Useful for preserving indentation in code blocks while trimming other whitespace.
func (b *ConfigBuilder) WithTrimTab(state tw.State) *ConfigBuilder {
b.config.Behavior.TrimTab = state
return b
}
// WithDebug enables/disables debug logging
func (b *ConfigBuilder) WithDebug(debug bool) *ConfigBuilder {
b.config.Debug = debug
@@ -796,6 +803,12 @@ func (bb *BehaviorConfigBuilder) WithTrimSpace(state tw.State) *BehaviorConfigBu
return bb
}
// WithTrimTab enables/disables trim tab
func (bb *BehaviorConfigBuilder) WithTrimTab(state tw.State) *BehaviorConfigBuilder {
bb.config.TrimTab = state
return bb
}
// WithHeaderHide enables/disables header visibility
func (bb *BehaviorConfigBuilder) WithHeaderHide(state tw.State) *BehaviorConfigBuilder {
bb.config.Header.Hide = state

View File

@@ -1,248 +0,0 @@
PASS
ok github.com/olekukonko/tablewriter 0.284s
? github.com/olekukonko/tablewriter/cmd/csv2table [no test files]
goos: darwin
goarch: arm64
pkg: github.com/olekukonko/tablewriter/pkg/twwarp
cpu: Apple M2
BenchmarkWrapString-8 10030 114909 ns/op 87.40 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 112188 ns/op 89.52 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 113708 ns/op 88.32 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 113233 ns/op 88.69 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 112575 ns/op 89.21 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 112604 ns/op 89.19 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113731 ns/op 88.30 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113511 ns/op 88.48 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113575 ns/op 88.43 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113746 ns/op 88.29 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113473 ns/op 88.51 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 114487 ns/op 87.72 MB/s 54024 B/op 51 allocs/op
PASS
ok github.com/olekukonko/tablewriter/pkg/twwarp 14.612s
goos: darwin
goarch: arm64
pkg: github.com/olekukonko/tablewriter/pkg/twwidth
cpu: Apple M2
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 264374 4533 ns/op 119.12 MB/s 1178 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 265746 4514 ns/op 119.62 MB/s 1177 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 263538 4509 ns/op 119.75 MB/s 1178 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 266173 4510 ns/op 119.72 MB/s 1180 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 265224 4676 ns/op 115.48 MB/s 1180 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 265696 4508 ns/op 119.80 MB/s 1177 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 251047 4859 ns/op 111.13 MB/s 1867 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 1000000 3945 ns/op 136.89 MB/s 1584 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3504475 3729 ns/op 144.81 MB/s 1474 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3664098 635.4 ns/op 849.84 MB/s 670 B/op 2 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3818680 588.6 ns/op 917.47 MB/s 667 B/op 2 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3761966 348.7 ns/op 1548.66 MB/s 583 B/op 1 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 49524442 23.54 ns/op 22938.55 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51765230 23.25 ns/op 23221.81 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51881983 23.83 ns/op 22664.79 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51665586 23.20 ns/op 23272.39 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51782077 23.23 ns/op 23250.20 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51498277 23.21 ns/op 23267.21 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 263586 4520 ns/op 119.47 MB/s 1183 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265484 4519 ns/op 119.49 MB/s 1182 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265218 4514 ns/op 119.64 MB/s 1181 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265957 4515 ns/op 119.60 MB/s 1184 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265981 4518 ns/op 119.52 MB/s 1183 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 265028 4574 ns/op 118.06 MB/s 1184 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 251682 4853 ns/op 111.27 MB/s 1869 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 1000000 3893 ns/op 138.70 MB/s 1583 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3596130 3747 ns/op 144.13 MB/s 1499 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3671358 506.1 ns/op 1066.92 MB/s 628 B/op 2 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3687993 370.6 ns/op 1456.96 MB/s 594 B/op 2 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3672946 358.4 ns/op 1506.88 MB/s 583 B/op 1 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 49266897 23.64 ns/op 22844.78 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 50158659 23.54 ns/op 22938.83 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 50689321 23.45 ns/op 23025.77 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51113672 23.52 ns/op 22954.95 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51489162 23.21 ns/op 23269.51 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51705564 23.16 ns/op 23311.21 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20930 57159 ns/op 35.86 MB/s 1389 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20882 57502 ns/op 35.65 MB/s 1395 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21103 57730 ns/op 35.51 MB/s 1391 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20889 56615 ns/op 36.21 MB/s 1393 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20808 58303 ns/op 35.16 MB/s 1391 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21104 56727 ns/op 36.14 MB/s 1387 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 38569 27485 ns/op 74.59 MB/s 3041 B/op 6 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 58061 ns/op 35.31 MB/s 3835 B/op 10 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 2124566 31025 ns/op 66.08 MB/s 3140 B/op 6 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 1607 ns/op 1275.74 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1615826 1224 ns/op 1674.27 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1478348 722.9 ns/op 2835.84 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 23989044 44.26 ns/op 46313.25 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27268802 44.13 ns/op 46454.64 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27292006 44.51 ns/op 46054.40 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 24128786 44.99 ns/op 45569.06 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 26858004 44.09 ns/op 46497.43 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27259458 44.05 ns/op 46538.64 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20671 57887 ns/op 35.41 MB/s 1395 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20966 56795 ns/op 36.09 MB/s 1396 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20708 57092 ns/op 35.91 MB/s 1388 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20882 57917 ns/op 35.40 MB/s 1389 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 21244 58013 ns/op 35.34 MB/s 1393 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20854 58122 ns/op 35.27 MB/s 1396 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 38907 30289 ns/op 67.68 MB/s 3066 B/op 6 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1000000 56603 ns/op 36.22 MB/s 3835 B/op 10 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1949059 29030 ns/op 70.62 MB/s 3084 B/op 6 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1479127 933.7 ns/op 2195.47 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 2335996 11012 ns/op 186.17 MB/s 2548 B/op 3 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 983864 1169 ns/op 1753.75 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27291516 44.18 ns/op 46398.32 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27220657 44.18 ns/op 46402.04 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27059124 44.91 ns/op 45645.46 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 26679783 44.04 ns/op 46551.62 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27244114 44.14 ns/op 46448.19 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27221737 44.61 ns/op 45948.75 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3247359 366.1 ns/op 95.62 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3292773 370.6 ns/op 94.44 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3275070 365.3 ns/op 95.82 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3291489 365.6 ns/op 95.73 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3282121 374.9 ns/op 93.37 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3198205 375.6 ns/op 93.18 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 3092488 419.4 ns/op 83.45 MB/s 152 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6276060 476.4 ns/op 73.46 MB/s 166 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6135336 218.8 ns/op 159.98 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6175833 216.1 ns/op 161.95 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6156606 215.2 ns/op 162.63 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6160923 216.2 ns/op 161.88 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 78655855 15.02 ns/op 2330.76 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 70905223 14.59 ns/op 2398.68 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 82255629 14.49 ns/op 2415.75 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 82383864 14.48 ns/op 2417.21 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 82325931 14.49 ns/op 2415.73 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 82426311 14.66 ns/op 2386.73 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3265182 365.8 ns/op 95.68 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3275419 366.3 ns/op 95.56 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3057087 375.3 ns/op 93.26 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3239217 372.6 ns/op 93.94 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3246429 367.3 ns/op 95.29 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 3252763 365.3 ns/op 95.80 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 2986195 396.4 ns/op 88.30 MB/s 142 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6487422 493.6 ns/op 70.90 MB/s 168 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6261225 216.1 ns/op 161.99 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6154988 210.7 ns/op 166.13 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6308702 213.8 ns/op 163.69 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6120438 216.0 ns/op 162.05 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82184980 14.47 ns/op 2419.17 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 78985473 14.51 ns/op 2412.95 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82368319 14.47 ns/op 2419.30 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82366668 14.47 ns/op 2418.96 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82104614 14.53 ns/op 2409.59 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82399426 14.53 ns/op 2409.13 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1020 ns/op 59.80 MB/s 186 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1010 ns/op 60.40 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1007 ns/op 60.55 MB/s 186 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1006 ns/op 60.63 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1006 ns/op 60.65 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1006 ns/op 60.63 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 1000000 1334 ns/op 45.74 MB/s 436 B/op 7 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6892693 1204 ns/op 50.65 MB/s 321 B/op 7 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6433399 221.7 ns/op 275.14 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6323521 221.2 ns/op 275.73 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6000822 218.5 ns/op 279.15 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6329578 220.3 ns/op 276.90 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 80806719 14.65 ns/op 4163.13 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82397774 14.63 ns/op 4169.11 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82794307 14.76 ns/op 4134.15 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82610730 14.59 ns/op 4180.13 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82639170 14.58 ns/op 4183.56 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 82560049 14.45 ns/op 4222.53 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1006 ns/op 60.61 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1012 ns/op 60.29 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1030 ns/op 59.25 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1005 ns/op 60.68 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1006 ns/op 60.64 MB/s 186 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 1000000 1012 ns/op 60.26 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 1000000 1361 ns/op 44.84 MB/s 436 B/op 7 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6967185 1216 ns/op 50.17 MB/s 323 B/op 7 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6413974 219.1 ns/op 278.46 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6381684 216.9 ns/op 281.27 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6383749 216.2 ns/op 282.14 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6360810 217.3 ns/op 280.75 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 81573231 14.53 ns/op 4197.28 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82780268 14.47 ns/op 4215.84 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82845276 14.48 ns/op 4212.74 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82545850 14.51 ns/op 4203.96 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82419704 14.49 ns/op 4209.69 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82121707 14.50 ns/op 4206.82 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3552715 336.1 ns/op 178.50 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3551234 335.0 ns/op 179.09 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3588946 338.9 ns/op 177.05 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3577424 338.5 ns/op 177.25 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3554505 335.4 ns/op 178.89 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 3575703 336.2 ns/op 178.46 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 2990224 412.6 ns/op 145.42 MB/s 207 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 6066997 484.0 ns/op 123.95 MB/s 232 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5743347 224.3 ns/op 267.49 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5870154 220.6 ns/op 271.92 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5880489 228.0 ns/op 263.14 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5660132 226.8 ns/op 264.52 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81708613 14.54 ns/op 4126.40 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 79903231 14.65 ns/op 4094.56 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 80580853 14.62 ns/op 4103.14 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 82036092 14.73 ns/op 4073.52 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 83622964 14.49 ns/op 4139.65 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 82724623 14.53 ns/op 4129.78 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3463408 349.4 ns/op 171.71 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3245782 350.0 ns/op 171.41 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3461160 348.3 ns/op 172.28 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3453544 349.1 ns/op 171.87 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3443858 347.0 ns/op 172.92 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 3469286 347.4 ns/op 172.72 MB/s 146 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 3050086 428.5 ns/op 140.04 MB/s 213 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5927800 476.0 ns/op 126.05 MB/s 230 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5852149 223.0 ns/op 269.05 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5721747 224.9 ns/op 266.80 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5751147 225.7 ns/op 265.84 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5893626 225.9 ns/op 265.55 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81984477 14.52 ns/op 4132.81 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 79537578 14.59 ns/op 4112.59 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 82339353 14.56 ns/op 4119.49 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 82286889 14.92 ns/op 4020.68 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 82166224 14.53 ns/op 4129.14 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 83084276 14.52 ns/op 4131.45 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1221180 982.5 ns/op 88.55 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1210902 983.5 ns/op 88.46 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1223528 989.3 ns/op 87.94 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1212517 984.1 ns/op 88.40 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1224182 983.5 ns/op 88.46 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 1000000 1007 ns/op 86.36 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 999058 1364 ns/op 63.76 MB/s 603 B/op 7 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6682279 1218 ns/op 71.40 MB/s 465 B/op 7 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6339568 220.6 ns/op 394.46 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6226921 222.3 ns/op 391.34 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6264051 221.1 ns/op 393.47 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6234439 222.4 ns/op 391.23 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 75337251 15.64 ns/op 5562.01 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76826634 15.76 ns/op 5521.54 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76836674 15.79 ns/op 5508.81 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76840162 15.64 ns/op 5564.05 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76694060 15.60 ns/op 5577.81 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76737175 15.62 ns/op 5571.56 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1202406 1012 ns/op 85.93 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1000000 1000 ns/op 86.99 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1208559 993.7 ns/op 87.55 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1209415 990.9 ns/op 87.80 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1206118 1020 ns/op 85.33 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 1211994 990.6 ns/op 87.82 MB/s 194 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 1000000 1363 ns/op 63.84 MB/s 603 B/op 7 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6504960 1214 ns/op 71.65 MB/s 465 B/op 7 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6349030 220.2 ns/op 395.18 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6183368 220.3 ns/op 394.99 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6240484 220.6 ns/op 394.32 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6280713 222.0 ns/op 391.95 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 69630140 15.77 ns/op 5517.31 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76043014 15.65 ns/op 5559.61 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76239080 15.63 ns/op 5567.94 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 75864739 15.88 ns/op 5479.13 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 71286422 15.74 ns/op 5527.29 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 75704404 15.71 ns/op 5536.58 MB/s 0 B/op 0 allocs/op
PASS
ok github.com/olekukonko/tablewriter/pkg/twwidth 659.150s
? github.com/olekukonko/tablewriter/renderer [no test files]
PASS
ok github.com/olekukonko/tablewriter/tests 3.025s
PASS
ok github.com/olekukonko/tablewriter/tw 0.283s

View File

@@ -1,248 +0,0 @@
PASS
ok github.com/olekukonko/tablewriter 0.819s
? github.com/olekukonko/tablewriter/cmd/csv2table [no test files]
goos: darwin
goarch: arm64
pkg: github.com/olekukonko/tablewriter/pkg/twwarp
cpu: Apple M2
BenchmarkWrapString-8 10630 111320 ns/op 90.22 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 112981 ns/op 88.89 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 113419 ns/op 88.55 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 112794 ns/op 89.04 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 112400 ns/op 89.35 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapString-8 10000 112767 ns/op 89.06 MB/s 48488 B/op 33 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 115098 ns/op 87.26 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113343 ns/op 88.61 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113702 ns/op 88.33 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113547 ns/op 88.45 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113016 ns/op 88.86 MB/s 54024 B/op 51 allocs/op
BenchmarkWrapStringWithSpaces-8 10000 113206 ns/op 88.71 MB/s 54024 B/op 51 allocs/op
PASS
ok github.com/olekukonko/tablewriter/pkg/twwarp 15.179s
goos: darwin
goarch: arm64
pkg: github.com/olekukonko/tablewriter/pkg/twwidth
cpu: Apple M2
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 2953855 387.1 ns/op 90.40 MB/s 112 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3095179 387.8 ns/op 90.24 MB/s 112 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3096141 391.0 ns/op 89.51 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3090711 387.2 ns/op 90.40 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3066110 387.4 ns/op 90.35 MB/s 112 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_NoCache-8 3098689 389.2 ns/op 89.92 MB/s 112 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 3125685 440.9 ns/op 79.39 MB/s 159 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6477175 496.2 ns/op 70.53 MB/s 165 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6019939 217.7 ns/op 160.79 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6231590 219.2 ns/op 159.67 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6245622 216.2 ns/op 161.90 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheMiss-8 6109658 218.8 ns/op 159.95 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 80977806 14.73 ns/op 2375.87 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 80972566 14.76 ns/op 2371.06 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 81432532 14.90 ns/op 2348.78 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 80644483 14.85 ns/op 2357.10 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 81361905 14.79 ns/op 2365.80 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAfalse_CacheHit-8 81612987 14.78 ns/op 2368.60 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1777732 682.2 ns/op 51.30 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1778122 672.9 ns/op 52.01 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1779956 674.0 ns/op 51.93 MB/s 112 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1773282 678.7 ns/op 51.57 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1783092 680.2 ns/op 51.46 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_NoCache-8 1780448 674.0 ns/op 51.93 MB/s 113 B/op 3 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 1000000 1027 ns/op 34.08 MB/s 333 B/op 4 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6891168 958.3 ns/op 36.52 MB/s 227 B/op 4 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6165972 211.7 ns/op 165.30 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6370098 217.4 ns/op 161.02 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6193920 214.8 ns/op 162.92 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheMiss-8 6190384 209.4 ns/op 167.16 MB/s 55 B/op 1 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 79747688 14.75 ns/op 2372.71 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 79607492 14.75 ns/op 2372.90 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 81634501 14.73 ns/op 2376.30 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 81644916 14.70 ns/op 2381.26 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 82505884 14.70 ns/op 2380.77 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/SimpleASCII_EAtrue_CacheHit-8 81840265 14.70 ns/op 2380.34 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1053 ns/op 57.95 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1028 ns/op 59.34 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1029 ns/op 59.27 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1025 ns/op 59.49 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1026 ns/op 59.48 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_NoCache-8 1000000 1025 ns/op 59.54 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 1000000 1352 ns/op 45.13 MB/s 437 B/op 7 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6619118 1219 ns/op 50.06 MB/s 320 B/op 7 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6486976 221.2 ns/op 275.81 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6508150 217.8 ns/op 280.07 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6487533 217.4 ns/op 280.56 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheMiss-8 6243558 216.4 ns/op 281.93 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 80787679 14.90 ns/op 4093.19 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 81640521 14.89 ns/op 4097.92 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 81596338 14.71 ns/op 4145.47 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 81950889 14.84 ns/op 4111.86 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 79321578 14.78 ns/op 4126.88 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAfalse_CacheHit-8 81880058 14.75 ns/op 4134.44 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 906406 1313 ns/op 46.44 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 917503 1313 ns/op 46.46 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 915308 1312 ns/op 46.49 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 918404 1312 ns/op 46.51 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 892551 1338 ns/op 45.58 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_NoCache-8 915020 1333 ns/op 45.76 MB/s 185 B/op 6 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 791368 1633 ns/op 37.36 MB/s 374 B/op 7 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 2314653 1064 ns/op 57.34 MB/s 265 B/op 5 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6531552 1198 ns/op 50.94 MB/s 258 B/op 5 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6629763 242.5 ns/op 251.57 MB/s 90 B/op 2 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6388215 219.1 ns/op 278.36 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheMiss-8 6472197 218.6 ns/op 279.09 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 80704821 14.76 ns/op 4132.33 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82628028 14.70 ns/op 4149.56 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 81870517 14.70 ns/op 4148.97 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 81944124 14.99 ns/op 4068.84 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 81918950 14.70 ns/op 4150.75 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/ASCIIWithANSI_EAtrue_CacheHit-8 82547270 14.91 ns/op 4092.20 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1604370 749.9 ns/op 80.02 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1610148 749.7 ns/op 80.03 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1585026 744.8 ns/op 80.56 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1615032 749.9 ns/op 80.01 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1614980 743.3 ns/op 80.72 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_NoCache-8 1609586 741.8 ns/op 80.88 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 1000000 1095 ns/op 54.77 MB/s 428 B/op 4 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 6214893 995.6 ns/op 60.26 MB/s 316 B/op 4 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5702408 224.5 ns/op 267.21 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5712139 220.2 ns/op 272.50 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5783916 228.2 ns/op 262.91 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheMiss-8 5713358 224.0 ns/op 267.91 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 78757815 14.92 ns/op 4020.51 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81419875 14.79 ns/op 4057.15 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81656493 14.75 ns/op 4068.12 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81522430 14.73 ns/op 4073.37 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 81887037 14.70 ns/op 4080.93 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAfalse_CacheHit-8 82019505 14.72 ns/op 4074.99 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1241600 965.5 ns/op 62.14 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1243646 964.8 ns/op 62.19 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1243516 968.1 ns/op 61.98 MB/s 144 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1241917 965.3 ns/op 62.16 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1242903 985.0 ns/op 60.92 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_NoCache-8 1223456 964.3 ns/op 62.22 MB/s 145 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 1000000 1378 ns/op 43.55 MB/s 428 B/op 4 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 6265657 1229 ns/op 48.84 MB/s 316 B/op 4 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5960497 224.3 ns/op 267.52 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5961004 222.6 ns/op 269.52 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5772004 226.5 ns/op 264.87 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheMiss-8 5766748 223.5 ns/op 268.51 MB/s 87 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 78664455 14.76 ns/op 4063.92 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81305858 14.71 ns/op 4079.19 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81626406 14.71 ns/op 4078.32 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81168830 14.71 ns/op 4077.52 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81860040 14.72 ns/op 4075.37 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsian_EAtrue_CacheHit-8 81093633 14.88 ns/op 4031.15 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 837949 1397 ns/op 62.29 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 869082 1380 ns/op 63.04 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 864015 1377 ns/op 63.18 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 873742 1374 ns/op 63.33 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 875703 1375 ns/op 63.27 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_NoCache-8 866865 1375 ns/op 63.26 MB/s 194 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 772100 1709 ns/op 50.91 MB/s 543 B/op 7 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 2127564 1046 ns/op 83.14 MB/s 361 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6476034 1274 ns/op 68.30 MB/s 381 B/op 6 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6401709 221.3 ns/op 393.18 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6368766 220.2 ns/op 395.14 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheMiss-8 6404850 220.6 ns/op 394.34 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 74606566 15.83 ns/op 5494.39 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76326774 15.72 ns/op 5536.01 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76140116 15.74 ns/op 5525.94 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76340330 15.69 ns/op 5544.89 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76240900 15.69 ns/op 5544.81 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAfalse_CacheHit-8 76301294 15.73 ns/op 5531.49 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 753624 1592 ns/op 54.64 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 757292 1599 ns/op 54.42 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 758196 1588 ns/op 54.79 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 753902 1586 ns/op 54.85 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 758770 1589 ns/op 54.74 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_NoCache-8 757748 1590 ns/op 54.71 MB/s 193 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 653979 1985 ns/op 43.82 MB/s 561 B/op 7 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 2344717 731.5 ns/op 118.93 MB/s 263 B/op 3 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6440574 1420 ns/op 61.26 MB/s 369 B/op 5 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6506366 238.2 ns/op 365.22 MB/s 107 B/op 2 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6504939 220.8 ns/op 394.05 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheMiss-8 6399746 221.0 ns/op 393.66 MB/s 103 B/op 1 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 75646941 15.95 ns/op 5453.57 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 75406885 15.73 ns/op 5532.42 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76186243 15.69 ns/op 5545.76 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76350855 15.76 ns/op 5521.29 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76240896 15.70 ns/op 5542.36 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/EastAsianWithANSI_EAtrue_CacheHit-8 76404126 15.90 ns/op 5471.17 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 241440 4945 ns/op 109.19 MB/s 1181 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 245013 5050 ns/op 106.94 MB/s 1180 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 245098 4887 ns/op 110.49 MB/s 1177 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 244785 4971 ns/op 108.62 MB/s 1179 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 245007 4880 ns/op 110.66 MB/s 1182 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_NoCache-8 245986 4878 ns/op 110.71 MB/s 1181 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 232534 5203 ns/op 103.78 MB/s 1845 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 1000000 4309 ns/op 125.31 MB/s 1613 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3491629 4013 ns/op 134.57 MB/s 1471 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3670467 847.5 ns/op 637.15 MB/s 680 B/op 2 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3669694 385.0 ns/op 1402.66 MB/s 583 B/op 1 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheMiss-8 3242532 356.5 ns/op 1514.63 MB/s 583 B/op 1 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 50391319 23.77 ns/op 22714.54 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51225590 23.32 ns/op 23159.25 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 51732408 23.74 ns/op 22751.21 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 46074986 24.16 ns/op 22352.67 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 43649127 24.43 ns/op 22104.61 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAfalse_CacheHit-8 49954903 23.53 ns/op 22952.45 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 127574 9378 ns/op 57.58 MB/s 1180 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 128386 9386 ns/op 57.53 MB/s 1183 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 128604 9280 ns/op 58.19 MB/s 1178 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 129218 9264 ns/op 58.29 MB/s 1179 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 129030 9261 ns/op 58.31 MB/s 1179 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_NoCache-8 129080 9266 ns/op 58.28 MB/s 1180 B/op 3 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 123823 9282 ns/op 58.18 MB/s 1817 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 1000000 8943 ns/op 60.38 MB/s 1754 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3532728 7337 ns/op 73.60 MB/s 1481 B/op 4 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3610767 705.9 ns/op 764.94 MB/s 626 B/op 2 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3502867 387.5 ns/op 1393.73 MB/s 583 B/op 1 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheMiss-8 3706471 680.7 ns/op 793.25 MB/s 640 B/op 2 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51185895 24.01 ns/op 22492.97 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51442992 23.44 ns/op 23041.30 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 47312392 23.56 ns/op 22917.72 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51727110 23.33 ns/op 23144.01 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51212746 23.62 ns/op 22862.18 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongSimpleASCII_EAtrue_CacheHit-8 51598200 23.23 ns/op 23247.62 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21105 57258 ns/op 35.80 MB/s 1389 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20656 57558 ns/op 35.62 MB/s 1386 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21045 57257 ns/op 35.80 MB/s 1386 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20884 57463 ns/op 35.68 MB/s 1391 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 20984 56898 ns/op 36.03 MB/s 1388 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_NoCache-8 21164 57796 ns/op 35.47 MB/s 1388 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 103934 31906 ns/op 64.25 MB/s 3143 B/op 6 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 52097 ns/op 39.35 MB/s 3737 B/op 10 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1298925 14140 ns/op 144.98 MB/s 2637 B/op 4 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 1288 ns/op 1592.17 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 2546826 30224 ns/op 67.83 MB/s 3071 B/op 6 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheMiss-8 1000000 8376 ns/op 244.74 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 25786026 44.71 ns/op 45849.62 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27173578 44.15 ns/op 46427.72 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27221428 44.54 ns/op 46030.74 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27213686 44.07 ns/op 46519.79 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27233990 44.27 ns/op 46310.26 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAfalse_CacheHit-8 27164018 44.12 ns/op 46460.92 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19785 60051 ns/op 34.14 MB/s 1386 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 20198 60161 ns/op 34.08 MB/s 1391 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19585 60345 ns/op 33.97 MB/s 1390 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19956 61714 ns/op 33.22 MB/s 1391 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19554 61682 ns/op 33.24 MB/s 1388 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_NoCache-8 19830 60050 ns/op 34.14 MB/s 1393 B/op 9 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 38818 29507 ns/op 69.48 MB/s 3059 B/op 6 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1000000 58539 ns/op 35.02 MB/s 3835 B/op 10 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 2186653 33757 ns/op 60.73 MB/s 3157 B/op 6 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1000000 1283 ns/op 1597.72 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 1653430 1256 ns/op 1632.67 MB/s 2311 B/op 1 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheMiss-8 2195628 2716 ns/op 754.79 MB/s 2317 B/op 2 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 26531894 44.76 ns/op 45801.05 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 26634384 44.68 ns/op 45878.57 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27184633 44.97 ns/op 45583.96 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27011893 44.46 ns/op 46104.62 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27183812 44.09 ns/op 46498.94 MB/s 0 B/op 0 allocs/op
BenchmarkWidthFunction/LongASCIIWithANSI_EAtrue_CacheHit-8 27269318 44.17 ns/op 46406.38 MB/s 0 B/op 0 allocs/op
PASS
ok github.com/olekukonko/tablewriter/pkg/twwidth 724.296s
? github.com/olekukonko/tablewriter/renderer [no test files]
PASS
ok github.com/olekukonko/tablewriter/tests 2.959s
PASS
ok github.com/olekukonko/tablewriter/tw 0.270s

View File

@@ -528,6 +528,17 @@ func WithTrimSpace(state tw.State) Option {
}
}
// WithTrimTab controls automatic trimming of leading and trailing tab
// characters in cell content. The change is logged when a debug logger
// is attached to the table.
func WithTrimTab(state tw.State) Option {
	return func(t *Table) {
		t.config.Behavior.TrimTab = state
		if t.logger != nil {
			t.logger.Debugf("Option: WithTrimTab applied to Table: %v", state)
		}
	}
}
// WithTrimLine sets whether empty visual lines within a cell are trimmed.
// Logs the change if debugging is enabled.
func WithTrimLine(state tw.State) Option {
@@ -781,6 +792,7 @@ func defaultConfig() Config {
Behavior: tw.Behavior{
AutoHide: tw.Off,
TrimSpace: tw.On,
TrimTab: tw.On,
TrimLine: tw.On,
Structs: tw.Struct{
AutoHeader: tw.Off,
@@ -920,6 +932,7 @@ func mergeConfig(dst, src Config) Config {
dst.Debug = src.Debug || dst.Debug
dst.Behavior.AutoHide = src.Behavior.AutoHide
dst.Behavior.TrimSpace = src.Behavior.TrimSpace
dst.Behavior.TrimTab = src.Behavior.TrimTab
dst.Behavior.Compact = src.Behavior.Compact
dst.Behavior.Header = src.Behavior.Header
dst.Behavior.Footer = src.Behavior.Footer

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
// This module is a Table Writer API for the Go Programming Language.
// This module is a Table Writer API for the Go Programming Language.
// The protocols were written in pure Go and works on windows and unix systems
package twwarp
@@ -13,8 +13,7 @@ import (
"unicode"
"github.com/clipperhouse/uax29/v2/graphemes"
"github.com/olekukonko/tablewriter/pkg/twwidth" // IMPORT YOUR NEW PACKAGE
// "github.com/mattn/go-runewidth" // This can be removed if all direct uses are gone
"github.com/olekukonko/tablewriter/pkg/twwidth"
)
const (
@@ -60,8 +59,7 @@ func WrapString(s string, lim int) ([]string, int) {
var lines []string
max := 0
for _, v := range words {
// max = runewidth.StringWidth(v) // OLD
max = twwidth.Width(v) // NEW: Use twdw.Width
max = twwidth.Width(v)
if max > lim {
lim = max
}
@@ -84,10 +82,8 @@ func WrapStringWithSpaces(s string, lim int) ([]string, int) {
return []string{""}, lim
}
if strings.TrimSpace(s) == "" { // All spaces
// if runewidth.StringWidth(s) <= lim { // OLD
if twwidth.Width(s) <= lim { // NEW: Use twdw.Width
// return []string{s}, runewidth.StringWidth(s) // OLD
return []string{s}, twwidth.Width(s) // NEW: Use twdw.Width
if twwidth.Width(s) <= lim {
return []string{s}, twwidth.Width(s)
}
// For very long all-space strings, "wrap" by truncating to the limit.
if lim > 0 {
@@ -118,8 +114,7 @@ func WrapStringWithSpaces(s string, lim int) ([]string, int) {
maxCoreWordWidth := 0
for _, v := range words {
// w := runewidth.StringWidth(v) // OLD
w := twwidth.Width(v) // NEW: Use twdw.Width
w := twwidth.Width(v)
if w > maxCoreWordWidth {
maxCoreWordWidth = w
}
@@ -156,8 +151,7 @@ func stringToDisplayWidth(s string, targetWidth int) (substring string, actualWi
g := graphemes.FromString(s)
for g.Next() {
grapheme := g.Value()
// graphemeWidth := runewidth.StringWidth(grapheme) // OLD
graphemeWidth := twwidth.Width(grapheme) // NEW: Use twdw.Width
graphemeWidth := twwidth.Width(grapheme)
if currentWidth+graphemeWidth > targetWidth {
break
@@ -187,8 +181,7 @@ func WrapWords(words []string, spc, lim, pen int) [][]string {
}
lengths := make([]int, n)
for i := 0; i < n; i++ {
// lengths[i] = runewidth.StringWidth(words[i]) // OLD
lengths[i] = twwidth.Width(words[i]) // NEW: Use twdw.Width
lengths[i] = twwidth.Width(words[i])
}
nbrk := make([]int, n)
cost := make([]int, n)

View File

@@ -0,0 +1,26 @@
package twwidth
import "github.com/olekukonko/tablewriter/pkg/twcache"
// widthCache stores memoized results of Width calculations to improve
// performance; a nil cache disables memoization (see SetCacheCapacity).
var widthCache *twcache.LRU[cacheKey, int]

// cacheKey is the lookup key for widthCache: the measured string together
// with the east-asian-width setting in effect when the width was computed,
// since the two modes can produce different widths for the same string.
type cacheKey struct {
	eastAsian bool
	str       string
}
// SetCacheCapacity resizes the memoization cache used by width
// calculations. A capacity of zero or less disables caching entirely.
func SetCacheCapacity(capacity int) {
	mu.Lock()
	defer mu.Unlock()
	if capacity <= 0 {
		// nil = fully disabled
		widthCache = nil
		return
	}
	widthCache = twcache.NewLRU[cacheKey, int](capacity)
}

View File

@@ -0,0 +1,288 @@
package twwidth
import (
"bufio"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
)
// Tab represents a single tab character as a typed rune.
type Tab rune

const (
	// TabWidthDefault is the fallback tab width used when detection fails.
	TabWidthDefault = 8
	// TabString is the canonical tab character.
	TabString Tab = '\t'
)

// IsTab reports whether t is the canonical tab character.
func (t Tab) IsTab() bool {
	return TabString == t
}

// Byte returns t as a single byte.
func (t Tab) Byte() byte {
	return byte(t)
}

// Rune returns t as a rune.
func (t Tab) Rune() rune {
	return rune(t)
}

// String returns t as a one-character string.
func (t Tab) String() string {
	return string(t)
}

// IsTab reports whether r is a tab rune.
func IsTab(r rune) bool {
	return TabString.Rune() == r
}
// Tabinal lazily detects and stores the tab width used for rendering.
// The zero value is ready to use; detection runs once on the first Size call.
type Tabinal struct {
	once  sync.Once
	width int
	mu    sync.RWMutex
}

// String returns the literal tab character.
func (tb *Tabinal) String() string {
	return TabString.String()
}

// Size returns the current tab width, falling back to TabWidthDefault when
// no positive width has been detected or set.
func (tb *Tabinal) Size() int {
	tb.once.Do(tb.init)
	tb.mu.RLock()
	defer tb.mu.RUnlock()
	if tb.width > 0 {
		return tb.width
	}
	return TabWidthDefault
}

// SetWidth sets the tab width when it falls in the valid range (1-32);
// out-of-range values are silently ignored.
func (tb *Tabinal) SetWidth(w int) {
	if w > 0 && w <= 32 {
		tb.mu.Lock()
		tb.width = w
		tb.mu.Unlock()
	}
}

// init performs the one-time tab-width detection.
func (tb *Tabinal) init() {
	detected := tb.detect()
	tb.mu.Lock()
	tb.width = detected
	tb.mu.Unlock()
}
// detect determines the tab width by consulting, in order: environment
// variables (TABWIDTH, TS, VIM_TABSTOP), .editorconfig files, a
// project-type heuristic, and finally the TERM variable. It returns 0
// when no source yields a usable width.
func (t *Tabinal) detect() int {
	for _, key := range []string{"TABWIDTH", "TS", "VIM_TABSTOP"} {
		if w := envInt(key); w > 0 {
			return clamp(w)
		}
	}
	for _, probe := range []func() int{editorConfigTabWidth, projectHeuristic, termHeuristic} {
		if w := probe(); w > 0 {
			return w
		}
	}
	return 0
}
// editorConfigTabWidth walks from the working directory toward the
// filesystem root and returns the first tab width declared in an
// .editorconfig file, clamped to the supported range, or 0 when none
// applies.
func editorConfigTabWidth() int {
	dir, err := os.Getwd()
	if err != nil {
		return 0
	}
	for dir != "" && dir != "/" && dir != "." {
		if w := parseEditorConfig(filepath.Join(dir, ".editorconfig")); w > 0 {
			return clamp(w)
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			// reached the root; nothing further to scan
			break
		}
		dir = parent
	}
	return 0
}
// parseEditorConfig reads tab_width or indent_size from a file.
func parseEditorConfig(path string) int {
f, err := os.Open(path)
if err != nil {
return 0
}
defer f.Close()
scanner := bufio.NewScanner(f)
inMatch := false
globalWidth := 0
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") {
continue
}
if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
pattern := line[1 : len(line)-1]
inMatch = pattern == "*"
knownExts := []string{".go", ".py", ".js", ".ts", ".java", ".rs"}
for _, ext := range knownExts {
if strings.Contains(pattern, ext) {
inMatch = true
break
}
}
continue
}
if !inMatch && globalWidth == 0 {
continue
}
parts := strings.SplitN(line, "=", 2)
if len(parts) != 2 {
continue
}
key := strings.TrimSpace(parts[0])
val := strings.TrimSpace(parts[1])
switch key {
case "tab_width":
if w, err := strconv.Atoi(val); err == nil && w > 0 {
if inMatch {
return clamp(w)
}
if globalWidth == 0 {
globalWidth = w
}
}
case "indent_size":
if val == "tab" {
continue
}
if w, err := strconv.Atoi(val); err == nil && w > 0 {
if inMatch {
return clamp(w)
}
if globalWidth == 0 {
globalWidth = w
}
}
}
}
return globalWidth
}
// projectHeuristic returns 4 for known project types.
// It inspects the current working directory for ecosystem marker files
// (Go, Node, Python, JVM, Rust, PHP) and, failing that, for loose source
// files of known languages. Returns 0 when nothing is recognized.
func projectHeuristic() int {
	cwd, err := os.Getwd()
	if err != nil {
		return 0
	}
	// Marker files that identify a project root for common ecosystems.
	markers := []string{
		"go.mod", "go.sum",
		"package.json", "package-lock.json", "yarn.lock", "pnpm-lock.yaml",
		"setup.py", "pyproject.toml", "requirements.txt", "Pipfile",
		"pom.xml", "build.gradle", "build.gradle.kts",
		"Cargo.toml",
		"composer.json",
	}
	for _, name := range markers {
		if _, statErr := os.Stat(filepath.Join(cwd, name)); statErr == nil {
			return 4
		}
	}
	// No marker file: look for source files of known languages instead.
	for _, glob := range []string{"*.go", "*.py", "*.js", "*.ts", "*.java", "*.rs"} {
		if hits, _ := filepath.Glob(filepath.Join(cwd, glob)); len(hits) > 0 {
			return 4
		}
	}
	return 0
}
// termHeuristic returns a default width based on the TERM variable.
// An empty TERM yields 0; vt52-style terminals get 2; common modern
// terminal families get TabWidthDefault; anything else yields 0.
func termHeuristic() int {
	term := strings.ToLower(os.Getenv("TERM"))
	if term == "" {
		return 0
	}
	if strings.Contains(term, "vt52") {
		return 2
	}
	// Terminal families we trust to honor the default tab stop.
	for _, family := range []string{"xterm", "screen", "tmux", "linux", "ansi", "rxvt"} {
		if strings.Contains(term, family) {
			return TabWidthDefault
		}
	}
	return 0
}
// clamp bounds a tab width to the range [0, 32]; non-positive inputs
// collapse to 0 (meaning "no width detected").
func clamp(w int) int {
	switch {
	case w <= 0:
		return 0
	case w > 32:
		return 32
	default:
		return w
	}
}
// globalTab holds the process-wide Tabinal singleton; globalTabOnce
// guards its lazy, one-time initialization in TabInstance.
var (
	globalTab *Tabinal
	globalTabOnce sync.Once
)
// TabInstance returns the singleton Tabinal, lazily creating it on the
// first call. Safe for concurrent use via sync.Once.
func TabInstance() *Tabinal {
	globalTabOnce.Do(func() { globalTab = new(Tabinal) })
	return globalTab
}
// TabWidth returns the detected global tab width.
func TabWidth() int {
	inst := TabInstance()
	return inst.Size()
}
// SetTabWidth sets the global tab width.
func SetTabWidth(w int) {
	inst := TabInstance()
	inst.SetWidth(w)
}
func envInt(k string) int {
v := os.Getenv(k)
w, _ := strconv.Atoi(v)
return w
}

View File

@@ -1,4 +1,3 @@
// width.go
package twwidth
import (
@@ -34,9 +33,6 @@ var globalOptions Options
// mu protects access to globalOptions for thread safety.
var mu sync.Mutex
// widthCache stores memoized results of Width calculations to improve performance.
var widthCache *twcache.LRU[string, int]
// ansi is a compiled regular expression for stripping ANSI escape codes from strings.
var ansi = Filter()
@@ -53,24 +49,10 @@ func init() {
// If EastAsianWidth is ON (e.g. forced via Env Var), but we detect
// a modern environment, we might technically want to narrow borders
// while keeping text wide.
//
// Note: In the standard EastAsian logic, isEastAsian will
// ALREADY be false for modern environments, so this boolean implies
// a specific "Forced On" scenario.
ForceNarrowBorders: isEastAsian && isModernEnvironment(),
}
widthCache = twcache.NewLRU[string, int](cacheCapacity)
}
// makeCacheKey generates a string key for the LRU cache from the input string
// and the current East Asian width setting.
// Prefix "0:" for false, "1:" for true.
func makeCacheKey(str string, eastAsianWidth bool) string {
if eastAsianWidth {
return cacheEastAsianPrefix + str
}
return cachePrefix + str
widthCache = twcache.NewLRU[cacheKey, int](cacheCapacity)
}
// Display calculates the visual width of a string using a specific runewidth.Condition.
@@ -124,21 +106,6 @@ func IsEastAsian() bool {
return globalOptions.EastAsianWidth
}
// SetCacheCapacity changes the cache size dynamically
// If capacity <= 0, disables caching entirely
func SetCacheCapacity(capacity int) {
mu.Lock()
defer mu.Unlock()
if capacity <= 0 {
widthCache = nil // nil = fully disabled
return
}
newCache := twcache.NewLRU[string, int](capacity)
widthCache = newCache
}
// SetCondition sets the global East Asian width setting based on a runewidth.Condition.
// Deprecated: use SetOptions with the new twwidth.Options struct instead.
// This function is kept for backward compatibility.
@@ -362,6 +329,10 @@ func Truncate(s string, maxWidth int, suffix ...string) string {
func Width(str string) int {
// Fast path ASCII (Optimization)
if len(str) == 1 && str[0] < 0x80 {
// Treat tab as special case even in fast path
if IsTab(rune(str[0])) {
return TabWidth()
}
return 1
}
@@ -369,17 +340,20 @@ func Width(str string) int {
currentOpts := globalOptions
mu.Unlock()
key := makeCacheKey(str, currentOpts.EastAsianWidth)
key := cacheKey{
eastAsian: currentOpts.EastAsianWidth,
str: str,
}
// Check Cache (Optimization)
if w, found := widthCache.Get(key); found {
return w
}
stripped := ansi.ReplaceAllLiteralString(str, "")
//stripped := ansi.ReplaceAllLiteralString(str, "")
calculatedWidth := 0
for _, r := range stripped {
for _, r := range strip(str) {
calculatedWidth += calculateRunewidth(r, currentOpts)
}
@@ -407,21 +381,27 @@ func WidthNoCache(str string) int {
// bypassing the global settings and cache. This is useful for one-shot calculations
// where global state is not desired.
func WidthWithOptions(str string, opts Options) int {
stripped := ansi.ReplaceAllLiteralString(str, "")
// stripped := ansi.ReplaceAllLiteralString(str, "")
calculatedWidth := 0
for _, r := range stripped {
for _, r := range strip(str) {
calculatedWidth += calculateRunewidth(r, opts)
}
return calculatedWidth
}
// calculateRunewidth calculates the width of a single rune based on the provided options.
// It applies narrow overrides for box drawing characters if configured.
// It applies narrow overrides for box drawing characters if configured and handles Tabs.
func calculateRunewidth(r rune, opts Options) int {
if opts.ForceNarrowBorders && isBoxDrawingChar(r) {
return 1
}
// Explicitly handle Tabinal to ensure tables have enough space
// when TrimTab is Off.
if IsTab(r) {
return TabWidth()
}
dwOpts := displaywidth.Options{EastAsianWidth: opts.EastAsianWidth}
return dwOpts.Rune(r)
}
@@ -430,3 +410,10 @@ func calculateRunewidth(r rune, opts Options) int {
func isBoxDrawingChar(r rune) bool {
return r >= 0x2500 && r <= 0x257F
}
func strip(s string) string {
if strings.IndexByte(s, '\x1b') == -1 {
return s
}
return ansi.ReplaceAllLiteralString(s, "")
}

View File

@@ -44,7 +44,7 @@ func NewBlueprint(configs ...tw.Rendition) *Blueprint {
// Merge user settings with default settings
cfg.Settings = mergeSettings(cfg.Settings, userCfg.Settings)
}
return &Blueprint{config: cfg, logger: ll.New("blueprint")}
return &Blueprint{config: cfg, logger: ll.New("blueprint").Disable()}
}
// Close performs cleanup (no-op in this implementation).
@@ -322,14 +322,22 @@ func (f *Blueprint) formatCell(content string, width int, padding tw.Padding, al
result.WriteString(content)
rightPaddingWidth = totalPaddingWidth - padLeftWidth
if rightPaddingWidth > 0 {
result.WriteString(tw.PadRight(tw.Empty, rightPadChar, rightPaddingWidth))
f.logger.Debugf("Applied right padding: '%s' for %d width", rightPadChar, rightPaddingWidth)
padChar := rightPadChar
if padChar == tw.Empty {
padChar = tw.Space
}
result.WriteString(tw.PadRight(tw.Empty, padChar, rightPaddingWidth))
f.logger.Debugf("Applied right padding: '%s' for %d width", padChar, rightPaddingWidth)
}
case tw.AlignRight:
leftPaddingWidth = totalPaddingWidth - padRightWidth
if leftPaddingWidth > 0 {
result.WriteString(tw.PadLeft(tw.Empty, leftPadChar, leftPaddingWidth))
f.logger.Debugf("Applied left padding: '%s' for %d width", leftPadChar, leftPaddingWidth)
padChar := leftPadChar
if padChar == tw.Empty {
padChar = tw.Space
}
result.WriteString(tw.PadLeft(tw.Empty, padChar, leftPaddingWidth))
f.logger.Debugf("Applied left padding: '%s' for %d width", padChar, leftPaddingWidth)
}
result.WriteString(content)
result.WriteString(rightPadChar)
@@ -337,15 +345,23 @@ func (f *Blueprint) formatCell(content string, width int, padding tw.Padding, al
leftPaddingWidth = (totalPaddingWidth-padLeftWidth-padRightWidth)/2 + padLeftWidth
rightPaddingWidth = totalPaddingWidth - leftPaddingWidth
if leftPaddingWidth > padLeftWidth {
result.WriteString(tw.PadLeft(tw.Empty, leftPadChar, leftPaddingWidth-padLeftWidth))
f.logger.Debugf("Applied left centering padding: '%s' for %d width", leftPadChar, leftPaddingWidth-padLeftWidth)
padChar := leftPadChar
if padChar == tw.Empty {
padChar = tw.Space
}
result.WriteString(tw.PadLeft(tw.Empty, padChar, leftPaddingWidth-padLeftWidth))
f.logger.Debugf("Applied left centering padding: '%s' for %d width", padChar, leftPaddingWidth-padLeftWidth)
}
result.WriteString(leftPadChar)
result.WriteString(content)
result.WriteString(rightPadChar)
if rightPaddingWidth > padRightWidth {
result.WriteString(tw.PadRight(tw.Empty, rightPadChar, rightPaddingWidth-padRightWidth))
f.logger.Debugf("Applied right centering padding: '%s' for %d width", rightPadChar, rightPaddingWidth-padRightWidth)
padChar := rightPadChar
if padChar == tw.Empty {
padChar = tw.Space
}
result.WriteString(tw.PadRight(tw.Empty, padChar, rightPaddingWidth-padRightWidth))
f.logger.Debugf("Applied right centering padding: '%s' for %d width", padChar, rightPaddingWidth-padRightWidth)
}
default:
// Default to left alignment
@@ -353,8 +369,12 @@ func (f *Blueprint) formatCell(content string, width int, padding tw.Padding, al
result.WriteString(content)
rightPaddingWidth = totalPaddingWidth - padLeftWidth
if rightPaddingWidth > 0 {
result.WriteString(tw.PadRight(tw.Empty, rightPadChar, rightPaddingWidth))
f.logger.Debugf("Applied right padding: '%s' for %d width", rightPadChar, rightPaddingWidth)
padChar := rightPadChar
if padChar == tw.Empty {
padChar = tw.Space
}
result.WriteString(tw.PadRight(tw.Empty, padChar, rightPaddingWidth))
f.logger.Debugf("Applied right padding: '%s' for %d width", padChar, rightPaddingWidth)
}
}

View File

@@ -103,7 +103,7 @@ func NewColorized(configs ...ColorizedConfig) *Colorized {
tw.Row: tw.AlignLeft,
tw.Footer: tw.AlignRight,
},
logger: ll.New("colorized", ll.WithHandler(lh.NewMemoryHandler())),
logger: ll.New("colorized", ll.WithHandler(lh.NewMemoryHandler())).Disable(),
}
// Log initialization details
f.logger.Debugf("Initialized Colorized renderer with symbols: Center=%q, Row=%q, Column=%q", f.config.Symbols.Center(), f.config.Symbols.Row(), f.config.Symbols.Column())
@@ -354,19 +354,27 @@ func (c *Colorized) formatCell(content string, width int, padding tw.Padding, al
// Calculate visual width of content
contentVisualWidth := twwidth.Width(content)
// Set default padding characters
// Set padding characters
padLeftCharStr := padding.Left
// if padLeftCharStr == tw.Empty {
// padLeftCharStr = tw.Space
//}
padRightCharStr := padding.Right
// if padRightCharStr == tw.Empty {
// padRightCharStr = tw.Space
//}
// Determine the character to use for alignment filling.
// We default to the padding character defined for that side.
// If the padding character is empty (e.g. Overwrite: true), we MUST fallback to Space
// for the alignment calculation to prevent the content from shifting incorrectly.
alignFillLeft := padLeftCharStr
if alignFillLeft == tw.Empty {
alignFillLeft = tw.Space
}
alignFillRight := padRightCharStr
if alignFillRight == tw.Empty {
alignFillRight = tw.Space
}
// Calculate padding widths
definedPadLeftWidth := twwidth.Width(padLeftCharStr)
definedPadRightWidth := twwidth.Width(padRightCharStr)
// Calculate available width for content and alignment
availableForContentAndAlign := max(width-definedPadLeftWidth-definedPadRightWidth, 0)
@@ -381,21 +389,27 @@ func (c *Colorized) formatCell(content string, width int, padding tw.Padding, al
remainingSpaceForAlignment := max(availableForContentAndAlign-contentVisualWidth, 0)
// Apply alignment padding
// Note: We use tw.Pad* helpers here instead of strings.Repeat to handle multi-byte fill chars correctly.
leftAlignmentPadSpaces := tw.Empty
rightAlignmentPadSpaces := tw.Empty
switch align {
case tw.AlignLeft:
rightAlignmentPadSpaces = strings.Repeat(tw.Space, remainingSpaceForAlignment)
rightAlignmentPadSpaces = tw.PadRight(tw.Empty, alignFillRight, remainingSpaceForAlignment)
case tw.AlignRight:
leftAlignmentPadSpaces = strings.Repeat(tw.Space, remainingSpaceForAlignment)
leftAlignmentPadSpaces = tw.PadLeft(tw.Empty, alignFillLeft, remainingSpaceForAlignment)
case tw.AlignCenter:
leftSpacesCount := remainingSpaceForAlignment / 2
rightSpacesCount := remainingSpaceForAlignment - leftSpacesCount
leftAlignmentPadSpaces = strings.Repeat(tw.Space, leftSpacesCount)
rightAlignmentPadSpaces = strings.Repeat(tw.Space, rightSpacesCount)
if leftSpacesCount > 0 {
leftAlignmentPadSpaces = tw.PadLeft(tw.Empty, alignFillLeft, leftSpacesCount)
}
if rightSpacesCount > 0 {
rightAlignmentPadSpaces = tw.PadRight(tw.Empty, alignFillRight, rightSpacesCount)
}
default:
// Default to left alignment
rightAlignmentPadSpaces = strings.Repeat(tw.Space, remainingSpaceForAlignment)
rightAlignmentPadSpaces = tw.PadRight(tw.Empty, alignFillRight, remainingSpaceForAlignment)
}
// Apply colors to content and padding
@@ -444,7 +458,7 @@ func (c *Colorized) formatCell(content string, width int, padding tw.Padding, al
sb.WriteString(coloredPadRight)
output := sb.String()
// Adjust output width if necessary
// Adjust output width if necessary (safety check)
currentVisualWidth := twwidth.Width(output)
if currentVisualWidth != width {
c.logger.Debugf("formatCell MISMATCH: content='%s', target_w=%d. Calculated parts width = %d. String: '%s'",

View File

@@ -64,7 +64,7 @@ func NewHTML(configs ...HTMLConfig) *HTML {
tableStarted: false,
tbodyStarted: false,
tfootStarted: false,
logger: ll.New("html"),
logger: ll.New("html").Disable(),
}
}

View File

@@ -37,7 +37,7 @@ func NewMarkdown(configs ...tw.Rendition) *Markdown {
if len(configs) > 0 {
cfg = mergeMarkdownConfig(cfg, configs[0])
}
return &Markdown{config: cfg, logger: ll.New("markdown")}
return &Markdown{config: cfg, logger: ll.New("markdown").Disable()}
}
// mergeMarkdownConfig combines user-provided config with Markdown defaults, enforcing Markdown-specific settings.

View File

@@ -36,7 +36,7 @@ func NewOcean(oceanConfig ...OceanConfig) *Ocean {
config: cfg,
oceanConfig: oCfg,
fixedWidths: tw.NewMapper[int, int](),
logger: ll.New("ocean"),
logger: ll.New("ocean").Disable(),
}
r.resetState()
return r

View File

@@ -139,7 +139,7 @@ func NewSVG(configs ...SVGConfig) *SVG {
allVisualLineData: make([][][]string, 3),
allVisualLineCtx: make([][]tw.Formatting, 3),
vMergeTrack: make(map[int]int),
logger: ll.New("svg"),
logger: ll.New("svg").Disable(),
}
for i := 0; i < 3; i++ {
r.allVisualLineData[i] = make([][]string, 0)

View File

@@ -7,6 +7,7 @@ import (
"reflect"
"runtime"
"strings"
"unicode"
"github.com/olekukonko/errors"
"github.com/olekukonko/ll"
@@ -412,6 +413,15 @@ func (t *Table) Options(opts ...Option) *Table {
t.logger = ll.New("table").Handler(lh.NewTextHandler(t.trace))
}
// Disable and suspend the logger before applying options to prevent premature
// debug output from renderer methods (e.g., Blueprint.Rendition) triggered by
// options like WithRendition. Without this, a previously-enabled logger would
// still be active on the renderer during option application, causing debug
// messages even when WithDebug(false) is being applied.
t.logger.Disable()
t.logger.Suspend()
t.renderer.Logger(t.logger)
// loop through options
for _, opt := range opts {
opt(t)
@@ -546,16 +556,37 @@ func (t *Table) Counters() []tw.Counter {
}
// Trimmer trims whitespace from a string based on the Tables configuration.
// It conditionally applies strings.TrimSpace to the input string if the TrimSpace behavior
// is enabled in t.config.Behavior, otherwise returning the string unchanged. This method
// is used in the logging library to format strings for tabular output, ensuring consistent
// display in log messages. Thread-safe as it only reads configuration and operates on the
// input string.
// It conditionally applies trimming based on TrimSpace and TrimTab settings.
//
// Behavior Matrix:
// - TrimSpace=On, TrimTab=On: Uses strings.TrimSpace (removes all Unicode space including \t).
// - TrimSpace=On, TrimTab=Off: Removes spaces/newlines but PRESERVES tabs.
// - TrimSpace=Off, TrimTab=On: Removes only tabs.
// - TrimSpace=Off, TrimTab=Off: Returns string unchanged.
func (t *Table) Trimmer(str string) string {
if t.config.Behavior.TrimSpace.Enabled() {
trimSpace := t.config.Behavior.TrimSpace.Enabled()
trimTab := t.config.Behavior.TrimTab.Enabled()
// Fast Path 1: If both are enabled (Default), use the stdlib optimized TrimSpace
if trimSpace && trimTab {
return strings.TrimSpace(str)
}
return str
// Fast Path 2: If both are disabled, return raw string
if !trimSpace && !trimTab {
return str
}
// Granular Trimming via TrimFunc
return strings.TrimFunc(str, func(r rune) bool {
if twwidth.IsTab(r) {
return trimTab // Return true to trim if TrimTab is On
}
if trimSpace {
return unicode.IsSpace(r) // Trim other whitespace if TrimSpace is On
}
return false
})
}
// appendSingle adds a single row to the table's row data.
@@ -935,6 +966,13 @@ func (t *Table) prepareContent(cells []string, config tw.CellConfig) [][]string
cellContent = t.Trimmer(cellContent)
if strings.Contains(cellContent, twwidth.TabString.String()) {
// Get the detected width from the singleton
width := twwidth.TabWidth()
spaces := strings.Repeat(tw.Space, width)
cellContent = strings.ReplaceAll(cellContent, twwidth.TabString.String(), spaces)
}
colPad := config.Padding.Global
if i < len(config.Padding.PerColumn) && config.Padding.PerColumn[i].Paddable() {
colPad = config.Padding.PerColumn[i]
@@ -956,7 +994,7 @@ func (t *Table) prepareContent(cells []string, config tw.CellConfig) [][]string
switch config.Formatting.AutoWrap {
case tw.WrapNormal:
var wrapped []string
if t.config.Behavior.TrimSpace.Enabled() {
if t.config.Behavior.TrimSpace.Enabled() && t.config.Behavior.TrimTab.Enabled() {
wrapped, _ = twwarp.WrapString(line, effectiveContentMaxWidth)
} else {
wrapped, _ = twwarp.WrapStringWithSpaces(line, effectiveContentMaxWidth)

View File

@@ -166,8 +166,9 @@ type Struct struct {
// Behavior defines settings that control table rendering behaviors, such as column visibility and content formatting.
type Behavior struct {
AutoHide State // AutoHide determines whether empty columns are hidden. Ignored in streaming mode.
TrimSpace State // TrimSpace enables trimming of leading and trailing spaces from cell content.
TrimSpace State // TrimSpace determines trimming of leading and trailing spaces from cell content.
TrimLine State // TrimLine determines whether empty visual lines within a cell are collapsed.
TrimTab State // TrimTab determines trimming of leading and trailing tabs from cell content.
Header Control // Header specifies control settings for the table header.
Footer Control // Footer specifies control settings for the table footer.

14
vendor/modules.txt vendored
View File

@@ -255,16 +255,12 @@ github.com/cespare/xxhash/v2
# github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73
## explicit
github.com/cevaris/ordered_map
# github.com/clipperhouse/displaywidth v0.6.2
# github.com/clipperhouse/displaywidth v0.10.0
## explicit; go 1.18
github.com/clipperhouse/displaywidth
# github.com/clipperhouse/stringish v0.1.1
## explicit; go 1.18
github.com/clipperhouse/stringish
# github.com/clipperhouse/uax29/v2 v2.3.0
# github.com/clipperhouse/uax29/v2 v2.6.0
## explicit; go 1.18
github.com/clipperhouse/uax29/v2/graphemes
github.com/clipperhouse/uax29/v2/internal/iterators
# github.com/cloudflare/circl v1.6.3
## explicit; go 1.22.0
github.com/cloudflare/circl/dh/x25519
@@ -1209,15 +1205,15 @@ github.com/oklog/run
# github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6
## explicit; go 1.21
github.com/olekukonko/cat
# github.com/olekukonko/errors v1.1.0
# github.com/olekukonko/errors v1.2.0
## explicit; go 1.21
github.com/olekukonko/errors
# github.com/olekukonko/ll v0.1.4-0.20260115111900-9e59c2286df0
# github.com/olekukonko/ll v0.1.6
## explicit; go 1.21
github.com/olekukonko/ll
github.com/olekukonko/ll/lh
github.com/olekukonko/ll/lx
# github.com/olekukonko/tablewriter v1.1.3
# github.com/olekukonko/tablewriter v1.1.4
## explicit; go 1.21
github.com/olekukonko/tablewriter
github.com/olekukonko/tablewriter/pkg/twcache