build(deps): bump github.com/go-git/go-git/v5 from 5.4.2 to 5.11.0

Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.4.2 to 5.11.0.
- [Release notes](https://github.com/go-git/go-git/releases)
- [Commits](https://github.com/go-git/go-git/compare/v5.4.2...v5.11.0)

---
updated-dependencies:
- dependency-name: github.com/go-git/go-git/v5
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Author: dependabot[bot]
Date: 2023-12-27 15:14:52 +00:00
Committed by: GitHub
Parent: 9676524070
Commit: 417e42b693
228 changed files with 13865 additions and 3127 deletions

go.mod (15 changed lines)

@@ -114,15 +114,15 @@ require (
require (
contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20220930113650-c6815a8c17ad // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/RoaringBitmap/roaring v1.2.3 // indirect
github.com/acomagu/bufpipe v1.0.3 // indirect
github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/ajg/form v1.5.1 // indirect
github.com/alexedwards/argon2id v1.0.0 // indirect
@@ -167,6 +167,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crewjam/httperr v0.2.0 // indirect
github.com/crewjam/saml v0.4.14 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
@@ -185,9 +186,9 @@ require (
github.com/gdexlab/go-render v1.0.1 // indirect
github.com/go-acme/lego/v4 v4.4.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-git/go-billy/v5 v5.3.1 // indirect
github.com/go-git/go-git/v5 v5.4.2 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-git/go-git/v5 v5.11.0 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
github.com/go-kit/log v0.2.1 // indirect
@@ -283,6 +284,7 @@ require (
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/prometheus/alertmanager v0.25.1 // indirect
@@ -301,6 +303,7 @@ require (
github.com/sethvargo/go-password v0.2.0 // indirect
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect
github.com/skeema/knownhosts v1.2.1 // indirect
github.com/sony/gobreaker v0.5.0 // indirect
github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784 // indirect
github.com/spf13/pflag v1.0.5 // indirect
@@ -312,7 +315,7 @@ require (
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
github.com/trustelem/zxcvbn v1.0.1 // indirect
github.com/wk8/go-ordered-map v1.0.0 // indirect
github.com/xanzy/ssh-agent v0.3.2 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
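
go-git moves here from 5.4.2 to 5.11.0 as an indirect dependency, pulling in the new go-billy, gcfg, sha1cd, and knownhosts versions above. As a quick, hypothetical smoke test of the bumped module (none of this code is from this repository; the URL and temp-dir handling are purely illustrative), a shallow clone against v5.11.0 could look like:

```go
package main

import (
	"fmt"
	"os"

	git "github.com/go-git/go-git/v5"
)

func main() {
	// Illustrative only: shallow-clone a public repository into a
	// temporary directory using the bumped go-git module.
	dir, err := os.MkdirTemp("", "gogit-smoke-")
	if err != nil {
		fmt.Println("tempdir:", err)
		return
	}
	defer os.RemoveAll(dir)

	repo, err := git.PlainClone(dir, false, &git.CloneOptions{
		URL:   "https://github.com/go-git/go-git",
		Depth: 1,
	})
	if err != nil {
		fmt.Println("clone:", err)
		return
	}

	head, err := repo.Head()
	if err != nil {
		fmt.Println("head:", err)
		return
	}
	fmt.Println("HEAD:", head.Hash())
}
```

PlainClone, CloneOptions, and Repository.Head exist in both 5.4.2 and 5.11.0, so the same sketch exercises either version.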

go.sum (70 changed lines)

@@ -761,6 +761,8 @@ cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvo
contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
@@ -807,8 +809,6 @@ github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZC
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/MicahParks/keyfunc v1.9.0 h1:lhKd5xrFHLNOWrDc4Tyb/Q1AJ4LCzQ48GVJyVIID3+o=
github.com/MicahParks/keyfunc v1.9.0/go.mod h1:IdnCilugA0O/99dW+/MkvlyrsX8+L8+x95xuVNtM5jw=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
@@ -818,15 +818,12 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks=
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20220930113650-c6815a8c17ad h1:QeeqI2zxxgZVe11UrYFXXx6gVxPVF40ygekjBzEg4XY=
github.com/ProtonMail/go-crypto v0.0.0-20220930113650-c6815a8c17ad/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY=
github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/aduffeck/gowebdav v0.0.0-20231215102054-212d4a4374f6 h1:ws0yvsikTQdmheKINP16tBzAHdttrHwbz/q3Fgl9X1Y=
github.com/aduffeck/gowebdav v0.0.0-20231215102054-212d4a4374f6/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
@@ -853,8 +850,8 @@ github.com/aliyun/alibaba-cloud-sdk-go v1.61.976/go.mod h1:pUKYbK5JQ+1Dfxk80P0qx
github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 h1:I9YN9WMo3SUh7p/4wKeNvD/IQla3U3SUa61U7ul+xM4=
github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964/go.mod h1:eFiR01PwTcpbzXtdMces7zxg6utvFM5puiWHpWB8D/k=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
@@ -943,7 +940,7 @@ github.com/bombsimon/logrusr/v3 v3.1.0/go.mod h1:PksPPgSFEL2I52pla2glgCyyd2OqOHA
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
github.com/c-bata/go-prompt v0.2.5/go.mod h1:vFnjEGDIIA/Lib7giyE4E9c50Lvl8j0S+7FVlAwDAVw=
@@ -974,7 +971,6 @@ github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnx
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
@@ -1024,6 +1020,8 @@ github.com/cs3org/go-cs3apis v0.0.0-20231023073225-7748710e0781/go.mod h1:UXha4T
github.com/cs3org/reva/v2 v2.18.0 h1:ZfPE+0fgp1ptLIVF+xhMwwQRG1je2OFumMVqjE42AR4=
github.com/cs3org/reva/v2 v2.18.0/go.mod h1:QW31Q1IQ9ZCJMFv3u8/SdHSyLfCcSVNcRbqIJj+Y+7o=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -1069,7 +1067,8 @@ github.com/egirna/icap v0.0.0-20181108071049-d5ee18bd70bc h1:6IxmRbXV8WXVkcYcTzk
github.com/egirna/icap v0.0.0-20181108071049-d5ee18bd70bc/go.mod h1:FdVN2WHg7zOHhJ7kZQdDorfFhIfqZaHttjAzDDvAXHE=
github.com/egirna/icap-client v0.1.1 h1:UURZRA7+36bBmMgJZHB+W4d3hfp20pDUA/QL794C0ck=
github.com/egirna/icap-client v0.1.1/go.mod h1:6yHhnak1cKRyhDoRxnzlRKJbOTXPgh9Oe3tOs7Sq3vw=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/emvi/iso-639-1 v1.0.1 h1:4s6P8Uxc/RDkwCpxAr4NHOT/15a1swy9IQkB8PJCWVI=
@@ -1106,7 +1105,6 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@@ -1128,8 +1126,8 @@ github.com/getkin/kin-openapi v0.13.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s
github.com/ggwhite/go-masker v1.1.0 h1:kN/KIvktu2U+hd3KWrSlLj7xBGD1iBfc9/xdbVgFbRc=
github.com/ggwhite/go-masker v1.1.0/go.mod h1:xnTRHwrIU9FtBADwEjUC5Dy/BVedvoTxyOE7/d3CNwY=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
github.com/go-acme/lego/v4 v4.4.0 h1:uHhU5LpOYQOdp3aDU+XY2bajseu8fuExphTL1Ss6/Fc=
github.com/go-acme/lego/v4 v4.4.0/go.mod h1:l3+tFUFZb590dWcqhWZegynUthtaHJbG2fevUpoOOE0=
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
@@ -1151,15 +1149,14 @@ github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3
github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -1532,7 +1529,6 @@ github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df/go.mod h1:QMZY7/J/KSQEhKWFeDesPjMj+wCHReeknARU3wqlyN4=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -1547,7 +1543,6 @@ github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2H
github.com/jellydator/ttlcache/v3 v3.1.1 h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8=
github.com/jellydator/ttlcache/v3 v3.1.1/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -1582,7 +1577,6 @@ github.com/justinas/alice v1.2.0 h1:+MHSA/vccVCF4Uq37S42jwlkvI2Xzl7zTPCN5BnZNVo=
github.com/justinas/alice v1.2.0/go.mod h1:fN5HRH/reO/zrUflLfTN43t3vXvKzvZIENsNEe7i7qA=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -1643,8 +1637,6 @@ github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuz
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU=
github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To=
@@ -1757,7 +1749,6 @@ github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADym
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nrdcg/auroradns v1.0.1/go.mod h1:y4pc0i9QXYlFCWrhWrUSIETnZgrf4KuwjDIWmmXo3JI=
github.com/nrdcg/desec v0.5.0/go.mod h1:2ejvMazkav1VdDbv2HeQO7w+Ta1CGHqzQr27ZBYTuEQ=
github.com/nrdcg/dnspod-go v0.4.0/go.mod h1:vZSoFSFeQVm2gWLMkyX61LZ8HI3BaqtHZWgPTGKr6KQ=
@@ -1812,6 +1803,8 @@ github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk
github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1932,7 +1925,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/sethgrid/pester v1.2.0/go.mod h1:hEUINb4RqvDxtoCaU0BNT/HV4ig5kfgOasrf1xcvr0A=
@@ -1946,12 +1938,13 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -2041,9 +2034,8 @@ github.com/vinyldns/go-vinyldns v0.0.0-20200917153823-148a5f6b8f14/go.mod h1:RWc
github.com/vultr/govultr/v2 v2.0.0/go.mod h1:2PsEeg+gs3p/Fo5Pw8F9mv+DUBEOlrNZ8GmCTGmhOhs=
github.com/wk8/go-ordered-map v1.0.0 h1:BV7z+2PaK8LTSd/mWgY12HyMAo5CEgkHqbkVq2thqr8=
github.com/wk8/go-ordered-map v1.0.0/go.mod h1:9ZIbRunKbuvfPKyBP1SIKLcXNlv74YCOZ3t3VTS6gRk=
github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
github.com/xanzy/ssh-agent v0.3.2 h1:eKj4SX2Fe7mui28ZgnFW5fmTz1EIr7ugo5s6wDxdHBM=
github.com/xanzy/ssh-agent v0.3.2/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -2141,7 +2133,6 @@ go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -2157,13 +2148,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
@@ -2284,7 +2275,6 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -2399,7 +2389,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -2447,11 +2436,9 @@ golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -2973,7 +2960,6 @@ gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UD
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=

vendor/dario.cat/mergo/.deepsource.toml (new vendored file, 12 lines)

@@ -0,0 +1,12 @@
version = 1
test_patterns = [
"*_test.go"
]
[[analyzers]]
name = "go"
enabled = true
[analyzers.meta]
import_path = "dario.cat/mergo"

vendor/dario.cat/mergo/.gitignore (new vendored file, 33 lines)

@@ -0,0 +1,33 @@
#### joe made this: http://goel.io/joe
#### go ####
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
#### vim ####
# Swap
[._]*.s[a-v][a-z]
[._]*.sw[a-p]
[._]s[a-v][a-z]
[._]sw[a-p]
# Session
Session.vim
# Temporary
.netrwhist
*~
# Auto-generated tag files
tags

vendor/dario.cat/mergo/.travis.yml (new vendored file, 12 lines)

@@ -0,0 +1,12 @@
language: go
arch:
- amd64
- ppc64le
install:
- go get -t
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- go test -race -v ./...
after_script:
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN

vendor/dario.cat/mergo/CODE_OF_CONDUCT.md (new vendored file, 46 lines)

@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

vendor/dario.cat/mergo/CONTRIBUTING.md (new vendored file, 112 lines)

@@ -0,0 +1,112 @@
<!-- omit in toc -->
# Contributing to mergo
First off, thanks for taking the time to contribute! ❤️
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
> - Star the project
> - Tweet about it
> - Refer this project in your project's readme
> - Mention the project at local meetups and tell your friends/colleagues
<!-- omit in toc -->
## Table of Contents
- [Code of Conduct](#code-of-conduct)
- [I Have a Question](#i-have-a-question)
- [I Want To Contribute](#i-want-to-contribute)
- [Reporting Bugs](#reporting-bugs)
- [Suggesting Enhancements](#suggesting-enhancements)
## Code of Conduct
This project and everyone participating in it is governed by the
[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
By participating, you are expected to uphold this code. Please report unacceptable behavior
to <>.
## I Have a Question
> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
If you then still feel the need to ask a question and need clarification, we recommend the following:
- Open an [Issue](https://github.com/imdario/mergo/issues/new).
- Provide as much context as you can about what you're running into.
- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
We will then take care of the issue as soon as possible.
## I Want To Contribute
> ### Legal Notice <!-- omit in toc -->
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
### Reporting Bugs
<!-- omit in toc -->
#### Before Submitting a Bug Report
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
- Make sure that you are using the latest version.
- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
- Collect information about the bug:
- Stack trace (Traceback)
- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
- Possibly your input and the output
- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
<!-- omit in toc -->
#### How Do I Submit a Good Bug Report?
> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
<!-- You may add a PGP key to allow the messages to be sent encrypted as well. -->
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
- Explain the behavior you would expect and the actual behavior.
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
- Provide the information you collected in the previous section.
Once it's filed:
- The project team will label the issue accordingly.
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
### Suggesting Enhancements
This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
<!-- omit in toc -->
#### Before Submitting an Enhancement
- Make sure that you are using the latest version.
- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
<!-- omit in toc -->
#### How Do I Submit a Good Enhancement Suggestion?
Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
- Use a **clear and descriptive title** for the issue to identify the suggestion.
- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. <!-- this should only be included if the project has a GUI -->
- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
<!-- omit in toc -->
## Attribution
This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!

vendor/dario.cat/mergo/LICENSE (new vendored file, 28 lines)

@@ -0,0 +1,28 @@
Copyright (c) 2013 Dario Castañé. All rights reserved.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/dario.cat/mergo/README.md (new vendored file, 248 lines)

@@ -0,0 +1,248 @@
# Mergo
[![GitHub release][5]][6]
[![GoCard][7]][8]
[![Test status][1]][2]
[![OpenSSF Scorecard][21]][22]
[![OpenSSF Best Practices][19]][20]
[![Coverage status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA status][13]][14]
[![GoDoc][3]][4]
[![Become my sponsor][15]][16]
[![Tidelift][17]][18]
[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://img.shields.io/github/release/imdario/mergo.svg
[6]: https://github.com/imdario/mergo/releases
[7]: https://goreportcard.com/badge/imdario/mergo
[8]: https://goreportcard.com/report/github.com/imdario/mergo
[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[10]: https://coveralls.io/github/imdario/mergo?branch=master
[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://img.shields.io/github/sponsors/imdario
[16]: https://github.com/sponsors/imdario
[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
[20]: https://bestpractices.coreinfrastructure.org/projects/7177
[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
## Status
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important notes
#### 1.0.0
In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
#### 0.3.9
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
### Donations
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
### Mergo in the wild
- [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch)
- [Shopify/themekit](https://github.com/Shopify/themekit)
- [imdario/zas](https://github.com/imdario/zas)
- [matcornic/hermes](https://github.com/matcornic/hermes)
- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
- [kataras/iris](https://github.com/kataras/iris)
- [michaelsauter/crane](https://github.com/michaelsauter/crane)
- [go-task/task](https://github.com/go-task/task)
- [sensu/uchiwa](https://github.com/sensu/uchiwa)
- [ory/hydra](https://github.com/ory/hydra)
- [sisatech/vcli](https://github.com/sisatech/vcli)
- [dairycart/dairycart](https://github.com/dairycart/dairycart)
- [projectcalico/felix](https://github.com/projectcalico/felix)
- [resin-os/balena](https://github.com/resin-os/balena)
- [go-kivik/kivik](https://github.com/go-kivik/kivik)
- [Telefonica/govice](https://github.com/Telefonica/govice)
- [supergiant/supergiant](supergiant/supergiant)
- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
- [EagerIO/Stout](https://github.com/EagerIO/Stout)
- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
- [russross/canvasassignments](https://github.com/russross/canvasassignments)
- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
- [divshot/gitling](https://github.com/divshot/gitling)
- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
- [elwinar/rambler](https://github.com/elwinar/rambler)
- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
- [jfbus/impressionist](https://github.com/jfbus/impressionist)
- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
- [thoas/picfit](https://github.com/thoas/picfit)
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
- [tjpnz/structbot](https://github.com/tjpnz/structbot)
## Install
go get dario.cat/mergo
// use in your .go code
import (
"dario.cat/mergo"
)
## Usage
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
```go
if err := mergo.Merge(&dst, src); err != nil {
// ...
}
```
Also, you can merge overwriting values using the transformer `WithOverride`.
```go
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
// ...
}
```
Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
```go
if err := mergo.Map(&dst, srcMap); err != nil {
// ...
}
```
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
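A minimal sketch of that caveat, using hypothetical `Outer`/`Inner` types (this snippet is not part of the vendored README): the nested struct is left as a struct value inside the resulting map rather than being flattened into a nested `map[string]interface{}`.
```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Inner struct{ Name string }

type Outer struct {
	ID    int
	Inner Inner
}

func main() {
	dst := map[string]interface{}{}
	src := Outer{ID: 7, Inner: Inner{Name: "nested"}}

	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println("map failed:", err)
		return
	}
	// Each value keeps its original Go type; the nested struct is stored
	// as an Inner value, not converted into a map[string]interface{}.
	for k, v := range dst {
		fmt.Printf("%s -> %T\n", k, v)
	}
}
```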
Here is a nice example:
```go
package main
import (
"fmt"
"dario.cat/mergo"
)
type Foo struct {
A string
B int64
}
func main() {
src := Foo{
A: "one",
B: 2,
}
dest := Foo{
A: "two",
}
mergo.Merge(&dest, src)
fmt.Println(dest)
// Will print
// {two 2}
}
```
Note: if tests are failing due to a missing package, please execute:
go get gopkg.in/yaml.v3
### Transformers
Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
```go
package main
import (
"fmt"
"dario.cat/mergo"
"reflect"
"time"
)
type timeTransformer struct {
}
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
if typ == reflect.TypeOf(time.Time{}) {
return func(dst, src reflect.Value) error {
if dst.CanSet() {
isZero := dst.MethodByName("IsZero")
result := isZero.Call([]reflect.Value{})
if result[0].Bool() {
dst.Set(src)
}
}
return nil
}
}
return nil
}
type Snapshot struct {
Time time.Time
// ...
}
func main() {
src := Snapshot{time.Now()}
dest := Snapshot{}
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
fmt.Println(dest)
// Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
```
## Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
## About
Written by [Dario Castañé](http://dario.im).
## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)

vendor/dario.cat/mergo/SECURITY.md (new vendored file, 14 lines)

@@ -0,0 +1,14 @@
# Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 0.3.x | :white_check_mark: |
| < 0.3 | :x: |
## Security contact information
To report a security vulnerability, please use the
[Tidelift security contact](https://tidelift.com/security).
Tidelift will coordinate the fix and disclosure.

vendor/dario.cat/mergo/doc.go (new vendored file, 148 lines)

@@ -0,0 +1,148 @@
// Copyright 2013 Dario Castañé. All rights reserved.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
# Status
It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
# Important notes
1.0.0
In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
0.3.9
Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
# Install
Do your usual installation procedure:
go get dario.cat/mergo
// use in your .go code
import (
"dario.cat/mergo"
)
# Usage
You can only merge same-type structs with exported fields initialized to the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will merge any exported field recursively. It won't merge empty struct values either, as they are zero values too. Also, maps are merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
if err := mergo.Merge(&dst, src); err != nil {
// ...
}
Also, you can merge while overwriting values using the option WithOverride.
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
// ...
}
Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
if err := mergo.Map(&dst, srcMap); err != nil {
// ...
}
Warning: if you map a struct to a map, it won't be done recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will just be assigned as values.
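For instance (a minimal sketch; the Inner and Outer types are hypothetical, not part of Mergo), a nested struct is assigned as a struct value rather than expanded into a nested map:
type Inner struct{ Name string }
type Outer struct{ In Inner }
dst := map[string]interface{}{}
if err := mergo.Map(&dst, Outer{In: Inner{Name: "x"}}); err != nil {
// ...
}
// dst["in"] holds an Inner value, not a map[string]interface{}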
Here is a nice example:
package main
import (
"fmt"
"dario.cat/mergo"
)
type Foo struct {
A string
B int64
}
func main() {
src := Foo{
A: "one",
B: 2,
}
dest := Foo{
A: "two",
}
mergo.Merge(&dest, src)
fmt.Println(dest)
// Will print
// {two 2}
}
# Transformers
Transformers allow merging specific types differently than the default behavior. In other words, you can customize how some types are merged. For example, time.Time is a struct; it doesn't have a zero value in the usual sense, but IsZero can return true because its fields hold zero values. How can we merge a non-zero time.Time?
package main
import (
"fmt"
"dario.cat/mergo"
"reflect"
"time"
)
type timeTransformer struct {
}
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
if typ == reflect.TypeOf(time.Time{}) {
return func(dst, src reflect.Value) error {
if dst.CanSet() {
isZero := dst.MethodByName("IsZero")
result := isZero.Call([]reflect.Value{})
if result[0].Bool() {
dst.Set(src)
}
}
return nil
}
}
return nil
}
type Snapshot struct {
Time time.Time
// ...
}
func main() {
src := Snapshot{time.Now()}
dest := Snapshot{}
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
fmt.Println(dest)
// Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
# Contact me
If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
# About
Written by Dario Castañé: https://da.rio.hn
# License
BSD 3-Clause license, as the Go language.
*/
package mergo

178
vendor/dario.cat/mergo/map.go vendored Normal file
View File

@@ -0,0 +1,178 @@
// Copyright 2014 Dario Castañé. All rights reserved.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on src/pkg/reflect/deepequal.go from official
// golang's stdlib.
package mergo
import (
"fmt"
"reflect"
"unicode"
"unicode/utf8"
)
func changeInitialCase(s string, mapper func(rune) rune) string {
if s == "" {
return s
}
r, n := utf8.DecodeRuneInString(s)
return string(mapper(r)) + s[n:]
}
func isExported(field reflect.StructField) bool {
r, _ := utf8.DecodeRuneInString(field.Name)
return r >= 'A' && r <= 'Z'
}
// Traverses both values recursively, assigning src's field values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
if dst.CanAddr() {
addr := dst.UnsafeAddr()
h := 17 * addr
seen := visited[h]
typ := dst.Type()
for p := seen; p != nil; p = p.next {
if p.ptr == addr && p.typ == typ {
return nil
}
}
// Remember, remember...
visited[h] = &visit{typ, seen, addr}
}
zeroValue := reflect.Value{}
switch dst.Kind() {
case reflect.Map:
dstMap := dst.Interface().(map[string]interface{})
for i, n := 0, src.NumField(); i < n; i++ {
srcType := src.Type()
field := srcType.Field(i)
if !isExported(field) {
continue
}
fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower)
if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
dstMap[fieldName] = src.Field(i).Interface()
}
}
case reflect.Ptr:
if dst.IsNil() {
v := reflect.New(dst.Type().Elem())
dst.Set(v)
}
dst = dst.Elem()
fallthrough
case reflect.Struct:
srcMap := src.Interface().(map[string]interface{})
for key := range srcMap {
config.overwriteWithEmptyValue = true
srcValue := srcMap[key]
fieldName := changeInitialCase(key, unicode.ToUpper)
dstElement := dst.FieldByName(fieldName)
if dstElement == zeroValue {
// We discard it because the field doesn't exist.
continue
}
srcElement := reflect.ValueOf(srcValue)
dstKind := dstElement.Kind()
srcKind := srcElement.Kind()
if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
srcElement = srcElement.Elem()
srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
} else if dstKind == reflect.Ptr {
// Can this work? I guess it can't.
if srcKind != reflect.Ptr && srcElement.CanAddr() {
srcPtr := srcElement.Addr()
srcElement = reflect.ValueOf(srcPtr)
srcKind = reflect.Ptr
}
}
if !srcElement.IsValid() {
continue
}
if srcKind == dstKind {
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
} else if srcKind == reflect.Map {
if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
} else {
return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
}
}
}
return
}
// Map sets fields' values in dst from src.
// src can be a map with string keys or a struct. dst must be the opposite:
// if src is a map, dst must be a valid pointer to struct. If src is a struct,
// dst must be map[string]interface{}.
// It won't merge unexported (private) fields and will do recursively
// any exported field.
// If dst is a map, keys will be src fields' names in lower camel case.
// A key in src that doesn't match a field in dst will be skipped. This
// doesn't apply if dst is a map.
// This is a separate method from Merge because it is cleaner and it keeps sane
// semantics: merging equal types, mapping different (restricted) types.
func Map(dst, src interface{}, opts ...func(*Config)) error {
return _map(dst, src, opts...)
}
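// A minimal usage sketch (the Foo type here is hypothetical, not part of this
// package): mapping a struct into a map stores its exported fields under
// lower-camel-case keys.
//
//	type Foo struct{ A string }
//	m := map[string]interface{}{}
//	_ = Map(&m, Foo{A: "one"}) // m["a"] == "one"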
// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: Use Map(…) with WithOverride
func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
return _map(dst, src, append(opts, WithOverride)...)
}
func _map(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerArgument
}
var (
vDst, vSrc reflect.Value
err error
)
config := &Config{}
for _, opt := range opts {
opt(config)
}
if vDst, vSrc, err = resolveValues(dst, src); err != nil {
return err
}
// To be friction-less, we redirect equal-type arguments
// to deepMerge. Only because arguments can be anything.
if vSrc.Kind() == vDst.Kind() {
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}
switch vSrc.Kind() {
case reflect.Struct:
if vDst.Kind() != reflect.Map {
return ErrExpectedMapAsDestination
}
case reflect.Map:
if vDst.Kind() != reflect.Struct {
return ErrExpectedStructAsDestination
}
default:
return ErrNotSupported
}
return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}

409
vendor/dario.cat/mergo/merge.go vendored Normal file
View File

@@ -0,0 +1,409 @@
// Copyright 2013 Dario Castañé. All rights reserved.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on src/pkg/reflect/deepequal.go from official
// golang's stdlib.
package mergo
import (
"fmt"
"reflect"
)
func hasMergeableFields(dst reflect.Value) (exported bool) {
for i, n := 0, dst.NumField(); i < n; i++ {
field := dst.Type().Field(i)
if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
exported = exported || hasMergeableFields(dst.Field(i))
} else if isExportedComponent(&field) {
exported = exported || len(field.PkgPath) == 0
}
}
return
}
func isExportedComponent(field *reflect.StructField) bool {
pkgPath := field.PkgPath
if len(pkgPath) > 0 {
return false
}
c := field.Name[0]
if 'a' <= c && c <= 'z' || c == '_' {
return false
}
return true
}
type Config struct {
Transformers Transformers
Overwrite bool
ShouldNotDereference bool
AppendSlice bool
TypeCheck bool
overwriteWithEmptyValue bool
overwriteSliceWithEmptyValue bool
sliceDeepCopy bool
debug bool
}
type Transformers interface {
Transformer(reflect.Type) func(dst, src reflect.Value) error
}
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
typeCheck := config.TypeCheck
overwriteWithEmptySrc := config.overwriteWithEmptyValue
overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
sliceDeepCopy := config.sliceDeepCopy
if !src.IsValid() {
return
}
if dst.CanAddr() {
addr := dst.UnsafeAddr()
h := 17 * addr
seen := visited[h]
typ := dst.Type()
for p := seen; p != nil; p = p.next {
if p.ptr == addr && p.typ == typ {
return nil
}
}
// Remember, remember...
visited[h] = &visit{typ, seen, addr}
}
if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return
}
}
switch dst.Kind() {
case reflect.Struct:
if hasMergeableFields(dst) {
for i, n := 0, dst.NumField(); i < n; i++ {
if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
return
}
}
} else {
if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
dst.Set(src)
}
}
case reflect.Map:
if dst.IsNil() && !src.IsNil() {
if dst.CanSet() {
dst.Set(reflect.MakeMap(dst.Type()))
} else {
dst = src
return
}
}
if src.Kind() != reflect.Map {
if overwrite && dst.CanSet() {
dst.Set(src)
}
return
}
for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key)
if !srcElement.IsValid() {
continue
}
dstElement := dst.MapIndex(key)
switch srcElement.Kind() {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
if srcElement.IsNil() {
if overwrite {
dst.SetMapIndex(key, srcElement)
}
continue
}
fallthrough
default:
if !srcElement.CanInterface() {
continue
}
switch reflect.TypeOf(srcElement.Interface()).Kind() {
case reflect.Struct:
fallthrough
case reflect.Ptr:
fallthrough
case reflect.Map:
srcMapElm := srcElement
dstMapElm := dstElement
if srcMapElm.CanInterface() {
srcMapElm = reflect.ValueOf(srcMapElm.Interface())
if dstMapElm.IsValid() {
dstMapElm = reflect.ValueOf(dstMapElm.Interface())
}
}
if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
return
}
case reflect.Slice:
srcSlice := reflect.ValueOf(srcElement.Interface())
var dstSlice reflect.Value
if !dstElement.IsValid() || dstElement.IsNil() {
dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
} else {
dstSlice = reflect.ValueOf(dstElement.Interface())
}
if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
if typeCheck && srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = srcSlice
} else if config.AppendSlice {
if srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
} else if sliceDeepCopy {
i := 0
for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
srcElement := srcSlice.Index(i)
dstElement := dstSlice.Index(i)
if srcElement.CanInterface() {
srcElement = reflect.ValueOf(srcElement.Interface())
}
if dstElement.CanInterface() {
dstElement = reflect.ValueOf(dstElement.Interface())
}
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
}
}
dst.SetMapIndex(key, dstSlice)
}
}
if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
continue
}
if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
continue
}
}
if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
dst.SetMapIndex(key, srcElement)
}
}
// Ensure that all keys in dst are deleted if they are not in src.
if overwriteWithEmptySrc {
for _, key := range dst.MapKeys() {
srcElement := src.MapIndex(key)
if !srcElement.IsValid() {
dst.SetMapIndex(key, reflect.Value{})
}
}
}
case reflect.Slice:
if !dst.CanSet() {
break
}
if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
dst.Set(src)
} else if config.AppendSlice {
if src.Type() != dst.Type() {
return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
}
dst.Set(reflect.AppendSlice(dst, src))
} else if sliceDeepCopy {
for i := 0; i < src.Len() && i < dst.Len(); i++ {
srcElement := src.Index(i)
dstElement := dst.Index(i)
if srcElement.CanInterface() {
srcElement = reflect.ValueOf(srcElement.Interface())
}
if dstElement.CanInterface() {
dstElement = reflect.ValueOf(dstElement.Interface())
}
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
}
}
case reflect.Ptr:
fallthrough
case reflect.Interface:
if isReflectNil(src) {
if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
dst.Set(src)
}
break
}
if src.Kind() != reflect.Interface {
if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src)
}
} else if src.Kind() == reflect.Ptr {
if !config.ShouldNotDereference {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
} else {
if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
dst.Set(src)
}
}
} else if dst.Elem().Type() == src.Type() {
if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
return
}
} else {
return ErrDifferentArgumentsTypes
}
break
}
if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src)
}
break
}
if dst.Elem().Kind() == src.Elem().Kind() {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
break
}
default:
mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
if mustSet {
if dst.CanSet() {
dst.Set(src)
} else {
dst = src
}
}
}
return
}
// Merge will fill any empty value-type attributes on the dst struct using corresponding
// src attributes if they themselves are not empty. dst and src must be valid same-type structs
// and dst must be a pointer to struct.
// It won't merge unexported (private) fields and will merge any exported field recursively.
func Merge(dst, src interface{}, opts ...func(*Config)) error {
return merge(dst, src, opts...)
}
// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: use Merge(…) with WithOverride
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
return merge(dst, src, append(opts, WithOverride)...)
}
// WithTransformers adds transformers to merge, allowing customization of how some types are merged.
func WithTransformers(transformers Transformers) func(*Config) {
return func(config *Config) {
config.Transformers = transformers
}
}
// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
func WithOverride(config *Config) {
config.Overwrite = true
}
// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
func WithOverwriteWithEmptyValue(config *Config) {
config.Overwrite = true
config.overwriteWithEmptyValue = true
}
// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
func WithOverrideEmptySlice(config *Config) {
config.overwriteSliceWithEmptyValue = true
}
// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
// (i.e. a non-nil pointer is never considered empty).
func WithoutDereference(config *Config) {
config.ShouldNotDereference = true
}
// WithAppendSlice will make merge append slices instead of overwriting it.
func WithAppendSlice(config *Config) {
config.AppendSlice = true
}
// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
func WithTypeCheck(config *Config) {
config.TypeCheck = true
}
// WithSliceDeepCopy will merge slice elements one by one, setting the Overwrite flag.
func WithSliceDeepCopy(config *Config) {
config.sliceDeepCopy = true
config.Overwrite = true
}
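// A minimal sketch of combining options (the Conf type is hypothetical, not
// part of this package):
//
//	type Conf struct{ Hosts []string }
//	dst := Conf{Hosts: []string{"a"}}
//	src := Conf{Hosts: []string{"b"}}
//	_ = Merge(&dst, src, WithOverride, WithAppendSlice)
//	// dst.Hosts == []string{"a", "b"}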
func merge(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerArgument
}
var (
vDst, vSrc reflect.Value
err error
)
config := &Config{}
for _, opt := range opts {
opt(config)
}
if vDst, vSrc, err = resolveValues(dst, src); err != nil {
return err
}
if vDst.Type() != vSrc.Type() {
return ErrDifferentArgumentsTypes
}
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}
// isReflectNil reports whether the provided reflect value is nil.
func isReflectNil(v reflect.Value) bool {
k := v.Kind()
switch k {
case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
// Both interface and slice are nil if first word is 0.
// Both are always bigger than a word; assume flagIndir.
return v.IsNil()
default:
return false
}
}

81
vendor/dario.cat/mergo/mergo.go vendored Normal file
View File

@@ -0,0 +1,81 @@
// Copyright 2013 Dario Castañé. All rights reserved.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on src/pkg/reflect/deepequal.go from official
// golang's stdlib.
package mergo
import (
"errors"
"reflect"
)
// Errors reported by Mergo when it finds invalid arguments.
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerArgument = errors.New("dst must be a pointer")
)
// During deepMerge, must keep track of checks that are
// in progress. The comparison algorithm assumes that all
// checks in progress are true when it reencounters them.
// Visited are stored in a map indexed by 17 * a1 + a2;
type visit struct {
typ reflect.Type
next *visit
ptr uintptr
}
// From src/pkg/encoding/json/encode.go.
func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
if v.IsNil() {
return true
}
if shouldDereference {
return isEmptyValue(v.Elem(), shouldDereference)
}
return false
case reflect.Func:
return v.IsNil()
case reflect.Invalid:
return true
}
return false
}
func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
if dst == nil || src == nil {
err = ErrNilArguments
return
}
vDst = reflect.ValueOf(dst).Elem()
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}
vSrc = reflect.ValueOf(src)
// We check if vSrc is a pointer to dereference it.
if vSrc.Kind() == reflect.Ptr {
vSrc = vSrc.Elem()
}
return
}

View File

@@ -191,7 +191,7 @@ func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int,
return x3, y3, z3
}
//TODO: double check if it is okay
// TODO: double check if it is okay
// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
// We have a slight problem in that the identity of the group (the
@@ -239,7 +239,7 @@ func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
//TODO: double check if it is okay
// TODO: double check if it is okay
// GenerateKey returns a public/private key pair. The private key is generated
// using the given reader, which must return random data.
func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) {

View File

@@ -80,4 +80,4 @@ func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int)
func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar))
}
}

View File

@@ -67,7 +67,7 @@ func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte {
if len(nonce) > e.nonceSize {
panic("crypto/eax: Nonce too long for this instance")
}
ret, out := byteutil.SliceForAppend(dst, len(plaintext) + e.tagSize)
ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize)
omacNonce := e.omacT(0, nonce)
omacAdata := e.omacT(1, adata)
@@ -85,7 +85,7 @@ func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte {
return ret
}
func (e* eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
if len(nonce) > e.nonceSize {
panic("crypto/eax: Nonce too long for this instance")
}

View File

@@ -41,7 +41,7 @@ func ShiftNBytesLeft(dst, x []byte, n int) {
bits := uint(n % 8)
l := len(dst)
for i := 0; i < l-1; i++ {
dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8 - bits))
dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits))
}
dst[l-1] = dst[l-1] << bits
@@ -56,7 +56,6 @@ func XorBytesMut(X, Y []byte) {
}
}
// XorBytes assumes equal input length, puts X XOR Y into Z
func XorBytes(Z, X, Y []byte) {
for i := 0; i < len(X); i++ {
@@ -67,10 +66,10 @@ func XorBytes(Z, X, Y []byte) {
// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X)
func RightXor(X, Y []byte) []byte {
offset := len(X) - len(Y)
xored := make([]byte, len(X));
xored := make([]byte, len(X))
copy(xored, X)
for i := 0; i < len(Y); i++ {
xored[offset + i] ^= Y[i]
xored[offset+i] ^= Y[i]
}
return xored
}
@@ -89,4 +88,3 @@ func SliceForAppend(in []byte, n int) (head, tail []byte) {
tail = head[len(in):]
return
}

View File

@@ -93,13 +93,13 @@ func NewOCBWithNonceAndTagSize(
return nil, ocbError("Custom tag length exceeds blocksize")
}
return &ocb{
block: block,
tagSize: tagSize,
nonceSize: nonceSize,
mask: initializeMaskTable(block),
block: block,
tagSize: tagSize,
nonceSize: nonceSize,
mask: initializeMaskTable(block),
reusableKtop: reusableKtop{
noncePrefix: nil,
Ktop: nil,
Ktop: nil,
},
}, nil
}

View File

@@ -4,21 +4,22 @@ package ocb
var rfc7253TestVectorTaglen96 = struct {
key, nonce, header, plaintext, ciphertext string
}{"0F0E0D0C0B0A09080706050403020100",
"BBAA9988776655443322110D",
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
"1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"}
"BBAA9988776655443322110D",
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
"1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"}
var rfc7253AlgorithmTest = []struct {
KEYLEN, TAGLEN int
OUTPUT string }{
{128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"},
{192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"},
{256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"},
{128, 96, "77A3D8E73589158D25D01209"},
{192, 96, "05D56EAD2752C86BE6932C5E"},
{256, 96, "5458359AC23B0CBA9E6330DD"},
{128, 64, "192C9B7BD90BA06A"},
{192, 64, "0066BC6E0EF34E24"},
{256, 64, "7D4EA5D445501CBE"},
}
OUTPUT string
}{
{128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"},
{192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"},
{256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"},
{128, 96, "77A3D8E73589158D25D01209"},
{192, 96, "05D56EAD2752C86BE6932C5E"},
{256, 96, "5458359AC23B0CBA9E6330DD"},
{128, 64, "192C9B7BD90BA06A"},
{192, 64, "0066BC6E0EF34E24"},
{256, 64, "7D4EA5D445501CBE"},
}

View File

@@ -10,19 +10,22 @@ import (
"bufio"
"bytes"
"encoding/base64"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"io"
"github.com/ProtonMail/go-crypto/openpgp/errors"
)
// A Block represents an OpenPGP armored structure.
//
// The encoded form is:
// -----BEGIN Type-----
// Headers
//
// base64-encoded Bytes
// '=' base64 encoded checksum
// -----END Type-----
// -----BEGIN Type-----
// Headers
//
// base64-encoded Bytes
// '=' base64 encoded checksum
// -----END Type-----
//
// where Headers is a possibly empty sequence of Key: Value lines.
//
// Since the armored data can be very large, this package presents a streaming
@@ -206,12 +209,16 @@ TryNextBlock:
break
}
i := bytes.Index(line, []byte(": "))
i := bytes.Index(line, []byte(":"))
if i == -1 {
goto TryNextBlock
}
lastKey = string(line[:i])
p.Header[lastKey] = string(line[i+2:])
var value string
if len(line) > i+2 {
value = string(line[i+2:])
}
p.Header[lastKey] = value
}
p.lReader.in = r

View File

@@ -96,7 +96,8 @@ func (l *lineBreaker) Close() (err error) {
// trailer.
//
// It's built into a stack of io.Writers:
// encoding -> base64 encoder -> lineBreaker -> out
//
// encoding -> base64 encoder -> lineBreaker -> out
type encoding struct {
out io.Writer
breaker *lineBreaker

View File

@@ -34,7 +34,7 @@ type PrivateKey struct {
func NewPublicKey(curve ecc.ECDHCurve, kdfHash algorithm.Hash, kdfCipher algorithm.Cipher) *PublicKey {
return &PublicKey{
curve: curve,
curve: curve,
KDF: KDF{
Hash: kdfHash,
Cipher: kdfCipher,
@@ -167,7 +167,7 @@ func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLead
if _, err := param.Write(fingerprint[:20]); err != nil {
return nil, err
}
if param.Len() - len(curveOID) != 45 {
if param.Len()-len(curveOID) != 45 {
return nil, errors.New("ecdh: malformed KDF Param")
}
@@ -181,15 +181,19 @@ func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLead
j := zbLen - 1
if stripLeading {
// Work around old go crypto bug where the leading zeros are missing.
for ; i < zbLen && zb[i] == 0; i++ {}
for i < zbLen && zb[i] == 0 {
i++
}
}
if stripTrailing {
// Work around old OpenPGP.js bug where insignificant trailing zeros in
// this little-endian number are missing.
// (See https://github.com/openpgpjs/openpgpjs/pull/853.)
for ; j >= 0 && zb[j] == 0; j-- {}
for j >= 0 && zb[j] == 0 {
j--
}
}
if _, err := h.Write(zb[i:j+1]); err != nil {
if _, err := h.Write(zb[i : j+1]); err != nil {
return nil, err
}
if _, err := h.Write(param.Bytes()); err != nil {

View File

@@ -10,7 +10,7 @@ import (
)
type PublicKey struct {
X, Y *big.Int
X, Y *big.Int
curve ecc.ECDSACurve
}

View File

@@ -9,7 +9,7 @@ import (
)
type PublicKey struct {
X []byte
X []byte
curve ecc.EdDSACurve
}

View File

@@ -71,8 +71,8 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err
// returns the plaintext of the message. An error can result only if the
// ciphertext is invalid. Users should keep in mind that this is a padding
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
// be used to break the cryptosystem. See Chosen Ciphertext Attacks
// Against Protocols Based on the RSA Encryption Standard PKCS #1, Daniel
// Bleichenbacher, Advances in Cryptology (Crypto '98),
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
s := new(big.Int).Exp(c1, priv.X, priv.P)

24
vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go generated vendored Normal file
View File

@@ -0,0 +1,24 @@
package openpgp
import (
"crypto"
"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
)
// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
// hash id.
func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
return algorithm.HashIdToHash(id)
}
// HashIdToString returns the name of the hash function corresponding to the
// given OpenPGP hash id.
func HashIdToString(id byte) (name string, ok bool) {
return algorithm.HashIdToString(id)
}
// HashToHashId returns an OpenPGP hash id which corresponds the given Hash.
func HashToHashId(h crypto.Hash) (id byte, ok bool) {
return algorithm.HashToHashId(h)
}
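// A minimal usage sketch (8 is the OpenPGP hash id of SHA-256, as defined in
// the internal algorithm package):
//
//	if h, ok := HashIdToHash(8); ok {
//		_ = h // crypto.SHA256
//	}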

View File

@@ -16,7 +16,7 @@ type AEADMode uint8
const (
AEADModeEAX = AEADMode(1)
AEADModeOCB = AEADMode(2)
AEADModeGCM = AEADMode(100)
AEADModeGCM = AEADMode(3)
)
// TagLength returns the length in bytes of authentication tags.

View File

@@ -32,26 +32,25 @@ type Hash interface {
// The following vars mirror the crypto/Hash supported hash functions.
var (
MD5 Hash = cryptoHash{1, crypto.MD5}
SHA1 Hash = cryptoHash{2, crypto.SHA1}
RIPEMD160 Hash = cryptoHash{3, crypto.RIPEMD160}
SHA256 Hash = cryptoHash{8, crypto.SHA256}
SHA384 Hash = cryptoHash{9, crypto.SHA384}
SHA512 Hash = cryptoHash{10, crypto.SHA512}
SHA224 Hash = cryptoHash{11, crypto.SHA224}
SHA1 Hash = cryptoHash{2, crypto.SHA1}
SHA256 Hash = cryptoHash{8, crypto.SHA256}
SHA384 Hash = cryptoHash{9, crypto.SHA384}
SHA512 Hash = cryptoHash{10, crypto.SHA512}
SHA224 Hash = cryptoHash{11, crypto.SHA224}
SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256}
SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512}
)
// HashById represents the different hash functions specified for OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14
var (
HashById = map[uint8]Hash{
MD5.Id(): MD5,
SHA1.Id(): SHA1,
RIPEMD160.Id(): RIPEMD160,
SHA256.Id(): SHA256,
SHA384.Id(): SHA384,
SHA512.Id(): SHA512,
SHA224.Id(): SHA224,
SHA256.Id(): SHA256,
SHA384.Id(): SHA384,
SHA512.Id(): SHA512,
SHA224.Id(): SHA224,
SHA3_256.Id(): SHA3_256,
SHA3_512.Id(): SHA3_512,
}
)
@@ -68,13 +67,12 @@ func (h cryptoHash) Id() uint8 {
}
var hashNames = map[uint8]string{
MD5.Id(): "MD5",
SHA1.Id(): "SHA1",
RIPEMD160.Id(): "RIPEMD160",
SHA256.Id(): "SHA256",
SHA384.Id(): "SHA384",
SHA512.Id(): "SHA512",
SHA224.Id(): "SHA224",
SHA256.Id(): "SHA256",
SHA384.Id(): "SHA384",
SHA512.Id(): "SHA512",
SHA224.Id(): "SHA224",
SHA3_256.Id(): "SHA3-256",
SHA3_512.Id(): "SHA3-512",
}
func (h cryptoHash) String() string {
@@ -84,3 +82,62 @@ func (h cryptoHash) String() string {
}
return s
}
// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
// hash id.
func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
if hash, ok := HashById[id]; ok {
return hash.HashFunc(), true
}
return 0, false
}
// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP
// hash id, allowing sha1.
func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) {
if hash, ok := HashById[id]; ok {
return hash.HashFunc(), true
}
if id == SHA1.Id() {
return SHA1.HashFunc(), true
}
return 0, false
}
// HashIdToString returns the name of the hash function corresponding to the
// given OpenPGP hash id.
func HashIdToString(id byte) (name string, ok bool) {
if hash, ok := HashById[id]; ok {
return hash.String(), true
}
return "", false
}
// HashToHashId returns an OpenPGP hash id which corresponds the given Hash.
func HashToHashId(h crypto.Hash) (id byte, ok bool) {
for id, hash := range HashById {
if hash.HashFunc() == h {
return id, true
}
}
return 0, false
}
// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds the given Hash,
// allowing instances of SHA1
func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) {
for id, hash := range HashById {
if hash.HashFunc() == h {
return id, true
}
}
if h == SHA1.HashFunc() {
return SHA1.Id(), true
}
return 0, false
}

View File

@@ -9,7 +9,7 @@ import (
x25519lib "github.com/cloudflare/circl/dh/x25519"
)
type curve25519 struct {}
type curve25519 struct{}
func NewCurve25519() *curve25519 {
return &curve25519{}
@@ -21,14 +21,14 @@ func (c *curve25519) GetCurveName() string {
// MarshalBytePoint encodes the public point from native format, adding the prefix.
// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
func (c *curve25519) MarshalBytePoint(point [] byte) []byte {
func (c *curve25519) MarshalBytePoint(point []byte) []byte {
return append([]byte{0x40}, point...)
}
// UnmarshalBytePoint decodes the public point to native format, removing the prefix.
// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
func (c *curve25519) UnmarshalBytePoint(point []byte) []byte {
if len(point) != x25519lib.Size + 1 {
if len(point) != x25519lib.Size+1 {
return nil
}

View File

@@ -11,76 +11,76 @@ import (
type CurveInfo struct {
GenName string
Oid *encoding.OID
Curve Curve
Oid *encoding.OID
Curve Curve
}
var Curves = []CurveInfo{
{
// NIST P-256
GenName: "P256",
Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}),
Curve: NewGenericCurve(elliptic.P256()),
Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}),
Curve: NewGenericCurve(elliptic.P256()),
},
{
// NIST P-384
GenName: "P384",
Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}),
Curve: NewGenericCurve(elliptic.P384()),
Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}),
Curve: NewGenericCurve(elliptic.P384()),
},
{
// NIST P-521
GenName: "P521",
Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}),
Curve: NewGenericCurve(elliptic.P521()),
Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}),
Curve: NewGenericCurve(elliptic.P521()),
},
{
// SecP256k1
GenName: "SecP256k1",
Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}),
Curve: NewGenericCurve(bitcurves.S256()),
Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}),
Curve: NewGenericCurve(bitcurves.S256()),
},
{
// Curve25519
GenName: "Curve25519",
Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}),
Curve: NewCurve25519(),
Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}),
Curve: NewCurve25519(),
},
{
// X448
GenName: "Curve448",
Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}),
Curve: NewX448(),
Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}),
Curve: NewX448(),
},
{
// Ed25519
GenName: "Curve25519",
Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}),
Curve: NewEd25519(),
Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}),
Curve: NewEd25519(),
},
{
// Ed448
GenName: "Curve448",
Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}),
Curve: NewEd448(),
Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}),
Curve: NewEd448(),
},
{
// BrainpoolP256r1
GenName: "BrainpoolP256",
Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}),
Curve: NewGenericCurve(brainpool.P256r1()),
Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}),
Curve: NewGenericCurve(brainpool.P256r1()),
},
{
// BrainpoolP384r1
GenName: "BrainpoolP384",
Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}),
Curve: NewGenericCurve(brainpool.P384r1()),
Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}),
Curve: NewGenericCurve(brainpool.P384r1()),
},
{
// BrainpoolP512r1
GenName: "BrainpoolP512",
Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}),
Curve: NewGenericCurve(brainpool.P512r1()),
Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}),
Curve: NewGenericCurve(brainpool.P512r1()),
},
}

View File

@@ -38,7 +38,7 @@ type EdDSACurve interface {
type ECDHCurve interface {
Curve
MarshalBytePoint([]byte) (encoded []byte)
UnmarshalBytePoint(encoded []byte) ([]byte)
UnmarshalBytePoint(encoded []byte) []byte
MarshalByteSecret(d []byte) []byte
UnmarshalByteSecret(d []byte) []byte
GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error)

View File

@@ -10,7 +10,8 @@ import (
)
const ed25519Size = 32
type ed25519 struct {}
type ed25519 struct{}
func NewEd25519() *ed25519 {
return &ed25519{}
@@ -29,7 +30,7 @@ func (c *ed25519) MarshalBytePoint(x []byte) []byte {
// UnmarshalBytePoint decodes a point from prefixed format to native.
// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) {
if len(point) != ed25519lib.PublicKeySize + 1 {
if len(point) != ed25519lib.PublicKeySize+1 {
return nil
}
@@ -52,7 +53,7 @@ func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) {
// Handle stripped leading zeroes
d = make([]byte, ed25519lib.SeedSize)
copy(d[ed25519lib.SeedSize - len(s):], s)
copy(d[ed25519lib.SeedSize-len(s):], s)
return
}

View File

@@ -9,7 +9,7 @@ import (
ed448lib "github.com/cloudflare/circl/sign/ed448"
)
type ed448 struct {}
type ed448 struct{}
func NewEd448() *ed448 {
return &ed448{}
@@ -29,7 +29,7 @@ func (c *ed448) MarshalBytePoint(x []byte) []byte {
// UnmarshalBytePoint decodes a point from prefixed format to native.
// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) {
if len(point) != ed448lib.PublicKeySize + 1 {
if len(point) != ed448lib.PublicKeySize+1 {
return nil
}
@@ -48,7 +48,7 @@ func (c *ed448) MarshalByteSecret(d []byte) []byte {
// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) {
// Check prefixed size
if len(s) != ed448lib.SeedSize + 1 {
if len(s) != ed448lib.SeedSize+1 {
return nil
}
@@ -66,7 +66,7 @@ func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) {
// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format.
// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) {
if len(r) != ed448lib.SignatureSize + 1 {
if len(r) != ed448lib.SignatureSize+1 {
return nil
}

View File

@@ -9,7 +9,7 @@ import (
x448lib "github.com/cloudflare/circl/dh/x448"
)
type x448 struct {}
type x448 struct{}
func NewX448() *x448 {
return &x448{}
@@ -28,7 +28,7 @@ func (c *x448) MarshalBytePoint(point []byte) []byte {
// UnmarshalBytePoint decodes a point from prefixed format to native.
// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
func (c *x448) UnmarshalBytePoint(point []byte) []byte {
if len(point) != x448lib.Size + 1 {
if len(point) != x448lib.Size+1 {
return nil
}
@@ -44,7 +44,7 @@ func (c *x448) MarshalByteSecret(d []byte) []byte {
// UnmarshalByteSecret decodes a scalar from prefixed format to native.
// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
func (c *x448) UnmarshalByteSecret(d []byte) []byte {
if len(d) != x448lib.Size + 1 {
if len(d) != x448lib.Size+1 {
return nil
}

View File

@@ -82,27 +82,24 @@ func (t *Entity) addUserId(name, comment, email string, config *packet.Config, c
isPrimaryId := len(t.Identities) == 0
selfSignature := &packet.Signature{
Version: primary.PublicKey.Version,
SigType: packet.SigTypePositiveCert,
PubKeyAlgo: primary.PublicKey.PubKeyAlgo,
Hash: config.Hash(),
CreationTime: creationTime,
KeyLifetimeSecs: &keyLifetimeSecs,
IssuerKeyId: &primary.PublicKey.KeyId,
IssuerFingerprint: primary.PublicKey.Fingerprint,
IsPrimaryId: &isPrimaryId,
FlagsValid: true,
FlagSign: true,
FlagCertify: true,
MDC: true, // true by default, see 5.8 vs. 5.14
AEAD: config.AEAD() != nil,
V5Keys: config != nil && config.V5Keys,
}
selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config)
selfSignature.CreationTime = creationTime
selfSignature.KeyLifetimeSecs = &keyLifetimeSecs
selfSignature.IsPrimaryId = &isPrimaryId
selfSignature.FlagsValid = true
selfSignature.FlagSign = true
selfSignature.FlagCertify = true
selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14
selfSignature.SEIPDv2 = config.AEAD() != nil
// Set the PreferredHash for the SelfSignature from the packet.Config.
// If it is not the must-implement algorithm from rfc4880bis, append that.
selfSignature.PreferredHash = []uint8{hashToHashId(config.Hash())}
hash, ok := algorithm.HashToHashId(config.Hash())
if !ok {
return errors.UnsupportedError("unsupported preferred hash function")
}
selfSignature.PreferredHash = []uint8{hash}
if config.Hash() != crypto.SHA256 {
selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256))
}
@@ -123,9 +120,16 @@ func (t *Entity) addUserId(name, comment, email string, config *packet.Config, c
}
// And for DefaultMode.
selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())}
if config.AEAD().Mode() != packet.AEADModeEAX {
selfSignature.PreferredAEAD = append(selfSignature.PreferredAEAD, uint8(packet.AEADModeEAX))
modes := []uint8{uint8(config.AEAD().Mode())}
if config.AEAD().Mode() != packet.AEADModeOCB {
modes = append(modes, uint8(packet.AEADModeOCB))
}
// For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB)
for _, cipher := range selfSignature.PreferredSymmetric {
for _, mode := range modes {
selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode})
}
}
// User ID binding signature
@@ -153,42 +157,30 @@ func (e *Entity) AddSigningSubkey(config *packet.Config) error {
return err
}
sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw)
sub.IsSubkey = true
if config != nil && config.V5Keys {
sub.UpgradeToV5()
}
subkey := Subkey{
PublicKey: &sub.PublicKey,
PrivateKey: sub,
Sig: &packet.Signature{
Version: e.PrimaryKey.Version,
CreationTime: creationTime,
KeyLifetimeSecs: &keyLifetimeSecs,
SigType: packet.SigTypeSubkeyBinding,
PubKeyAlgo: e.PrimaryKey.PubKeyAlgo,
Hash: config.Hash(),
FlagsValid: true,
FlagSign: true,
IssuerKeyId: &e.PrimaryKey.KeyId,
EmbeddedSignature: &packet.Signature{
Version: e.PrimaryKey.Version,
CreationTime: creationTime,
SigType: packet.SigTypePrimaryKeyBinding,
PubKeyAlgo: sub.PublicKey.PubKeyAlgo,
Hash: config.Hash(),
IssuerKeyId: &e.PrimaryKey.KeyId,
},
},
}
if config != nil && config.V5Keys {
subkey.PublicKey.UpgradeToV5()
}
subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
subkey.Sig.CreationTime = creationTime
subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
subkey.Sig.FlagsValid = true
subkey.Sig.FlagSign = true
subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config)
subkey.Sig.EmbeddedSignature.CreationTime = creationTime
err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config)
if err != nil {
return err
}
subkey.PublicKey.IsSubkey = true
subkey.PrivateKey.IsSubkey = true
if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil {
err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
if err != nil {
return err
}
@@ -210,30 +202,24 @@ func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Ti
return err
}
sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw)
sub.IsSubkey = true
if config != nil && config.V5Keys {
sub.UpgradeToV5()
}
subkey := Subkey{
PublicKey: &sub.PublicKey,
PrivateKey: sub,
Sig: &packet.Signature{
Version: e.PrimaryKey.Version,
CreationTime: creationTime,
KeyLifetimeSecs: &keyLifetimeSecs,
SigType: packet.SigTypeSubkeyBinding,
PubKeyAlgo: e.PrimaryKey.PubKeyAlgo,
Hash: config.Hash(),
FlagsValid: true,
FlagEncryptStorage: true,
FlagEncryptCommunications: true,
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
if config != nil && config.V5Keys {
subkey.PublicKey.UpgradeToV5()
}
subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
subkey.Sig.CreationTime = creationTime
subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
subkey.Sig.FlagsValid = true
subkey.Sig.FlagEncryptStorage = true
subkey.Sig.FlagEncryptCommunications = true
subkey.PublicKey.IsSubkey = true
subkey.PrivateKey.IsSubkey = true
if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil {
err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
if err != nil {
return err
}

View File

@@ -150,11 +150,9 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
}
// If we don't have any candidate subkeys for encryption and
// the primary key doesn't have any usage metadata then we
// assume that the primary key is ok. Or, if the primary key is
// marked as ok to encrypt with, then we can obviously use it.
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
// If we don't have any subkeys for encryption and the primary key
// is marked as OK to encrypt with, then we can use it.
if i.SelfSignature.FlagsValid && i.SelfSignature.FlagEncryptCommunications &&
e.PrimaryKey.PubKeyAlgo.CanEncrypt() {
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true
}
@@ -162,7 +160,6 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
return Key{}, false
}
// CertificationKey return the best candidate Key for certifying a key with this
// Entity.
func (e *Entity) CertificationKey(now time.Time) (Key, bool) {
@@ -203,8 +200,8 @@ func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key,
var maxTime time.Time
for idx, subkey := range e.Subkeys {
if subkey.Sig.FlagsValid &&
(flags & packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) &&
(flags & packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) &&
(flags&packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) &&
(flags&packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) &&
subkey.PublicKey.PubKeyAlgo.CanSign() &&
!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
!subkey.Sig.SigExpired(now) &&
@@ -221,12 +218,11 @@ func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key,
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
}
// If we have no candidate subkey then we assume that it's ok to sign
// with the primary key. Or, if the primary key is marked as ok to
// sign with, then we can use it.
if !i.SelfSignature.FlagsValid || (
(flags & packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) &&
(flags & packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign)) &&
// If we don't have any subkeys for signing and the primary key
// is marked as OK to sign with, then we can use it.
if i.SelfSignature.FlagsValid &&
(flags&packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) &&
(flags&packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign) &&
e.PrimaryKey.PubKeyAlgo.CanSign() &&
(id == 0 || e.PrimaryKey.KeyId == id) {
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true
@@ -256,6 +252,44 @@ func (e *Entity) Revoked(now time.Time) bool {
return revoked(e.Revocations, now)
}
// EncryptPrivateKeys encrypts all non-encrypted keys in the entity with the same key
// derived from the provided passphrase. Public keys and dummy keys are ignored,
// and don't cause an error to be returned.
func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) error {
var keysToEncrypt []*packet.PrivateKey
// Add entity private key to encrypt.
if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted {
keysToEncrypt = append(keysToEncrypt, e.PrivateKey)
}
// Add subkeys to encrypt.
for _, sub := range e.Subkeys {
if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && !sub.PrivateKey.Encrypted {
keysToEncrypt = append(keysToEncrypt, sub.PrivateKey)
}
}
return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config)
}
// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase.
// Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored,
// and don't cause an error to be returned.
func (e *Entity) DecryptPrivateKeys(passphrase []byte) error {
var keysToDecrypt []*packet.PrivateKey
// Add entity private key to decrypt.
if e.PrivateKey != nil && !e.PrivateKey.Dummy() && e.PrivateKey.Encrypted {
keysToDecrypt = append(keysToDecrypt, e.PrivateKey)
}
// Add subkeys to decrypt.
for _, sub := range e.Subkeys {
if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted {
keysToDecrypt = append(keysToDecrypt, sub.PrivateKey)
}
}
return packet.DecryptPrivateKeys(keysToDecrypt, passphrase)
}
// Revoked returns whether the identity has been revoked by a self-signature.
// Note that third-party revocation signatures are not supported.
func (i *Identity) Revoked(now time.Time) bool {
@@ -303,7 +337,11 @@ func (el EntityList) KeysById(id uint64) (keys []Key) {
// the bitwise-OR of packet.KeyFlag* values.
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
for _, key := range el.KeysById(id) {
if key.SelfSignature != nil && key.SelfSignature.FlagsValid && requiredUsage != 0 {
if requiredUsage != 0 {
if key.SelfSignature == nil || !key.SelfSignature.FlagsValid {
continue
}
var usage byte
if key.SelfSignature.FlagCertify {
usage |= packet.KeyFlagCertify
@@ -331,7 +369,7 @@ func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
func (el EntityList) DecryptionKeys() (keys []Key) {
for _, e := range el {
for _, subKey := range e.Subkeys {
if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
if subKey.PrivateKey != nil && subKey.Sig.FlagsValid && (subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
}
}
@@ -466,7 +504,7 @@ EachPacket:
// Else, ignoring the signature as it does not follow anything
// we would know to attach it to.
case *packet.PrivateKey:
if pkt.IsSubkey == false {
if !pkt.IsSubkey {
packets.Unread(p)
break EachPacket
}
@@ -475,7 +513,7 @@ EachPacket:
return nil, err
}
case *packet.PublicKey:
if pkt.IsSubkey == false {
if !pkt.IsSubkey {
packets.Unread(p)
break EachPacket
}
@@ -751,18 +789,7 @@ func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Co
return errors.InvalidArgumentError("given identity string not found in Entity")
}
sig := &packet.Signature{
Version: certificationKey.PrivateKey.Version,
SigType: packet.SigTypeGenericCert,
PubKeyAlgo: certificationKey.PrivateKey.PubKeyAlgo,
Hash: config.Hash(),
CreationTime: config.Now(),
IssuerKeyId: &certificationKey.PrivateKey.KeyId,
}
if config.SigLifetime() != 0 {
sig.SigLifetimeSecs = &config.SigLifetimeSecs
}
sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config)
signingUserID := config.SigningUserId()
if signingUserID != "" {
@@ -783,16 +810,9 @@ func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Co
// specified reason code and text (RFC4880 section-5.2.3.23).
// If config is nil, sensible defaults will be used.
func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
revSig := &packet.Signature{
Version: e.PrimaryKey.Version,
CreationTime: config.Now(),
SigType: packet.SigTypeKeyRevocation,
PubKeyAlgo: e.PrimaryKey.PubKeyAlgo,
Hash: config.Hash(),
RevocationReason: &reason,
RevocationReasonText: reasonText,
IssuerKeyId: &e.PrimaryKey.KeyId,
}
revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config)
revSig.RevocationReason = &reason
revSig.RevocationReasonText = reasonText
if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil {
return err
@@ -809,16 +829,9 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea
return errors.InvalidArgumentError("given subkey is not associated with this key")
}
revSig := &packet.Signature{
Version: e.PrimaryKey.Version,
CreationTime: config.Now(),
SigType: packet.SigTypeSubkeyRevocation,
PubKeyAlgo: e.PrimaryKey.PubKeyAlgo,
Hash: config.Hash(),
RevocationReason: &reason,
RevocationReasonText: reasonText,
IssuerKeyId: &e.PrimaryKey.KeyId,
}
revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config)
revSig.RevocationReason = &reason
revSig.RevocationReasonText = reasonText
if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil {
return err

View File

@@ -518,3 +518,21 @@ XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d
QgqsfguR1PqPuJxpXV4bSr6CGAAAAA==
=MSvh
-----END PGP PRIVATE KEY BLOCK-----`
const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK-----
xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR
ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u
zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI
CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA
AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B
FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG
wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5
8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT
z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X
joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F
RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev
VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP
=Z8YJ
-----END PGP PRIVATE KEY BLOCK-----
`

View File

@@ -4,6 +4,14 @@ package packet
import "math/bits"
// CipherSuite contains a combination of Cipher and Mode
type CipherSuite struct {
// The cipher function
Cipher CipherFunction
// The AEAD mode of operation.
Mode AEADMode
}
// AEADConfig collects a number of AEAD parameters along with sensible defaults.
// A nil AEADConfig is valid and results in all default values.
type AEADConfig struct {
@@ -15,12 +23,13 @@ type AEADConfig struct {
// Mode returns the AEAD mode of operation.
func (conf *AEADConfig) Mode() AEADMode {
// If no preference is specified, OCB is used (which is mandatory to implement).
if conf == nil || conf.DefaultMode == 0 {
return AEADModeEAX
return AEADModeOCB
}
mode := conf.DefaultMode
if mode != AEADModeEAX && mode != AEADModeOCB &&
mode != AEADModeExperimentalGCM {
if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM {
panic("AEAD mode unsupported")
}
return mode
@@ -28,6 +37,8 @@ func (conf *AEADConfig) Mode() AEADMode {
// ChunkSizeByte returns the byte indicating the chunk size. The effective
// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6)
// limit to 16 = 4 MiB
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
func (conf *AEADConfig) ChunkSizeByte() byte {
if conf == nil || conf.ChunkSize == 0 {
return 12 // 1 << (12 + 6) == 262144 bytes
@@ -38,8 +49,8 @@ func (conf *AEADConfig) ChunkSizeByte() byte {
switch {
case exponent < 6:
exponent = 6
case exponent > 27:
exponent = 27
case exponent > 16:
exponent = 16
}
return byte(exponent - 6)
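
Concretely, the chunk size octet c maps to an effective chunk size of 1 << (c + 6) bytes, so the accepted exponent range (6 to 16 after this change) spans 64 bytes to 4 MiB. A standalone sketch of that mapping, independent of the packet code above:

package main

import "fmt"

// effectiveChunkSize mirrors the formula above: size = 1 << (chunkSizeByte + 6),
// so byte 0 means 64 bytes and byte 16 means 4 MiB.
func effectiveChunkSize(chunkSizeByte byte) uint64 {
	return uint64(1) << (chunkSizeByte + 6)
}

func main() {
	fmt.Println(effectiveChunkSize(12)) // 262144 (256 KiB), the library default
	fmt.Println(effectiveChunkSize(16)) // 4194304 (4 MiB), the maximum accepted
}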

View File

@@ -0,0 +1,264 @@
// Copyright (C) 2019 ProtonTech AG
package packet
import (
"bytes"
"crypto/cipher"
"encoding/binary"
"io"
"github.com/ProtonMail/go-crypto/openpgp/errors"
)
// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption.
type aeadCrypter struct {
aead cipher.AEAD
chunkSize int
initialNonce []byte
associatedData []byte // Chunk-independent associated data
chunkIndex []byte // Chunk counter
packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet
bytesProcessed int // Amount of plaintext bytes encrypted/decrypted
buffer bytes.Buffer // Buffered bytes across chunks
}
// computeNextNonce takes the incremental index and computes an eXclusive OR with
// the least significant 8 bytes of the receiver's initial nonce (see sec.
// 5.16.1 and 5.16.2). It returns the resulting nonce.
func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
return append(wo.initialNonce, wo.chunkIndex...)
}
nonce = make([]byte, len(wo.initialNonce))
copy(nonce, wo.initialNonce)
offset := len(wo.initialNonce) - 8
for i := 0; i < 8; i++ {
nonce[i+offset] ^= wo.chunkIndex[i]
}
return
}
// incrementIndex performs an integer increment by 1 of the integer represented by the
// slice, modifying it accordingly.
func (wo *aeadCrypter) incrementIndex() error {
index := wo.chunkIndex
if len(index) == 0 {
return errors.AEADError("Index has length 0")
}
for i := len(index) - 1; i >= 0; i-- {
if index[i] < 255 {
index[i]++
return nil
}
index[i] = 0
}
return errors.AEADError("cannot further increment index")
}
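
For the tag-20 packet, each chunk nonce is derived by XOR-ing the 8-byte big-endian chunk counter into the least significant 8 bytes of the initial nonce, and incrementIndex advances that counter between chunks. A standalone sketch with a hypothetical OCB-sized (15-byte) nonce:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical 15-byte initial nonce (the OCB nonce length) and the chunk counter.
	initialNonce := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
	chunkIndex := make([]byte, 8)
	binary.BigEndian.PutUint64(chunkIndex, 3) // about to process the fourth chunk

	// XOR the big-endian counter into the least significant 8 bytes of the nonce,
	// as computeNextNonce does for the AEAD Encrypted Data packet (tag 20).
	nonce := make([]byte, len(initialNonce))
	copy(nonce, initialNonce)
	offset := len(initialNonce) - 8
	for i := 0; i < 8; i++ {
		nonce[i+offset] ^= chunkIndex[i]
	}
	fmt.Printf("% x\n", nonce) // only the last byte changes: 0x0f ^ 0x03 = 0x0c
}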
// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
// necessary, similar to aeadEncrypter.
type aeadDecrypter struct {
aeadCrypter // Embedded ciphertext opener
reader io.Reader // 'reader' is a partialLengthReader
peekedBytes []byte // Used to detect last chunk
eof bool
}
// Read decrypts bytes and reads them into dst. It decrypts when necessary and
// buffers extra decrypted bytes. It returns the number of bytes copied into dst
// and an error.
func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
// Return buffered plaintext bytes from previous calls
if ar.buffer.Len() > 0 {
return ar.buffer.Read(dst)
}
// Return EOF if we've previously validated the final tag
if ar.eof {
return 0, io.EOF
}
// Read a chunk
tagLen := ar.aead.Overhead()
cipherChunkBuf := new(bytes.Buffer)
_, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen))
cipherChunk := cipherChunkBuf.Bytes()
if errRead != nil && errRead != io.EOF {
return 0, errRead
}
decrypted, errChunk := ar.openChunk(cipherChunk)
if errChunk != nil {
return 0, errChunk
}
// Return decrypted bytes, buffering if necessary
if len(dst) < len(decrypted) {
n = copy(dst, decrypted[:len(dst)])
ar.buffer.Write(decrypted[len(dst):])
} else {
n = copy(dst, decrypted)
}
// Check final authentication tag
if errRead == io.EOF {
errChunk := ar.validateFinalTag(ar.peekedBytes)
if errChunk != nil {
return n, errChunk
}
ar.eof = true // Mark EOF for when we've returned all buffered data
}
return
}
// Close is noOp. The final authentication tag of the stream was already
// checked in the last Read call. In the future, this function could be used to
// wipe the reader and peeked, decrypted bytes, if necessary.
func (ar *aeadDecrypter) Close() (err error) {
return nil
}
// openChunk decrypts and checks integrity of an encrypted chunk, returning
// the underlying plaintext and an error. It accesses peeked bytes from next
// chunk, to identify the last chunk and decrypt/validate accordingly.
func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
tagLen := ar.aead.Overhead()
// Restore carried bytes from last call
chunkExtra := append(ar.peekedBytes, data...)
// 'chunk' contains encrypted bytes, followed by an authentication tag.
chunk := chunkExtra[:len(chunkExtra)-tagLen]
ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
adata := ar.associatedData
if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
adata = append(ar.associatedData, ar.chunkIndex...)
}
nonce := ar.computeNextNonce()
plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
if err != nil {
return nil, err
}
ar.bytesProcessed += len(plainChunk)
if err = ar.aeadCrypter.incrementIndex(); err != nil {
return nil, err
}
return plainChunk, nil
}
// validateFinalTag checks the final authentication tag, mixing the total number of
// decrypted bytes into the associated data. It returns an error, or nil if the tag is valid.
func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
// Associated: tag, version, cipher, aead, chunk size, ...
amountBytes := make([]byte, 8)
binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed))
adata := ar.associatedData
if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
// ... index ...
adata = append(ar.associatedData, ar.chunkIndex...)
}
// ... and total number of encrypted octets
adata = append(adata, amountBytes...)
nonce := ar.computeNextNonce()
_, err := ar.aead.Open(nil, nonce, tag, adata)
if err != nil {
return err
}
return nil
}
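
The associated data for the final tag is the packet header octets, then (for tag 20) the chunk index, then the total number of plaintext octets as an 8-byte big-endian value. A standalone sketch of that layout with assumed header values (AES-256, OCB, chunk size byte 12):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Assumed header octets for a tag-20 packet: 0xd4, version 1, AES-256 (9), OCB (2), chunk byte 12.
	header := []byte{0xd4, 0x01, 0x09, 0x02, 0x0c}

	chunkIndex := make([]byte, 8)
	binary.BigEndian.PutUint64(chunkIndex, 5) // counter value after the last data chunk

	total := make([]byte, 8)
	binary.BigEndian.PutUint64(total, 1048576) // total plaintext octets processed

	// Final-tag associated data: header || chunk index || total octets.
	adata := append(append(append([]byte{}, header...), chunkIndex...), total...)
	fmt.Printf("% x (len=%d)\n", adata, len(adata)) // 5 + 8 + 8 = 21 bytes
}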
// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according
// to the AEAD block size, and buffers the extra encrypted bytes for next write.
type aeadEncrypter struct {
aeadCrypter // Embedded plaintext sealer
writer io.WriteCloser // 'writer' is a partialLengthWriter
}
// Write encrypts and writes bytes. It encrypts when necessary and buffers extra
// plaintext bytes for next call. When the stream is finished, Close() MUST be
// called to append the final tag.
func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
// Append plaintextBytes to existing buffered bytes
n, err = aw.buffer.Write(plaintextBytes)
if err != nil {
return n, err
}
// Encrypt and write chunks
for aw.buffer.Len() >= aw.chunkSize {
plainChunk := aw.buffer.Next(aw.chunkSize)
encryptedChunk, err := aw.sealChunk(plainChunk)
if err != nil {
return n, err
}
_, err = aw.writer.Write(encryptedChunk)
if err != nil {
return n, err
}
}
return
}
// Close encrypts and writes the remaining buffered plaintext if any, appends
// the final authentication tag, and closes the embedded writer. This function
// MUST be called at the end of a stream.
func (aw *aeadEncrypter) Close() (err error) {
// Encrypt and write a chunk if there's buffered data left, or if we haven't
// written any chunks yet.
if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 {
plainChunk := aw.buffer.Bytes()
lastEncryptedChunk, err := aw.sealChunk(plainChunk)
if err != nil {
return err
}
_, err = aw.writer.Write(lastEncryptedChunk)
if err != nil {
return err
}
}
// Compute final tag (associated data: packet tag, version, cipher, aead,
// chunk size...
adata := aw.associatedData
if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
// ... index ...
adata = append(aw.associatedData, aw.chunkIndex...)
}
// ... and total number of encrypted octets
amountBytes := make([]byte, 8)
binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed))
adata = append(adata, amountBytes...)
nonce := aw.computeNextNonce()
finalTag := aw.aead.Seal(nil, nonce, nil, adata)
_, err = aw.writer.Write(finalTag)
if err != nil {
return err
}
return aw.writer.Close()
}
// sealChunk Encrypts and authenticates the given chunk.
func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
if len(data) > aw.chunkSize {
return nil, errors.AEADError("chunk exceeds maximum length")
}
if aw.associatedData == nil {
return nil, errors.AEADError("can't seal without headers")
}
adata := aw.associatedData
if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
adata = append(aw.associatedData, aw.chunkIndex...)
}
nonce := aw.computeNextNonce()
encrypted := aw.aead.Seal(nil, nonce, data, adata)
aw.bytesProcessed += len(data)
if err := aw.aeadCrypter.incrementIndex(); err != nil {
return nil, err
}
return encrypted, nil
}

View File

@@ -3,17 +3,14 @@
package packet
import (
"bytes"
"crypto/cipher"
"crypto/rand"
"encoding/binary"
"io"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
)
// AEADEncrypted represents an AEAD Encrypted Packet (tag 20, RFC4880bis-5.16).
// AEADEncrypted represents an AEAD Encrypted Packet.
// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
type AEADEncrypted struct {
cipher CipherFunction
mode AEADMode
@@ -25,33 +22,6 @@ type AEADEncrypted struct {
// Only currently defined version
const aeadEncryptedVersion = 1
// An AEAD opener/sealer, its configuration, and data for en/decryption.
type aeadCrypter struct {
aead cipher.AEAD
chunkSize int
initialNonce []byte
associatedData []byte // Chunk-independent associated data
chunkIndex []byte // Chunk counter
bytesProcessed int // Amount of plaintext bytes encrypted/decrypted
buffer bytes.Buffer // Buffered bytes across chunks
}
// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according
// to the AEAD block size, and buffers the extra encrypted bytes for next write.
type aeadEncrypter struct {
aeadCrypter // Embedded plaintext sealer
writer io.WriteCloser // 'writer' is a partialLengthWriter
}
// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
// necessary, similar to aeadEncrypter.
type aeadDecrypter struct {
aeadCrypter // Embedded ciphertext opener
reader io.Reader // 'reader' is a partialLengthReader
peekedBytes []byte // Used to detect last chunk
eof bool
}
func (ae *AEADEncrypted) parse(buf io.Reader) error {
headerData := make([]byte, 4)
if n, err := io.ReadFull(buf, headerData); n < 4 {
@@ -59,10 +29,14 @@ func (ae *AEADEncrypted) parse(buf io.Reader) error {
}
// Read initial nonce
mode := AEADMode(headerData[2])
nonceLen := mode.NonceLength()
if nonceLen == 0 {
nonceLen := mode.IvLength()
// This packet supports only EAX and OCB
// https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
if nonceLen == 0 || mode > AEADModeOCB {
return errors.AEADError("unknown mode")
}
initialNonce := make([]byte, nonceLen)
if n, err := io.ReadFull(buf, initialNonce); n < nonceLen {
return errors.AEADError("could not read aead nonce:" + err.Error())
@@ -75,7 +49,7 @@ func (ae *AEADEncrypted) parse(buf io.Reader) error {
}
ae.cipher = CipherFunction(c)
ae.mode = mode
ae.chunkSizeByte = byte(headerData[3])
ae.chunkSizeByte = headerData[3]
return nil
}
@@ -105,225 +79,13 @@ func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
initialNonce: ae.initialNonce,
associatedData: ae.associatedData(),
chunkIndex: make([]byte, 8),
packetTag: packetTypeAEADEncrypted,
},
reader: ae.Contents,
peekedBytes: peekedBytes}, nil
}
// Read decrypts bytes and reads them into dst. It decrypts when necessary and
// buffers extra decrypted bytes. It returns the number of bytes copied into dst
// and an error.
func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
// Return buffered plaintext bytes from previous calls
if ar.buffer.Len() > 0 {
return ar.buffer.Read(dst)
}
// Return EOF if we've previously validated the final tag
if ar.eof {
return 0, io.EOF
}
// Read a chunk
tagLen := ar.aead.Overhead()
cipherChunkBuf := new(bytes.Buffer)
_, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize + tagLen))
cipherChunk := cipherChunkBuf.Bytes()
if errRead != nil && errRead != io.EOF {
return 0, errRead
}
decrypted, errChunk := ar.openChunk(cipherChunk)
if errChunk != nil {
return 0, errChunk
}
// Return decrypted bytes, buffering if necessary
if len(dst) < len(decrypted) {
n = copy(dst, decrypted[:len(dst)])
ar.buffer.Write(decrypted[len(dst):])
} else {
n = copy(dst, decrypted)
}
// Check final authentication tag
if errRead == io.EOF {
errChunk := ar.validateFinalTag(ar.peekedBytes)
if errChunk != nil {
return n, errChunk
}
ar.eof = true // Mark EOF for when we've returned all buffered data
}
return
}
// Close is noOp. The final authentication tag of the stream was already
// checked in the last Read call. In the future, this function could be used to
// wipe the reader and peeked, decrypted bytes, if necessary.
func (ar *aeadDecrypter) Close() (err error) {
return nil
}
// SerializeAEADEncrypted initializes the aeadCrypter and returns a writer.
// This writer encrypts and writes bytes (see aeadEncrypter.Write()).
func SerializeAEADEncrypted(w io.Writer, key []byte, cipher CipherFunction, mode AEADMode, config *Config) (io.WriteCloser, error) {
writeCloser := noOpCloser{w}
writer, err := serializeStreamHeader(writeCloser, packetTypeAEADEncrypted)
if err != nil {
return nil, err
}
// Data for en/decryption: tag, version, cipher, aead mode, chunk size
aeadConf := config.AEAD()
prefix := []byte{
0xD4,
aeadEncryptedVersion,
byte(config.Cipher()),
byte(aeadConf.Mode()),
aeadConf.ChunkSizeByte(),
}
n, err := writer.Write(prefix[1:])
if err != nil || n < 4 {
return nil, errors.AEADError("could not write AEAD headers")
}
// Sample nonce
nonceLen := aeadConf.Mode().NonceLength()
nonce := make([]byte, nonceLen)
n, err = rand.Read(nonce)
if err != nil {
panic("Could not sample random nonce")
}
_, err = writer.Write(nonce)
if err != nil {
return nil, err
}
blockCipher := CipherFunction(config.Cipher()).new(key)
alg := AEADMode(aeadConf.Mode()).new(blockCipher)
chunkSize := decodeAEADChunkSize(aeadConf.ChunkSizeByte())
return &aeadEncrypter{
aeadCrypter: aeadCrypter{
aead: alg,
chunkSize: chunkSize,
associatedData: prefix,
chunkIndex: make([]byte, 8),
initialNonce: nonce,
},
writer: writer}, nil
}
// Write encrypts and writes bytes. It encrypts when necessary and buffers extra
// plaintext bytes for next call. When the stream is finished, Close() MUST be
// called to append the final tag.
func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
// Append plaintextBytes to existing buffered bytes
n, err = aw.buffer.Write(plaintextBytes)
if err != nil {
return n, err
}
// Encrypt and write chunks
for aw.buffer.Len() >= aw.chunkSize {
plainChunk := aw.buffer.Next(aw.chunkSize)
encryptedChunk, err := aw.sealChunk(plainChunk)
if err != nil {
return n, err
}
_, err = aw.writer.Write(encryptedChunk)
if err != nil {
return n, err
}
}
return
}
// Close encrypts and writes the remaining buffered plaintext if any, appends
// the final authentication tag, and closes the embedded writer. This function
// MUST be called at the end of a stream.
func (aw *aeadEncrypter) Close() (err error) {
// Encrypt and write a chunk if there's buffered data left, or if we haven't
// written any chunks yet.
if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 {
plainChunk := aw.buffer.Bytes()
lastEncryptedChunk, err := aw.sealChunk(plainChunk)
if err != nil {
return err
}
_, err = aw.writer.Write(lastEncryptedChunk)
if err != nil {
return err
}
}
// Compute final tag (associated data: packet tag, version, cipher, aead,
// chunk size, index, total number of encrypted octets).
adata := append(aw.associatedData[:], aw.chunkIndex[:]...)
adata = append(adata, make([]byte, 8)...)
binary.BigEndian.PutUint64(adata[13:], uint64(aw.bytesProcessed))
nonce := aw.computeNextNonce()
finalTag := aw.aead.Seal(nil, nonce, nil, adata)
_, err = aw.writer.Write(finalTag)
if err != nil {
return err
}
return aw.writer.Close()
}
// sealChunk Encrypts and authenticates the given chunk.
func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
if len(data) > aw.chunkSize {
return nil, errors.AEADError("chunk exceeds maximum length")
}
if aw.associatedData == nil {
return nil, errors.AEADError("can't seal without headers")
}
adata := append(aw.associatedData, aw.chunkIndex...)
nonce := aw.computeNextNonce()
encrypted := aw.aead.Seal(nil, nonce, data, adata)
aw.bytesProcessed += len(data)
if err := aw.aeadCrypter.incrementIndex(); err != nil {
return nil, err
}
return encrypted, nil
}
// openChunk decrypts and checks integrity of an encrypted chunk, returning
// the underlying plaintext and an error. It access peeked bytes from next
// chunk, to identify the last chunk and decrypt/validate accordingly.
func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
tagLen := ar.aead.Overhead()
// Restore carried bytes from last call
chunkExtra := append(ar.peekedBytes, data...)
// 'chunk' contains encrypted bytes, followed by an authentication tag.
chunk := chunkExtra[:len(chunkExtra)-tagLen]
ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
adata := append(ar.associatedData, ar.chunkIndex...)
nonce := ar.computeNextNonce()
plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
if err != nil {
return nil, err
}
ar.bytesProcessed += len(plainChunk)
if err = ar.aeadCrypter.incrementIndex(); err != nil {
return nil, err
}
return plainChunk, nil
}
// Checks the summary tag. It takes into account the total decrypted bytes into
// the associated data. It returns an error, or nil if the tag is valid.
func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
// Associated: tag, version, cipher, aead, chunk size, index, and octets
amountBytes := make([]byte, 8)
binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed))
adata := append(ar.associatedData, ar.chunkIndex...)
adata = append(adata, amountBytes...)
nonce := ar.computeNextNonce()
_, err := ar.aead.Open(nil, nonce, tag, adata)
if err != nil {
return err
}
return nil
}
// Associated data for chunks: tag, version, cipher, mode, chunk size byte
// associatedData for chunks: tag, version, cipher, mode, chunk size byte
func (ae *AEADEncrypted) associatedData() []byte {
return []byte{
0xD4,
@@ -332,33 +94,3 @@ func (ae *AEADEncrypted) associatedData() []byte {
byte(ae.mode),
ae.chunkSizeByte}
}
// computeNonce takes the incremental index and computes an eXclusive OR with
// the least significant 8 bytes of the receivers' initial nonce (see sec.
// 5.16.1 and 5.16.2). It returns the resulting nonce.
func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
nonce = make([]byte, len(wo.initialNonce))
copy(nonce, wo.initialNonce)
offset := len(wo.initialNonce) - 8
for i := 0; i < 8; i++ {
nonce[i+offset] ^= wo.chunkIndex[i]
}
return
}
// incrementIndex performs an integer increment by 1 of the integer represented by the
// slice, modifying it accordingly.
func (wo *aeadCrypter) incrementIndex() error {
index := wo.chunkIndex
if len(index) == 0 {
return errors.AEADError("Index has length 0")
}
for i := len(index) - 1; i >= 0; i-- {
if index[i] < 255 {
index[i]++
return nil
}
index[i] = 0
}
return errors.AEADError("cannot further increment index")
}

View File

@@ -10,6 +10,8 @@ import (
"io"
"math/big"
"time"
"github.com/ProtonMail/go-crypto/openpgp/s2k"
)
// Config collects a number of parameters along with sensible defaults.
@@ -33,16 +35,24 @@ type Config struct {
DefaultCompressionAlgo CompressionAlgo
// CompressionConfig configures the compression settings.
CompressionConfig *CompressionConfig
// S2KCount is only used for symmetric encryption. It
// determines the strength of the passphrase stretching when
// S2K (String to Key) config, used for key derivation in the context of secret key encryption
// and password-encrypted data.
// If nil, the default configuration is used
S2KConfig *s2k.Config
// Iteration count for Iterated S2K (String to Key).
// Only used if S2KConfig is nil.
// This value is duplicated here from s2k.Config for backwards compatibility.
// It determines the strength of the passphrase stretching when
// the said passphrase is hashed to produce a key. S2KCount
// should be between 1024 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 65536 used. Not all
// should be between 65536 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 16777216 is used. Not all
// values in the above range can be represented. S2KCount will
// be rounded up to the next representable value if it cannot
// be encoded exactly. When set, it is strongly encouraged to
// use a value that is at least 65536. See RFC 4880 Section
// 3.7.1.3.
//
// Deprecated: S2KCount should be configured in S2KConfig instead.
S2KCount int
// RSABits is the number of bits in new RSA keys made with NewEntity.
// If zero, then 2048 bit keys are created.
@@ -94,6 +104,12 @@ type Config struct {
// might be no other way than to tolerate the missing MDC. Setting this flag, allows this
// mode of operation. It should be considered a measure of last resort.
InsecureAllowUnauthenticatedMessages bool
// KnownNotations is a map of Notation Data names to bools, which controls
// the notation names that are allowed to be present in critical Notation Data
// signature subpackets.
KnownNotations map[string]bool
// SignatureNotations is a list of Notations to be added to any signatures.
SignatureNotations []*Notation
}
func (c *Config) Random() io.Reader {
@@ -119,9 +135,9 @@ func (c *Config) Cipher() CipherFunction {
func (c *Config) Now() time.Time {
if c == nil || c.Time == nil {
return time.Now()
return time.Now().Truncate(time.Second)
}
return c.Time()
return c.Time().Truncate(time.Second)
}
// KeyLifetime returns the validity period of the key.
@@ -147,13 +163,6 @@ func (c *Config) Compression() CompressionAlgo {
return c.DefaultCompressionAlgo
}
func (c *Config) PasswordHashIterations() int {
if c == nil || c.S2KCount == 0 {
return 0
}
return c.S2KCount
}
func (c *Config) RSAModulusBits() int {
if c == nil || c.RSABits == 0 {
return 2048
@@ -175,6 +184,27 @@ func (c *Config) CurveName() Curve {
return c.Curve
}
// Deprecated: The hash iterations should now be queried via the S2K() method.
func (c *Config) PasswordHashIterations() int {
if c == nil || c.S2KCount == 0 {
return 0
}
return c.S2KCount
}
func (c *Config) S2K() *s2k.Config {
if c == nil {
return nil
}
// for backwards compatibility
if c != nil && c.S2KCount > 0 && c.S2KConfig == nil {
return &s2k.Config{
S2KCount: c.S2KCount,
}
}
return c.S2KConfig
}
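
Put together, S2K() means the deprecated S2KCount is only honoured when no S2KConfig is given. A minimal usage sketch of the two paths, with the field and constant names taken from the code above:

package main

import (
	"crypto"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

func main() {
	// Preferred: configure S2K directly.
	cfg := &packet.Config{
		S2KConfig: &s2k.Config{
			S2KMode:  s2k.IteratedSaltedS2K,
			Hash:     crypto.SHA256,
			S2KCount: 65011712,
		},
	}
	fmt.Println(cfg.S2K().S2KCount) // 65011712

	// Legacy: a bare S2KCount is still honoured when S2KConfig is nil.
	legacy := &packet.Config{S2KCount: 65536}
	fmt.Println(legacy.S2K().S2KCount) // 65536
}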
func (c *Config) AEAD() *AEADConfig {
if c == nil {
return nil
@@ -202,3 +232,17 @@ func (c *Config) AllowUnauthenticatedMessages() bool {
}
return c.InsecureAllowUnauthenticatedMessages
}
func (c *Config) KnownNotation(notationName string) bool {
if c == nil {
return false
}
return c.KnownNotations[notationName]
}
func (c *Config) Notations() []*Notation {
if c == nil {
return nil
}
return c.SignatureNotations
}
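
KnownNotations controls which critical notation names are tolerated during verification, while SignatureNotations adds Notation subpackets to signatures the config produces. A minimal sketch of both helpers:

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	cfg := &packet.Config{
		// Accept this critical notation name when verifying signatures.
		KnownNotations: map[string]bool{"text@example.com": true},
		// Attach this notation to every signature created with the config.
		SignatureNotations: []*packet.Notation{{
			Name:            "text@example.com",
			Value:           []byte("test"),
			IsHumanReadable: true,
		}},
	}
	fmt.Println(cfg.KnownNotation("text@example.com")) // true
	fmt.Println(len(cfg.Notations()))                  // 1
}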

View File

@@ -25,7 +25,7 @@ const encryptedKeyVersion = 3
type EncryptedKey struct {
KeyId uint64
Algo PublicKeyAlgorithm
CipherFunc CipherFunction // only valid after a successful Decrypt
CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet
Key []byte // only valid after a successful Decrypt
encryptedMPI1, encryptedMPI2 encoding.Field
@@ -123,6 +123,10 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
}
e.CipherFunc = CipherFunction(b[0])
if !e.CipherFunc.IsSupported() {
return errors.UnsupportedError("unsupported encryption function")
}
e.Key = b[1 : len(b)-2]
expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
checksum := checksumKeyMaterial(e.Key)

View File

@@ -0,0 +1,29 @@
package packet
// Notation type represents a Notation Data subpacket
// see https://tools.ietf.org/html/rfc4880#section-5.2.3.16
type Notation struct {
Name string
Value []byte
IsCritical bool
IsHumanReadable bool
}
func (notation *Notation) getData() []byte {
nameData := []byte(notation.Name)
nameLen := len(nameData)
valueLen := len(notation.Value)
data := make([]byte, 8+nameLen+valueLen)
if notation.IsHumanReadable {
data[0] = 0x80
}
data[4] = byte(nameLen >> 8)
data[5] = byte(nameLen)
data[6] = byte(valueLen >> 8)
data[7] = byte(valueLen)
copy(data[8:8+nameLen], nameData)
copy(data[8+nameLen:], notation.Value)
return data
}
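
The subpacket body produced here is four flag octets (0x80 in the first marks the notation human-readable), a two-octet name length, a two-octet value length, then the name and value. A standalone sketch of the same layout with a hypothetical notation (the unexported getData helper is mirrored rather than called):

package main

import "fmt"

// encodeNotation mirrors the layout above for a notation data subpacket body.
func encodeNotation(name string, value []byte, humanReadable bool) []byte {
	data := make([]byte, 8+len(name)+len(value))
	if humanReadable {
		data[0] = 0x80 // first flag octet
	}
	data[4] = byte(len(name) >> 8)
	data[5] = byte(len(name))
	data[6] = byte(len(value) >> 8)
	data[7] = byte(len(value))
	copy(data[8:8+len(name)], name)
	copy(data[8+len(name):], value)
	return data
}

func main() {
	fmt.Printf("% x\n", encodeNotation("k@example.com", []byte("v"), true))
	// 80 00 00 00 00 0d 00 01 followed by the name and value bytes
}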

View File

@@ -8,7 +8,7 @@ import (
"crypto"
"encoding/binary"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/s2k"
"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
"io"
"strconv"
)
@@ -37,7 +37,7 @@ func (ops *OnePassSignature) parse(r io.Reader) (err error) {
}
var ok bool
ops.Hash, ok = s2k.HashIdToHash(buf[2])
ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
if !ok {
return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
}
@@ -55,7 +55,7 @@ func (ops *OnePassSignature) Serialize(w io.Writer) error {
buf[0] = onePassSignatureVersion
buf[1] = uint8(ops.SigType)
var ok bool
buf[2], ok = s2k.HashToHashId(ops.Hash)
buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash)
if !ok {
return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
}

View File

@@ -84,8 +84,9 @@ func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
// as found in signature and user attribute packets.
type OpaqueSubpacket struct {
SubType uint8
Contents []byte
SubType uint8
EncodedLength []byte // Store the original encoded length for signature verifications.
Contents []byte
}
// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
@@ -109,6 +110,7 @@ func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
// RFC 4880, section 5.2.3.1
var subLen uint32
var encodedLength []byte
if len(contents) < 1 {
goto Truncated
}
@@ -119,6 +121,7 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke
if len(contents) < subHeaderLen {
goto Truncated
}
encodedLength = contents[0:1]
subLen = uint32(contents[0])
contents = contents[1:]
case contents[0] < 255:
@@ -126,6 +129,7 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke
if len(contents) < subHeaderLen {
goto Truncated
}
encodedLength = contents[0:2]
subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
contents = contents[2:]
default:
@@ -133,16 +137,19 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke
if len(contents) < subHeaderLen {
goto Truncated
}
encodedLength = contents[0:5]
subLen = uint32(contents[1])<<24 |
uint32(contents[2])<<16 |
uint32(contents[3])<<8 |
uint32(contents[4])
contents = contents[5:]
}
if subLen > uint32(len(contents)) || subLen == 0 {
goto Truncated
}
subPacket.SubType = contents[0]
subPacket.EncodedLength = encodedLength
subPacket.Contents = contents[1:subLen]
return
Truncated:
@@ -152,7 +159,9 @@ Truncated:
func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
buf := make([]byte, 6)
n := serializeSubpacketLength(buf, len(osp.Contents)+1)
copy(buf, osp.EncodedLength)
n := len(osp.EncodedLength)
buf[n] = osp.SubType
if _, err = w.Write(buf[:n+1]); err != nil {
return

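The parser above handles the three RFC 4880 section 5.2.3.1 length encodings (one octet below 192, two octets up to 16319, five octets otherwise) and keeps the original octets in EncodedLength so re-serialization round-trips. A standalone sketch of the encoding side:

package main

import "fmt"

// encodeSubpacketLength returns the RFC 4880 section 5.2.3.1 encoding of n.
func encodeSubpacketLength(n uint32) []byte {
	switch {
	case n < 192:
		return []byte{byte(n)}
	case n < 16320:
		n -= 192
		return []byte{byte(n>>8) + 192, byte(n)}
	default:
		return []byte{255, byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n)}
	}
}

func main() {
	fmt.Printf("% x\n", encodeSubpacketLength(100))   // 64
	fmt.Printf("% x\n", encodeSubpacketLength(1000))  // c3 28
	fmt.Printf("% x\n", encodeSubpacketLength(20000)) // ff 00 00 4e 20
}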
View File

@@ -302,21 +302,21 @@ func consumeAll(r io.Reader) (n int64, err error) {
type packetType uint8
const (
packetTypeEncryptedKey packetType = 1
packetTypeSignature packetType = 2
packetTypeSymmetricKeyEncrypted packetType = 3
packetTypeOnePassSignature packetType = 4
packetTypePrivateKey packetType = 5
packetTypePublicKey packetType = 6
packetTypePrivateSubkey packetType = 7
packetTypeCompressed packetType = 8
packetTypeSymmetricallyEncrypted packetType = 9
packetTypeLiteralData packetType = 11
packetTypeUserId packetType = 13
packetTypePublicSubkey packetType = 14
packetTypeUserAttribute packetType = 17
packetTypeSymmetricallyEncryptedMDC packetType = 18
packetTypeAEADEncrypted packetType = 20
packetTypeEncryptedKey packetType = 1
packetTypeSignature packetType = 2
packetTypeSymmetricKeyEncrypted packetType = 3
packetTypeOnePassSignature packetType = 4
packetTypePrivateKey packetType = 5
packetTypePublicKey packetType = 6
packetTypePrivateSubkey packetType = 7
packetTypeCompressed packetType = 8
packetTypeSymmetricallyEncrypted packetType = 9
packetTypeLiteralData packetType = 11
packetTypeUserId packetType = 13
packetTypePublicSubkey packetType = 14
packetTypeUserAttribute packetType = 17
packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18
packetTypeAEADEncrypted packetType = 20
)
// EncryptedDataPacket holds encrypted data. It is currently implemented by
@@ -361,9 +361,9 @@ func Read(r io.Reader) (p Packet, err error) {
p = new(UserId)
case packetTypeUserAttribute:
p = new(UserAttribute)
case packetTypeSymmetricallyEncryptedMDC:
case packetTypeSymmetricallyEncryptedIntegrityProtected:
se := new(SymmetricallyEncrypted)
se.MDC = true
se.IntegrityProtected = true
p = se
case packetTypeAEADEncrypted:
p = new(AEADEncrypted)
@@ -384,18 +384,18 @@ func Read(r io.Reader) (p Packet, err error) {
type SignatureType uint8
const (
SigTypeBinary SignatureType = 0x00
SigTypeText = 0x01
SigTypeGenericCert = 0x10
SigTypePersonaCert = 0x11
SigTypeCasualCert = 0x12
SigTypePositiveCert = 0x13
SigTypeSubkeyBinding = 0x18
SigTypePrimaryKeyBinding = 0x19
SigTypeDirectSignature = 0x1F
SigTypeKeyRevocation = 0x20
SigTypeSubkeyRevocation = 0x28
SigTypeCertificationRevocation = 0x30
SigTypeBinary SignatureType = 0x00
SigTypeText = 0x01
SigTypeGenericCert = 0x10
SigTypePersonaCert = 0x11
SigTypeCasualCert = 0x12
SigTypePositiveCert = 0x13
SigTypeSubkeyBinding = 0x18
SigTypePrimaryKeyBinding = 0x19
SigTypeDirectSignature = 0x1F
SigTypeKeyRevocation = 0x20
SigTypeSubkeyRevocation = 0x28
SigTypeCertificationRevocation = 0x30
)
// PublicKeyAlgorithm represents the different public key system specified for
@@ -455,6 +455,11 @@ func (cipher CipherFunction) KeySize() int {
return algorithm.CipherFunction(cipher).KeySize()
}
// IsSupported returns true if the cipher is supported from the library
func (cipher CipherFunction) IsSupported() bool {
return algorithm.CipherFunction(cipher).KeySize() > 0
}
// blockSize returns the block size, in bytes, of cipher.
func (cipher CipherFunction) blockSize() int {
return algorithm.CipherFunction(cipher).BlockSize()
@@ -490,15 +495,16 @@ const (
// AEADMode represents the different Authenticated Encryption with Associated
// Data specified for OpenPGP.
// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
type AEADMode algorithm.AEADMode
const (
AEADModeEAX AEADMode = 1
AEADModeOCB AEADMode = 2
AEADModeExperimentalGCM AEADMode = 100
AEADModeEAX AEADMode = 1
AEADModeOCB AEADMode = 2
AEADModeGCM AEADMode = 3
)
func (mode AEADMode) NonceLength() int {
func (mode AEADMode) IvLength() int {
return algorithm.AEADMode(mode).NonceLength()
}
@@ -527,13 +533,19 @@ const (
type Curve string
const (
Curve25519 Curve = "Curve25519"
Curve448 Curve = "Curve448"
CurveNistP256 Curve = "P256"
CurveNistP384 Curve = "P384"
CurveNistP521 Curve = "P521"
CurveSecP256k1 Curve = "SecP256k1"
Curve25519 Curve = "Curve25519"
Curve448 Curve = "Curve448"
CurveNistP256 Curve = "P256"
CurveNistP384 Curve = "P384"
CurveNistP521 Curve = "P521"
CurveSecP256k1 Curve = "SecP256k1"
CurveBrainpoolP256 Curve = "BrainpoolP256"
CurveBrainpoolP384 Curve = "BrainpoolP384"
CurveBrainpoolP512 Curve = "BrainpoolP512"
)
// TrustLevel represents a trust level per RFC4880 5.2.3.13
type TrustLevel uint8
// TrustAmount represents a trust amount per RFC4880 5.2.3.13
type TrustAmount uint8

View File

@@ -49,7 +49,7 @@ type PrivateKey struct {
s2kParams *s2k.Params
}
//S2KType s2k packet type
// S2KType s2k packet type
type S2KType uint8
const (
@@ -179,6 +179,9 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
return
}
pk.cipher = CipherFunction(buf[0])
if pk.cipher != 0 && !pk.cipher.IsSupported() {
return errors.UnsupportedError("unsupported cipher function in private key")
}
pk.s2kParams, err = s2k.ParseIntoParams(r)
if err != nil {
return
@@ -367,8 +370,8 @@ func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error {
return err
}
// Decrypt decrypts an encrypted private key using a passphrase.
func (pk *PrivateKey) Decrypt(passphrase []byte) error {
// decrypt decrypts an encrypted private key using a decryption key.
func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
if pk.Dummy() {
return errors.ErrDummyPrivateKey("dummy key found")
}
@@ -376,9 +379,7 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) error {
return nil
}
key := make([]byte, pk.cipher.KeySize())
pk.s2k(key, passphrase)
block := pk.cipher.new(key)
block := pk.cipher.new(decryptionKey)
cfb := cipher.NewCFBDecrypter(block, pk.iv)
data := make([]byte, len(pk.encryptedData))
@@ -427,35 +428,79 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) error {
return nil
}
// Encrypt encrypts an unencrypted private key using a passphrase.
func (pk *PrivateKey) Encrypt(passphrase []byte) error {
func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) error {
if pk.Dummy() {
return errors.ErrDummyPrivateKey("dummy key found")
}
if !pk.Encrypted {
return nil
}
key, err := keyCache.GetOrComputeDerivedKey(passphrase, pk.s2kParams, pk.cipher.KeySize())
if err != nil {
return err
}
return pk.decrypt(key)
}
// Decrypt decrypts an encrypted private key using a passphrase.
func (pk *PrivateKey) Decrypt(passphrase []byte) error {
if pk.Dummy() {
return errors.ErrDummyPrivateKey("dummy key found")
}
if !pk.Encrypted {
return nil
}
key := make([]byte, pk.cipher.KeySize())
pk.s2k(key, passphrase)
return pk.decrypt(key)
}
// DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase.
// Avoids recomputation of similar s2k key derivations.
func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
// Create a cache to avoid recomputation of key derivations for the same passphrase.
s2kCache := &s2k.Cache{}
for _, key := range keys {
if key != nil && !key.Dummy() && key.Encrypted {
err := key.decryptWithCache(passphrase, s2kCache)
if err != nil {
return err
}
}
}
return nil
}
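
A hedged usage sketch of DecryptPrivateKeys: collect the primary and subkey private keys of a parsed entity and unlock them with one passphrase, so the S2K derivation is shared. The file name and passphrase are hypothetical, and the key ring is assumed to carry encrypted private keys:

package main

import (
	"log"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("private.asc") // hypothetical armored key file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	entities, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		log.Fatal(err)
	}
	if len(entities) == 0 || entities[0].PrivateKey == nil {
		log.Fatal("no private key found")
	}

	// Gather the primary key and all subkey private keys of the first entity.
	e := entities[0]
	keys := []*packet.PrivateKey{e.PrivateKey}
	for _, sub := range e.Subkeys {
		keys = append(keys, sub.PrivateKey)
	}

	// One passphrase, the S2K key derivation is cached, all keys are decrypted.
	if err := packet.DecryptPrivateKeys(keys, []byte("passphrase")); err != nil {
		log.Fatal(err)
	}
}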
// encrypt encrypts an unencrypted private key.
func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction CipherFunction) error {
if pk.Dummy() {
return errors.ErrDummyPrivateKey("dummy key found")
}
if pk.Encrypted {
return nil
}
// check if encryptionKey has the correct size
if len(key) != cipherFunction.KeySize() {
return errors.InvalidArgumentError("supplied encryption key has the wrong size")
}
priv := bytes.NewBuffer(nil)
err := pk.serializePrivateKey(priv)
if err != nil {
return err
}
//Default config of private key encryption
pk.cipher = CipherAES256
s2kConfig := &s2k.Config{
S2KMode: 3, //Iterated
S2KCount: 65536,
Hash: crypto.SHA256,
}
pk.s2kParams, err = s2k.Generate(rand.Reader, s2kConfig)
if err != nil {
return err
}
privateKeyBytes := priv.Bytes()
key := make([]byte, pk.cipher.KeySize())
pk.sha1Checksum = true
pk.cipher = cipherFunction
pk.s2kParams = params
pk.s2k, err = pk.s2kParams.Function()
if err != nil {
return err
}
pk.s2k(key, passphrase)
}
privateKeyBytes := priv.Bytes()
pk.sha1Checksum = true
block := pk.cipher.new(key)
pk.iv = make([]byte, pk.cipher.blockSize())
_, err = rand.Read(pk.iv)
@@ -486,6 +531,62 @@ func (pk *PrivateKey) Encrypt(passphrase []byte) error {
return err
}
// EncryptWithConfig encrypts an unencrypted private key using the passphrase and the config.
func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error {
params, err := s2k.Generate(config.Random(), config.S2K())
if err != nil {
return err
}
// Derive an encryption key with the configured s2k function.
key := make([]byte, config.Cipher().KeySize())
s2k, err := params.Function()
if err != nil {
return err
}
s2k(key, passphrase)
// Encrypt the private key with the derived encryption key.
return pk.encrypt(key, params, config.Cipher())
}
// EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase.
// Only derives one key from the passphrase, which is then used to encrypt each key.
func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) error {
params, err := s2k.Generate(config.Random(), config.S2K())
if err != nil {
return err
}
// Derive an encryption key with the configured s2k function.
encryptionKey := make([]byte, config.Cipher().KeySize())
s2k, err := params.Function()
if err != nil {
return err
}
s2k(encryptionKey, passphrase)
for _, key := range keys {
if key != nil && !key.Dummy() && !key.Encrypted {
err = key.encrypt(encryptionKey, params, config.Cipher())
if err != nil {
return err
}
}
}
return nil
}
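
EncryptPrivateKeys derives the key-encryption key once and then protects every unencrypted key in the slice with it. A minimal sketch with an assumed AES-256 / iterated-salted-S2K configuration; the keys slice would normally come from a freshly generated or decrypted entity:

package main

import (
	"crypto"
	_ "crypto/sha256" // register SHA-256 so the S2K hash is available
	"log"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

// lockKeys encrypts every unencrypted key in keys with a single derived key.
func lockKeys(keys []*packet.PrivateKey, passphrase []byte) error {
	cfg := &packet.Config{
		DefaultCipher: packet.CipherAES256,
		S2KConfig: &s2k.Config{
			S2KMode:  s2k.IteratedSaltedS2K,
			Hash:     crypto.SHA256,
			S2KCount: 65011712,
		},
	}
	return packet.EncryptPrivateKeys(keys, passphrase, cfg)
}

func main() {
	// Hypothetical empty slice; in practice, pass the entity's private keys.
	var keys []*packet.PrivateKey
	if err := lockKeys(keys, []byte("passphrase")); err != nil {
		log.Fatal(err)
	}
}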
// Encrypt encrypts an unencrypted private key using a passphrase.
func (pk *PrivateKey) Encrypt(passphrase []byte) error {
// Default config of private key encryption
config := &Config{
S2KConfig: &s2k.Config{
S2KMode: s2k.IteratedSaltedS2K,
S2KCount: 65536,
Hash: crypto.SHA256,
},
DefaultCipher: CipherAES256,
}
return pk.EncryptWithConfig(passphrase, config)
}
func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
switch priv := pk.PrivateKey.(type) {
case *rsa.PrivateKey:

View File

@@ -415,6 +415,10 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
return
}
if len(pk.p.Bytes()) == 0 {
return errors.StructuralError("empty EdDSA public key")
}
pub := eddsa.NewPublicKey(c)
switch flag := pk.p.Bytes()[0]; flag {
@@ -596,7 +600,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
}
signed.Write(sig.HashSuffix)
hashBytes := signed.Sum(nil)
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
if sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {
return errors.SignatureError("hash tag doesn't match")
}

View File

@@ -17,8 +17,8 @@ import (
"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
"github.com/ProtonMail/go-crypto/openpgp/eddsa"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
"github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
"github.com/ProtonMail/go-crypto/openpgp/s2k"
)
const (
@@ -66,11 +66,24 @@ type Signature struct {
SigLifetimeSecs, KeyLifetimeSecs *uint32
PreferredSymmetric, PreferredHash, PreferredCompression []uint8
PreferredAEAD []uint8
PreferredCipherSuites [][2]uint8
IssuerKeyId *uint64
IssuerFingerprint []byte
SignerUserId *string
IsPrimaryId *bool
Notations []*Notation
// TrustLevel and TrustAmount can be set by the signer to assert that
// the key is not only valid but also trustworthy at the specified
// level.
// See RFC 4880, section 5.2.3.13 for details.
TrustLevel TrustLevel
TrustAmount TrustAmount
// TrustRegularExpression can be used in conjunction with trust Signature
// packets to limit the scope of the trust that is extended.
// See RFC 4880, section 5.2.3.14 for details.
TrustRegularExpression *string
// PolicyURI can be set to the URI of a document that describes the
// policy under which the signature was issued. See RFC 4880, section
@@ -89,8 +102,8 @@ type Signature struct {
// In a self-signature, these flags are set if there is a features subpacket
// indicating that the issuer implementation supports these features
// (section 5.2.3.24).
MDC, AEAD, V5Keys bool
// see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket
SEIPDv1, SEIPDv2 bool
// EmbeddedSignature, if non-nil, is a signature of the parent key, by
// this key. This prevents an attacker from claiming another's signing
@@ -126,7 +139,13 @@ func (sig *Signature) parse(r io.Reader) (err error) {
}
var ok bool
sig.Hash, ok = s2k.HashIdToHash(buf[2])
if sig.Version < 5 {
sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
} else {
sig.Hash, ok = algorithm.HashIdToHash(buf[2])
}
if !ok {
return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
}
@@ -137,7 +156,11 @@ func (sig *Signature) parse(r io.Reader) (err error) {
if err != nil {
return
}
sig.buildHashSuffix(hashedSubpackets)
err = sig.buildHashSuffix(hashedSubpackets)
if err != nil {
return
}
err = parseSignatureSubpackets(sig, hashedSubpackets, true)
if err != nil {
return
@@ -221,9 +244,12 @@ type signatureSubpacketType uint8
const (
creationTimeSubpacket signatureSubpacketType = 2
signatureExpirationSubpacket signatureSubpacketType = 3
trustSubpacket signatureSubpacketType = 5
regularExpressionSubpacket signatureSubpacketType = 6
keyExpirationSubpacket signatureSubpacketType = 9
prefSymmetricAlgosSubpacket signatureSubpacketType = 11
issuerSubpacket signatureSubpacketType = 16
notationDataSubpacket signatureSubpacketType = 20
prefHashAlgosSubpacket signatureSubpacketType = 21
prefCompressionSubpacket signatureSubpacketType = 22
primaryUserIdSubpacket signatureSubpacketType = 25
@@ -234,7 +260,7 @@ const (
featuresSubpacket signatureSubpacketType = 30
embeddedSignatureSubpacket signatureSubpacketType = 32
issuerFingerprintSubpacket signatureSubpacketType = 33
prefAeadAlgosSubpacket signatureSubpacketType = 34
prefCipherSuitesSubpacket signatureSubpacketType = 39
)
// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
@@ -245,6 +271,10 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
packetType signatureSubpacketType
isCritical bool
)
if len(subpacket) == 0 {
err = errors.StructuralError("zero length signature subpacket")
return
}
switch {
case subpacket[0] < 192:
length = uint32(subpacket[0])
@@ -278,12 +308,14 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
isCritical = subpacket[0]&0x80 == 0x80
subpacket = subpacket[1:]
sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
if !isHashed &&
packetType != issuerSubpacket &&
packetType != issuerFingerprintSubpacket &&
packetType != embeddedSignatureSubpacket {
return
}
switch packetType {
case creationTimeSubpacket:
if !isHashed {
err = errors.StructuralError("signature creation time in non-hashed area")
return
}
if len(subpacket) != 4 {
err = errors.StructuralError("signature creation time not four bytes")
return
@@ -292,20 +324,35 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
sig.CreationTime = time.Unix(int64(t), 0)
case signatureExpirationSubpacket:
// Signature expiration time, section 5.2.3.10
if !isHashed {
return
}
if len(subpacket) != 4 {
err = errors.StructuralError("expiration subpacket with bad length")
return
}
sig.SigLifetimeSecs = new(uint32)
*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
case keyExpirationSubpacket:
// Key expiration time, section 5.2.3.6
if !isHashed {
case trustSubpacket:
if len(subpacket) != 2 {
err = errors.StructuralError("trust subpacket with bad length")
return
}
// Trust level and amount, section 5.2.3.13
sig.TrustLevel = TrustLevel(subpacket[0])
sig.TrustAmount = TrustAmount(subpacket[1])
case regularExpressionSubpacket:
if len(subpacket) == 0 {
err = errors.StructuralError("regexp subpacket with bad length")
return
}
// Trust regular expression, section 5.2.3.14
// RFC specifies the string should be null-terminated; remove a null byte from the end
if subpacket[len(subpacket)-1] != 0x00 {
err = errors.StructuralError("expected regular expression to be null-terminated")
return
}
trustRegularExpression := string(subpacket[:len(subpacket)-1])
sig.TrustRegularExpression = &trustRegularExpression
case keyExpirationSubpacket:
// Key expiration time, section 5.2.3.6
if len(subpacket) != 4 {
err = errors.StructuralError("key expiration subpacket with bad length")
return
@@ -314,41 +361,52 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
*sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
case prefSymmetricAlgosSubpacket:
// Preferred symmetric algorithms, section 5.2.3.7
if !isHashed {
return
}
sig.PreferredSymmetric = make([]byte, len(subpacket))
copy(sig.PreferredSymmetric, subpacket)
case issuerSubpacket:
// Issuer, section 5.2.3.5
if sig.Version > 4 {
err = errors.StructuralError("issuer subpacket found in v5 key")
return
}
// Issuer, section 5.2.3.5
if len(subpacket) != 8 {
err = errors.StructuralError("issuer subpacket with bad length")
return
}
sig.IssuerKeyId = new(uint64)
*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
case prefHashAlgosSubpacket:
// Preferred hash algorithms, section 5.2.3.8
if !isHashed {
case notationDataSubpacket:
// Notation data, section 5.2.3.16
if len(subpacket) < 8 {
err = errors.StructuralError("notation data subpacket with bad length")
return
}
nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5])
valueLength := uint32(subpacket[6])<<8 | uint32(subpacket[7])
if len(subpacket) != int(nameLength)+int(valueLength)+8 {
err = errors.StructuralError("notation data subpacket with bad length")
return
}
notation := Notation{
IsHumanReadable: (subpacket[0] & 0x80) == 0x80,
Name: string(subpacket[8:(nameLength + 8)]),
Value: subpacket[(nameLength + 8):(valueLength + nameLength + 8)],
IsCritical: isCritical,
}
sig.Notations = append(sig.Notations, &notation)
case prefHashAlgosSubpacket:
// Preferred hash algorithms, section 5.2.3.8
sig.PreferredHash = make([]byte, len(subpacket))
copy(sig.PreferredHash, subpacket)
case prefCompressionSubpacket:
// Preferred compression algorithms, section 5.2.3.9
if !isHashed {
return
}
sig.PreferredCompression = make([]byte, len(subpacket))
copy(sig.PreferredCompression, subpacket)
case primaryUserIdSubpacket:
// Primary User ID, section 5.2.3.19
if !isHashed {
return
}
if len(subpacket) != 1 {
err = errors.StructuralError("primary user id subpacket with bad length")
return
@@ -359,9 +417,6 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
}
case keyFlagsSubpacket:
// Key flags, section 5.2.3.21
if !isHashed {
return
}
if len(subpacket) == 0 {
err = errors.StructuralError("empty key flags subpacket")
return
@@ -393,9 +448,6 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
sig.SignerUserId = &userId
case reasonForRevocationSubpacket:
// Reason For Revocation, section 5.2.3.23
if !isHashed {
return
}
if len(subpacket) == 0 {
err = errors.StructuralError("empty revocation reason subpacket")
return
@@ -407,18 +459,13 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
// Features subpacket, section 5.2.3.24 specifies a very general
// mechanism for OpenPGP implementations to signal support for new
// features.
if !isHashed {
return
}
if len(subpacket) > 0 {
if subpacket[0]&0x01 != 0 {
sig.MDC = true
sig.SEIPDv1 = true
}
if subpacket[0]&0x02 != 0 {
sig.AEAD = true
}
if subpacket[0]&0x04 != 0 {
sig.V5Keys = true
// 0x02 and 0x04 are reserved
if subpacket[0]&0x08 != 0 {
sig.SEIPDv2 = true
}
}
case embeddedSignatureSubpacket:
@@ -441,11 +488,12 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
}
case policyUriSubpacket:
// Policy URI, section 5.2.3.20
if !isHashed {
return
}
sig.PolicyURI = string(subpacket)
case issuerFingerprintSubpacket:
if len(subpacket) == 0 {
err = errors.StructuralError("empty issuer fingerprint subpacket")
return
}
v, l := subpacket[0], len(subpacket[1:])
if v == 5 && l != 32 || v != 5 && l != 20 {
return nil, errors.StructuralError("bad fingerprint length")
@@ -458,13 +506,19 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
} else {
*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
}
case prefAeadAlgosSubpacket:
// Preferred symmetric algorithms, section 5.2.3.8
if !isHashed {
case prefCipherSuitesSubpacket:
// Preferred AEAD cipher suites
// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites
if len(subpacket)%2 != 0 {
err = errors.StructuralError("invalid aead cipher suite length")
return
}
sig.PreferredAEAD = make([]byte, len(subpacket))
copy(sig.PreferredAEAD, subpacket)
sig.PreferredCipherSuites = make([][2]byte, len(subpacket)/2)
for i := 0; i < len(subpacket)/2; i++ {
sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]}
}
default:
if isCritical {
err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
@@ -562,7 +616,15 @@ func (sig *Signature) SigExpired(currentTime time.Time) bool {
// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
hash, ok := s2k.HashToHashId(sig.Hash)
var hashId byte
var ok bool
if sig.Version < 5 {
hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash)
} else {
hashId, ok = algorithm.HashToHashId(sig.Hash)
}
if !ok {
sig.HashSuffix = nil
return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
@@ -572,7 +634,7 @@ func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
uint8(sig.Version),
uint8(sig.SigType),
uint8(sig.PubKeyAlgo),
uint8(hash),
uint8(hashId),
uint8(len(hashedSubpackets) >> 8),
uint8(len(hashedSubpackets)),
})
@@ -842,7 +904,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
if sig.IssuerKeyId != nil && sig.Version == 4 {
keyId := make([]byte, 8)
binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId})
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
}
if sig.IssuerFingerprint != nil {
contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...)
@@ -885,23 +947,40 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
}
for _, notation := range sig.Notations {
subpackets = append(
subpackets,
outputSubpacket{
true,
notationDataSubpacket,
notation.IsCritical,
notation.getData(),
})
}
// The following subpackets may only appear in self-signatures.
var features = byte(0x00)
if sig.MDC {
if sig.SEIPDv1 {
features |= 0x01
}
if sig.AEAD {
features |= 0x02
}
if sig.V5Keys {
features |= 0x04
if sig.SEIPDv2 {
features |= 0x08
}
if features != 0x00 {
subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}})
}
if sig.TrustLevel != 0 {
subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}})
}
if sig.TrustRegularExpression != nil {
// RFC specifies the string should be null-terminated; add a null byte to the end
subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")})
}
if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
keyLifetime := make([]byte, 4)
binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
@@ -928,8 +1007,13 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)})
}
if len(sig.PreferredAEAD) > 0 {
subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD})
if len(sig.PreferredCipherSuites) > 0 {
serialized := make([]byte, len(sig.PreferredCipherSuites)*2)
for i, cipherSuite := range sig.PreferredCipherSuites {
serialized[2*i] = cipherSuite[0]
serialized[2*i+1] = cipherSuite[1]
}
subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized})
}
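
Each preferred cipher suite is a (symmetric cipher, AEAD mode) pair, and the subpacket body is simply those pairs flattened to bytes. A standalone sketch using the algorithm ids from the constants above (AES-256 = 9, AES-128 = 7, OCB = 2):

package main

import "fmt"

func main() {
	// Preference order: AES-256 with OCB first, then AES-128 with OCB.
	suites := [][2]uint8{{9, 2}, {7, 2}}

	// Flatten to the subpacket body, two bytes per suite, as buildSubpackets does.
	body := make([]byte, 0, len(suites)*2)
	for _, s := range suites {
		body = append(body, s[0], s[1])
	}
	fmt.Printf("% x\n", body) // 09 02 07 02
}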
// Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23.
@@ -971,7 +1055,7 @@ func (sig *Signature) AddMetadataToHashSuffix() {
n := sig.HashSuffix[len(sig.HashSuffix)-8:]
l := uint64(
uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 |
uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
suffix := bytes.NewBuffer(nil)
suffix.Write(sig.HashSuffix[:l])

View File

@@ -14,8 +14,8 @@ import (
"github.com/ProtonMail/go-crypto/openpgp/s2k"
)
// This is the largest session key that we'll support. Since no 512-bit cipher
// has even been seriously used, this is comfortably large.
// This is the largest session key that we'll support. Since at most a 256-bit cipher
// is supported in OpenPGP, this is large enough to also contain the auth tag.
const maxSessionKeySizeInBytes = 64
// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
@@ -25,13 +25,16 @@ type SymmetricKeyEncrypted struct {
CipherFunc CipherFunction
Mode AEADMode
s2k func(out, in []byte)
aeadNonce []byte
encryptedKey []byte
iv []byte
encryptedKey []byte // Contains also the authentication tag for AEAD
}
// parse parses an SymmetricKeyEncrypted packet as specified in
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses
func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
// RFC 4880, section 5.3.
var buf [2]byte
var buf [1]byte
// Version
if _, err := readFull(r, buf[:]); err != nil {
return err
}
@@ -39,17 +42,22 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
if ske.Version != 4 && ske.Version != 5 {
return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
}
ske.CipherFunc = CipherFunction(buf[1])
if ske.CipherFunc.KeySize() == 0 {
return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
// Cipher function
if _, err := readFull(r, buf[:]); err != nil {
return err
}
ske.CipherFunc = CipherFunction(buf[0])
if !ske.CipherFunc.IsSupported() {
return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0])))
}
if ske.Version == 5 {
mode := make([]byte, 1)
if _, err := r.Read(mode); err != nil {
// AEAD mode
if _, err := readFull(r, buf[:]); err != nil {
return errors.StructuralError("cannot read AEAD octet from packet")
}
ske.Mode = AEADMode(mode[0])
ske.Mode = AEADMode(buf[0])
}
var err error
@@ -61,13 +69,14 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
}
if ske.Version == 5 {
// AEAD nonce
nonce := make([]byte, ske.Mode.NonceLength())
_, err := readFull(r, nonce)
if err != nil && err != io.ErrUnexpectedEOF {
return err
// AEAD IV
iv := make([]byte, ske.Mode.IvLength())
_, err := readFull(r, iv)
if err != nil {
return errors.StructuralError("cannot read AEAD IV")
}
ske.aeadNonce = nonce
ske.iv = iv
}
encryptedKey := make([]byte, maxSessionKeySizeInBytes)
@@ -128,11 +137,10 @@ func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction,
}
func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) {
blockCipher := CipherFunction(ske.CipherFunc).new(key)
aead := ske.Mode.new(blockCipher)
adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)}
plaintextKey, err := aead.Open(nil, ske.aeadNonce, ske.encryptedKey, adata)
aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata)
plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata)
if err != nil {
return nil, err
}
@@ -142,17 +150,12 @@ func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) {
// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w.
// The packet contains a random session key, encrypted by a key derived from
// the given passphrase. The session key is returned and must be passed to
// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on
// whether config.AEADConfig != nil.
// SerializeSymmetricallyEncrypted.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
cipherFunc := config.Cipher()
keySize := cipherFunc.KeySize()
if keySize == 0 {
return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
}
sessionKey := make([]byte, keySize)
sessionKey := make([]byte, cipherFunc.KeySize())
_, err = io.ReadFull(config.Random(), sessionKey)
if err != nil {
return
@@ -169,9 +172,8 @@ func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Conf
// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w.
// The packet contains the given session key, encrypted by a key derived from
// the given passphrase. The session key must be passed to
// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on
// whether config.AEADConfig != nil.
// the given passphrase. The returned session key must be passed to
// SerializeSymmetricallyEncrypted.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) {
var version int
@@ -181,16 +183,17 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
version = 4
}
cipherFunc := config.Cipher()
keySize := cipherFunc.KeySize()
if keySize == 0 {
return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
// cipherFunc must be AES
if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 {
return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc)))
}
keySize := cipherFunc.KeySize()
s2kBuf := new(bytes.Buffer)
keyEncryptingKey := make([]byte, keySize)
// s2k.Serialize salts and stretches the passphrase, and writes the
// resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, config.S2K())
if err != nil {
return
}
@@ -201,20 +204,20 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
case 4:
packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
case 5:
nonceLen := config.AEAD().Mode().NonceLength()
ivLen := config.AEAD().Mode().IvLength()
tagLen := config.AEAD().Mode().TagLength()
packetLength = 3 + len(s2kBytes) + nonceLen + keySize + tagLen
packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen
}
err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
if err != nil {
return
}
buf := make([]byte, 2)
// Symmetric Key Encrypted Version
buf[0] = byte(version)
buf := []byte{byte(version)}
// Cipher function
buf[1] = byte(cipherFunc)
buf = append(buf, byte(cipherFunc))
if version == 5 {
// AEAD mode
@@ -241,19 +244,20 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
return
}
case 5:
blockCipher := cipherFunc.new(keyEncryptingKey)
mode := config.AEAD().Mode()
aead := mode.new(blockCipher)
// Sample nonce using random reader
nonce := make([]byte, config.AEAD().Mode().NonceLength())
_, err = io.ReadFull(config.Random(), nonce)
adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)}
aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata)
// Sample iv using random reader
iv := make([]byte, config.AEAD().Mode().IvLength())
_, err = io.ReadFull(config.Random(), iv)
if err != nil {
return
}
// Seal and write (encryptedData includes auth. tag)
adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)}
encryptedData := aead.Seal(nil, nonce, sessionKey, adata)
_, err = w.Write(nonce)
encryptedData := aead.Seal(nil, iv, sessionKey, adata)
_, err = w.Write(iv)
if err != nil {
return
}
@@ -265,3 +269,8 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
return
}
func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) {
blockCipher := c.new(inputKey)
return mode.new(blockCipher)
}
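For context on the v5 packet handled above: the session key is sealed with an AEAD keyed by the S2K-derived key-encrypting key, using the packet header bytes as associated data and a random IV that is written next to the ciphertext. A minimal standalone sketch using only the standard library (AES-GCM stands in for whichever AEAD mode the config selects; names and constants are illustrative, not part of the go-crypto API):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

// sealSessionKey mirrors the v5 SKESK structure above: the session key is
// sealed with an AEAD keyed by the S2K-derived key-encrypting key, with the
// packet header bytes as associated data. Illustrative only.
func sealSessionKey(kek, sessionKey, adata []byte) (iv, encrypted []byte, err error) {
	block, err := aes.NewCipher(kek)
	if err != nil {
		return nil, nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, nil, err
	}
	iv = make([]byte, aead.NonceSize())
	if _, err := rand.Read(iv); err != nil {
		return nil, nil, err
	}
	// The returned ciphertext includes the authentication tag, as in the packet.
	return iv, aead.Seal(nil, iv, sessionKey, adata), nil
}

func main() {
	kek := make([]byte, 32)        // stand-in for the S2K-derived key
	sessionKey := make([]byte, 32) // stand-in for the random session key
	adata := []byte{0xc3, 5, 9, 2} // tag, version, cipher, mode (illustrative values)
	iv, ct, err := sealSessionKey(kek, sessionKey, adata)
	if err != nil {
		panic(err)
	}
	fmt.Printf("iv: %d bytes, ciphertext+tag: %d bytes\n", len(iv), len(ct))
}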

View File

@@ -5,36 +5,54 @@
package packet
import (
"crypto/cipher"
"crypto/sha1"
"crypto/subtle"
"hash"
"io"
"strconv"
"github.com/ProtonMail/go-crypto/openpgp/errors"
)
const aeadSaltSize = 32
// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
// encrypted Contents will consist of more OpenPGP packets. See RFC 4880,
// sections 5.7 and 5.13.
type SymmetricallyEncrypted struct {
MDC bool // true iff this is a type 18 packet and thus has an embedded MAC.
Contents io.Reader
prefix []byte
Version int
Contents io.Reader // contains tag for version 2
IntegrityProtected bool // If true it is type 18 (with MDC or AEAD). False is packet type 9
// Specific to version 1
prefix []byte
// Specific to version 2
Cipher CipherFunction
Mode AEADMode
ChunkSizeByte byte
Salt [aeadSaltSize]byte
}
const symmetricallyEncryptedVersion = 1
const (
symmetricallyEncryptedVersionMdc = 1
symmetricallyEncryptedVersionAead = 2
)
func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
if se.MDC {
if se.IntegrityProtected {
// See RFC 4880, section 5.13.
var buf [1]byte
_, err := readFull(r, buf[:])
if err != nil {
return err
}
if buf[0] != symmetricallyEncryptedVersion {
switch buf[0] {
case symmetricallyEncryptedVersionMdc:
se.Version = symmetricallyEncryptedVersionMdc
case symmetricallyEncryptedVersionAead:
se.Version = symmetricallyEncryptedVersionAead
if err := se.parseAead(r); err != nil {
return err
}
default:
return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
}
}
@@ -46,245 +64,27 @@ func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
// packet can be read. An incorrect key will only be detected after trying
// to decrypt the entire data.
func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
keySize := c.KeySize()
if keySize == 0 {
return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
}
if len(key) != keySize {
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
if se.Version == symmetricallyEncryptedVersionAead {
return se.decryptAead(key)
}
if se.prefix == nil {
se.prefix = make([]byte, c.blockSize()+2)
_, err := readFull(se.Contents, se.prefix)
if err != nil {
return nil, err
}
} else if len(se.prefix) != c.blockSize()+2 {
return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
}
ocfbResync := OCFBResync
if se.MDC {
// MDC packets use a different form of OCFB mode.
ocfbResync = OCFBNoResync
}
s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
plaintext := cipher.StreamReader{S: s, R: se.Contents}
if se.MDC {
// MDC packets have an embedded hash that we need to check.
h := sha1.New()
h.Write(se.prefix)
return &seMDCReader{in: plaintext, h: h}, nil
}
// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
return seReader{plaintext}, nil
}
// seReader wraps an io.Reader with a no-op Close method.
type seReader struct {
in io.Reader
}
func (ser seReader) Read(buf []byte) (int, error) {
return ser.in.Read(buf)
}
func (ser seReader) Close() error {
return nil
}
const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
// MDC packet containing a hash of the previous Contents which is checked
// against the running hash. See RFC 4880, section 5.13.
type seMDCReader struct {
in io.Reader
h hash.Hash
trailer [mdcTrailerSize]byte
scratch [mdcTrailerSize]byte
trailerUsed int
error bool
eof bool
}
func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
if ser.error {
err = io.ErrUnexpectedEOF
return
}
if ser.eof {
err = io.EOF
return
}
// If we haven't yet filled the trailer buffer then we must do that
// first.
for ser.trailerUsed < mdcTrailerSize {
n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
ser.trailerUsed += n
if err == io.EOF {
if ser.trailerUsed != mdcTrailerSize {
n = 0
err = io.ErrUnexpectedEOF
ser.error = true
return
}
ser.eof = true
n = 0
return
}
if err != nil {
n = 0
return
}
}
// If it's a short read then we read into a temporary buffer and shift
// the data into the caller's buffer.
if len(buf) <= mdcTrailerSize {
n, err = readFull(ser.in, ser.scratch[:len(buf)])
copy(buf, ser.trailer[:n])
ser.h.Write(buf[:n])
copy(ser.trailer[:], ser.trailer[n:])
copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
if n < len(buf) {
ser.eof = true
err = io.EOF
}
return
}
n, err = ser.in.Read(buf[mdcTrailerSize:])
copy(buf, ser.trailer[:])
ser.h.Write(buf[:n])
copy(ser.trailer[:], buf[n:])
if err == io.EOF {
ser.eof = true
}
return
}
// This is a new-format packet tag byte for a type 19 (MDC) packet.
const mdcPacketTagByte = byte(0x80) | 0x40 | 19
func (ser *seMDCReader) Close() error {
if ser.error {
return errors.ErrMDCMissing
}
for !ser.eof {
// We haven't seen EOF so we need to read to the end
var buf [1024]byte
_, err := ser.Read(buf[:])
if err == io.EOF {
break
}
if err != nil {
return errors.ErrMDCMissing
}
}
ser.h.Write(ser.trailer[:2])
final := ser.h.Sum(nil)
if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
return errors.ErrMDCHashMismatch
}
// The hash already includes the MDC header, but we still check its value
// to confirm encryption correctness
if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
return errors.ErrMDCMissing
}
return nil
}
// An seMDCWriter writes through to an io.WriteCloser while maintaining a running
// hash of the data written. On close, it emits an MDC packet containing the
// running hash.
type seMDCWriter struct {
w io.WriteCloser
h hash.Hash
}
func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
w.h.Write(buf)
return w.w.Write(buf)
}
func (w *seMDCWriter) Close() (err error) {
var buf [mdcTrailerSize]byte
buf[0] = mdcPacketTagByte
buf[1] = sha1.Size
w.h.Write(buf[:2])
digest := w.h.Sum(nil)
copy(buf[2:], digest)
_, err = w.w.Write(buf[:])
if err != nil {
return
}
return w.w.Close()
}
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
type noOpCloser struct {
w io.Writer
}
func (c noOpCloser) Write(data []byte) (n int, err error) {
return c.w.Write(data)
}
func (c noOpCloser) Close() error {
return nil
return se.decryptMdc(c, key)
}
// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
// to w and returns a WriteCloser to which the to-be-encrypted packets can be
// written.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
if c.KeySize() != len(key) {
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
}
func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) {
writeCloser := noOpCloser{w}
ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected)
if err != nil {
return
}
_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
if err != nil {
return
if aeadSupported {
return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key)
}
block := c.new(key)
blockSize := block.BlockSize()
iv := make([]byte, blockSize)
_, err = config.Random().Read(iv)
if err != nil {
return
}
s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
_, err = ciphertext.Write(prefix)
if err != nil {
return
}
plaintext := cipher.StreamWriter{S: s, W: ciphertext}
h := sha1.New()
h.Write(iv)
h.Write(iv[blockSize-2:])
Contents = &seMDCWriter{w: plaintext, h: h}
return
return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config)
}

View File

@@ -0,0 +1,156 @@
// Copyright 2023 Proton AG. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/cipher"
"crypto/sha256"
"io"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"golang.org/x/crypto/hkdf"
)
// parseAead parses a V2 SEIPD packet (AEAD) as specified in
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error {
headerData := make([]byte, 3)
if n, err := io.ReadFull(r, headerData); n < 3 {
return errors.StructuralError("could not read aead header: " + err.Error())
}
// Cipher
se.Cipher = CipherFunction(headerData[0])
// cipherFunc must have block size 16 to use AEAD
if se.Cipher.blockSize() != 16 {
return errors.UnsupportedError("invalid aead cipher: " + string(se.Cipher))
}
// Mode
se.Mode = AEADMode(headerData[1])
if se.Mode.TagLength() == 0 {
return errors.UnsupportedError("unknown aead mode: " + string(se.Mode))
}
// Chunk size
se.ChunkSizeByte = headerData[2]
if se.ChunkSizeByte > 16 {
return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.ChunkSizeByte))
}
// Salt
if n, err := io.ReadFull(r, se.Salt[:]); n < aeadSaltSize {
return errors.StructuralError("could not read aead salt: " + err.Error())
}
return nil
}
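A quick illustration of the chunk size octet validated above, assuming the 2**(c+6) encoding from the crypto refresh that decodeAEADChunkSize is expected to implement (which is why the parser caps the octet at 16, i.e. 4 MiB chunks):

package main

import "fmt"

func main() {
	// Chunk size byte c encodes chunks of 2**(c+6) octets.
	for _, c := range []byte{0, 6, 16} {
		fmt.Printf("chunk size byte %d -> %d octets\n", c, 1<<(uint(c)+6))
	}
}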
// associatedData for chunks: tag, version, cipher, mode, chunk size byte
func (se *SymmetricallyEncrypted) associatedData() []byte {
return []byte{
0xD2,
symmetricallyEncryptedVersionAead,
byte(se.Cipher),
byte(se.Mode),
se.ChunkSizeByte,
}
}
// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) {
aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData())
// Carry the first tagLen bytes
tagLen := se.Mode.TagLength()
peekedBytes := make([]byte, tagLen)
n, err := io.ReadFull(se.Contents, peekedBytes)
if n < tagLen || (err != nil && err != io.EOF) {
return nil, errors.StructuralError("not enough data to decrypt:" + err.Error())
}
return &aeadDecrypter{
aeadCrypter: aeadCrypter{
aead: aead,
chunkSize: decodeAEADChunkSize(se.ChunkSizeByte),
initialNonce: nonce,
associatedData: se.associatedData(),
chunkIndex: make([]byte, 8),
packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
},
reader: se.Contents,
peekedBytes: peekedBytes,
}, nil
}
// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) {
// cipherFunc must have block size 16 to use AEAD
if cipherSuite.Cipher.blockSize() != 16 {
return nil, errors.InvalidArgumentError("invalid aead cipher function")
}
if cipherSuite.Cipher.KeySize() != len(inputKey) {
return nil, errors.InvalidArgumentError("error in aead serialization: bad key length")
}
// Data for en/decryption: tag, version, cipher, aead mode, chunk size
prefix := []byte{
0xD2,
symmetricallyEncryptedVersionAead,
byte(cipherSuite.Cipher),
byte(cipherSuite.Mode),
chunkSizeByte,
}
// Write the header (which corresponds to the prefix, minus its first byte)
n, err := ciphertext.Write(prefix[1:])
if err != nil || n < 4 {
return nil, err
}
// Random salt
salt := make([]byte, aeadSaltSize)
if _, err := rand.Read(salt); err != nil {
return nil, err
}
if _, err := ciphertext.Write(salt); err != nil {
return nil, err
}
aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix)
return &aeadEncrypter{
aeadCrypter: aeadCrypter{
aead: aead,
chunkSize: decodeAEADChunkSize(chunkSizeByte),
associatedData: prefix,
chunkIndex: make([]byte, 8),
initialNonce: nonce,
packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
},
writer: ciphertext,
}, nil
}
func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) {
hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData)
encryptionKey := make([]byte, c.KeySize())
_, _ = readFull(hkdfReader, encryptionKey)
// Last 64 bits of nonce are the counter
nonce = make([]byte, mode.IvLength()-8)
_, _ = readFull(hkdfReader, nonce)
blockCipher := c.new(encryptionKey)
aead = mode.new(blockCipher)
return
}
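To make the derivation above concrete: HKDF over the session key, salted with the per-message salt and with the packet prefix as info, yields the chunk encryption key followed by a nonce prefix, and the final 64 bits of each chunk's nonce carry the chunk counter. A hedged sketch of that structure (a 16-byte nonce is assumed, as for EAX; this mirrors the layout, not the package's internal API):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	inputKey := make([]byte, 32)     // stand-in for the session key
	salt := make([]byte, 32)         // per-message salt from the packet
	info := []byte{0xD2, 2, 9, 2, 6} // tag, version, cipher, mode, chunk size (illustrative)

	r := hkdf.New(sha256.New, inputKey, salt, info)
	encryptionKey := make([]byte, 32)
	noncePrefix := make([]byte, 16-8) // assumed 16-byte AEAD nonce minus the 8 counter bytes
	_, _ = io.ReadFull(r, encryptionKey)
	_, _ = io.ReadFull(r, noncePrefix)

	// Nonce for chunk i: derived prefix || 64-bit big-endian chunk index.
	nonce := make([]byte, len(noncePrefix)+8)
	copy(nonce, noncePrefix)
	binary.BigEndian.PutUint64(nonce[len(noncePrefix):], 3) // e.g. chunk index 3
	fmt.Printf("key: %d bytes, nonce for chunk 3: %x\n", len(encryptionKey), nonce)
}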

View File

@@ -0,0 +1,256 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/cipher"
"crypto/sha1"
"crypto/subtle"
"hash"
"io"
"strconv"
"github.com/ProtonMail/go-crypto/openpgp/errors"
)
// seMdcReader wraps an io.Reader with a no-op Close method.
type seMdcReader struct {
in io.Reader
}
func (ser seMdcReader) Read(buf []byte) (int, error) {
return ser.in.Read(buf)
}
func (ser seMdcReader) Close() error {
return nil
}
func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) {
if !c.IsSupported() {
return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c)))
}
if len(key) != c.KeySize() {
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
}
if se.prefix == nil {
se.prefix = make([]byte, c.blockSize()+2)
_, err := readFull(se.Contents, se.prefix)
if err != nil {
return nil, err
}
} else if len(se.prefix) != c.blockSize()+2 {
return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
}
ocfbResync := OCFBResync
if se.IntegrityProtected {
// MDC packets use a different form of OCFB mode.
ocfbResync = OCFBNoResync
}
s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
plaintext := cipher.StreamReader{S: s, R: se.Contents}
if se.IntegrityProtected {
// IntegrityProtected packets have an embedded hash that we need to check.
h := sha1.New()
h.Write(se.prefix)
return &seMDCReader{in: plaintext, h: h}, nil
}
// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
return seMdcReader{plaintext}, nil
}
const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
// MDC packet containing a hash of the previous Contents which is checked
// against the running hash. See RFC 4880, section 5.13.
type seMDCReader struct {
in io.Reader
h hash.Hash
trailer [mdcTrailerSize]byte
scratch [mdcTrailerSize]byte
trailerUsed int
error bool
eof bool
}
func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
if ser.error {
err = io.ErrUnexpectedEOF
return
}
if ser.eof {
err = io.EOF
return
}
// If we haven't yet filled the trailer buffer then we must do that
// first.
for ser.trailerUsed < mdcTrailerSize {
n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
ser.trailerUsed += n
if err == io.EOF {
if ser.trailerUsed != mdcTrailerSize {
n = 0
err = io.ErrUnexpectedEOF
ser.error = true
return
}
ser.eof = true
n = 0
return
}
if err != nil {
n = 0
return
}
}
// If it's a short read then we read into a temporary buffer and shift
// the data into the caller's buffer.
if len(buf) <= mdcTrailerSize {
n, err = readFull(ser.in, ser.scratch[:len(buf)])
copy(buf, ser.trailer[:n])
ser.h.Write(buf[:n])
copy(ser.trailer[:], ser.trailer[n:])
copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
if n < len(buf) {
ser.eof = true
err = io.EOF
}
return
}
n, err = ser.in.Read(buf[mdcTrailerSize:])
copy(buf, ser.trailer[:])
ser.h.Write(buf[:n])
copy(ser.trailer[:], buf[n:])
if err == io.EOF {
ser.eof = true
}
return
}
// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet.
const mdcPacketTagByte = byte(0x80) | 0x40 | 19
func (ser *seMDCReader) Close() error {
if ser.error {
return errors.ErrMDCMissing
}
for !ser.eof {
// We haven't seen EOF so we need to read to the end
var buf [1024]byte
_, err := ser.Read(buf[:])
if err == io.EOF {
break
}
if err != nil {
return errors.ErrMDCMissing
}
}
ser.h.Write(ser.trailer[:2])
final := ser.h.Sum(nil)
if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
return errors.ErrMDCHashMismatch
}
// The hash already includes the MDC header, but we still check its value
// to confirm encryption correctness
if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
return errors.ErrMDCMissing
}
return nil
}
// An seMDCWriter writes through to an io.WriteCloser while maintaining a running
// hash of the data written. On close, it emits an MDC packet containing the
// running hash.
type seMDCWriter struct {
w io.WriteCloser
h hash.Hash
}
func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
w.h.Write(buf)
return w.w.Write(buf)
}
func (w *seMDCWriter) Close() (err error) {
var buf [mdcTrailerSize]byte
buf[0] = mdcPacketTagByte
buf[1] = sha1.Size
w.h.Write(buf[:2])
digest := w.h.Sum(nil)
copy(buf[2:], digest)
_, err = w.w.Write(buf[:])
if err != nil {
return
}
return w.w.Close()
}
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
type noOpCloser struct {
w io.Writer
}
func (c noOpCloser) Write(data []byte) (n int, err error) {
return c.w.Write(data)
}
func (c noOpCloser) Close() error {
return nil
}
func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
// Disallow old cipher suites
if !c.IsSupported() || c < CipherAES128 {
return nil, errors.InvalidArgumentError("invalid mdc cipher function")
}
if c.KeySize() != len(key) {
return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length")
}
_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc})
if err != nil {
return
}
block := c.new(key)
blockSize := block.BlockSize()
iv := make([]byte, blockSize)
_, err = config.Random().Read(iv)
if err != nil {
return
}
s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
_, err = ciphertext.Write(prefix)
if err != nil {
return
}
plaintext := cipher.StreamWriter{S: s, W: ciphertext}
h := sha1.New()
h.Write(iv)
h.Write(iv[blockSize-2:])
Contents = &seMDCWriter{w: plaintext, h: h}
return
}
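For reference, the MDC trailer that seMDCWriter emits on Close (and that seMDCReader verifies) is 22 bytes: a new-format tag byte for packet type 19, a length octet of 20, and a SHA-1 over everything hashed so far including those two header bytes. A minimal sketch of just that trailer computation:

package main

import (
	"crypto/sha1"
	"fmt"
)

func mdcTrailer(hashedSoFar []byte) []byte {
	trailer := make([]byte, 2+sha1.Size)
	trailer[0] = 0x80 | 0x40 | 19 // new-format packet tag, type 19 (MDC)
	trailer[1] = sha1.Size
	h := sha1.New()
	h.Write(hashedSoFar) // prefix + plaintext already hashed by the writer
	h.Write(trailer[:2]) // the MDC header itself is included in the hash
	copy(trailer[2:], h.Sum(nil))
	return trailer
}

func main() {
	fmt.Printf("% x\n", mdcTrailer([]byte("example plaintext")))
}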

View File

@@ -42,9 +42,16 @@ func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error
if err = jpeg.Encode(&buf, photo, nil); err != nil {
return
}
lengthBuf := make([]byte, 5)
n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1)
lengthBuf = lengthBuf[:n]
uat.Contents = append(uat.Contents, &OpaqueSubpacket{
SubType: UserAttrImageSubpacket,
Contents: buf.Bytes()})
SubType: UserAttrImageSubpacket,
EncodedLength: lengthBuf,
Contents: buf.Bytes(),
})
}
return
}
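The EncodedLength field added above carries the variable-length subpacket length from RFC 4880, section 5.2.3.1. A sketch of that encoding, under the assumption that serializeSubpacketLength follows the RFC rules (the helper name below is illustrative, not the package's internal function):

package main

import "fmt"

// encodeSubpacketLength is an illustrative sketch of the one-, two- and
// five-octet subpacket length forms from RFC 4880, section 5.2.3.1.
func encodeSubpacketLength(length int) []byte {
	switch {
	case length < 192:
		return []byte{byte(length)}
	case length < 8384:
		length -= 192
		return []byte{byte(length>>8) + 192, byte(length)}
	default:
		return []byte{255,
			byte(length >> 24), byte(length >> 16), byte(length >> 8), byte(length)}
	}
}

func main() {
	for _, l := range []int{100, 1000, 100000} {
		fmt.Printf("%d -> % x\n", l, encodeSubpacketLength(l))
	}
}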

View File

@@ -8,13 +8,16 @@ package openpgp // import "github.com/ProtonMail/go-crypto/openpgp"
import (
"crypto"
_ "crypto/sha256"
_ "crypto/sha512"
"hash"
"io"
"strconv"
"github.com/ProtonMail/go-crypto/openpgp/armor"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
"github.com/ProtonMail/go-crypto/openpgp/packet"
_ "golang.org/x/crypto/sha3"
)
// SignatureType is the armor type for a PGP signature.
@@ -131,8 +134,8 @@ ParsePackets:
}
}
case *packet.SymmetricallyEncrypted:
if !p.MDC && !config.AllowUnauthenticatedMessages() {
return nil, errors.UnsupportedError("message is not authenticated")
if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() {
return nil, errors.UnsupportedError("message is not integrity protected")
}
edp = p
break ParsePackets
@@ -208,13 +211,11 @@ FindKey:
if len(symKeys) != 0 && passphrase != nil {
for _, s := range symKeys {
key, cipherFunc, err := s.Decrypt(passphrase)
// On wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc:
// In v4, on wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc:
// only in fewer than 5% of cases will we proceed to decrypt the data
if err == nil {
decrypted, err = edp.Decrypt(cipherFunc, key)
// TODO: ErrKeyIncorrect is no longer thrown on SEIP decryption,
// but it might still be relevant for when we implement AEAD decryption (otherwise, remove?)
if err != nil && err != errors.ErrKeyIncorrect {
if err != nil {
return nil, err
}
if decrypted != nil {
@@ -304,14 +305,14 @@ FindLiteralData:
// should be preprocessed (i.e. to normalize line endings). Thus this function
// returns two hashes. The second should be used to hash the message itself and
// performs any needed preprocessing.
func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
if hashId == crypto.MD5 {
return nil, nil, errors.UnsupportedError("insecure hash algorithm: MD5")
func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok {
return nil, nil, errors.UnsupportedError("unsupported hash function")
}
if !hashId.Available() {
return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
if !hashFunc.Available() {
return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc)))
}
h := hashId.New()
h := hashFunc.New()
switch sigType {
case packet.SigTypeBinary:
@@ -383,19 +384,7 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) {
key := scr.md.SignedBy
signatureError := key.PublicKey.VerifySignature(scr.h, sig)
if signatureError == nil {
now := scr.config.Now()
if key.Revoked(now) ||
key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key)
key.Entity.PrimaryIdentity().Revoked(now) {
signatureError = errors.ErrKeyRevoked
}
if sig.SigExpired(now) {
signatureError = errors.ErrSignatureExpired
}
if key.PublicKey.KeyExpired(key.SelfSignature, now) ||
key.SelfSignature.SigExpired(now) {
signatureError = errors.ErrKeyExpired
}
signatureError = checkSignatureDetails(key, sig, scr.config)
}
scr.md.Signature = sig
scr.md.SignatureError = signatureError
@@ -434,8 +423,24 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) {
return n, nil
}
// VerifyDetachedSignature takes a signed file and a detached signature and
// returns the signature packet and the entity the signature was signed by,
// if any, and a possible signature verification error.
// If the signer isn't known, ErrUnknownIssuer is returned.
func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
var expectedHashes []crypto.Hash
return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
}
// VerifyDetachedSignatureAndHash performs the same actions as
// VerifyDetachedSignature and checks that the expected hash functions were used.
func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
}
// CheckDetachedSignature takes a signed file and a detached signature and
// returns the signer if the signature is valid. If the signer isn't known,
// returns the entity the signature was signed by, if any, and a possible
// signature verification error. If the signer isn't known,
// ErrUnknownIssuer is returned.
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
var expectedHashes []crypto.Hash
@@ -445,6 +450,11 @@ func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config
// CheckDetachedSignatureAndHash performs the same actions as
// CheckDetachedSignature and checks that the expected hash functions were used.
func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) {
_, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
return
}
func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
var issuerKeyId uint64
var hashFunc crypto.Hash
var sigType packet.SignatureType
@@ -453,23 +463,22 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader,
expectedHashesLen := len(expectedHashes)
packets := packet.NewReader(signature)
var sig *packet.Signature
for {
p, err = packets.Next()
if err == io.EOF {
return nil, errors.ErrUnknownIssuer
return nil, nil, errors.ErrUnknownIssuer
}
if err != nil {
return nil, err
return nil, nil, err
}
var ok bool
sig, ok = p.(*packet.Signature)
if !ok {
return nil, errors.StructuralError("non signature packet found")
return nil, nil, errors.StructuralError("non signature packet found")
}
if sig.IssuerKeyId == nil {
return nil, errors.StructuralError("signature doesn't have an issuer")
return nil, nil, errors.StructuralError("signature doesn't have an issuer")
}
issuerKeyId = *sig.IssuerKeyId
hashFunc = sig.Hash
@@ -480,7 +489,7 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader,
break
}
if i+1 == expectedHashesLen {
return nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers")
return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers")
}
}
@@ -496,34 +505,21 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader,
h, wrappedHash, err := hashForSignature(hashFunc, sigType)
if err != nil {
return nil, err
return nil, nil, err
}
if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
return nil, err
return nil, nil, err
}
for _, key := range keys {
err = key.PublicKey.VerifySignature(h, sig)
if err == nil {
now := config.Now()
if key.Revoked(now) ||
key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key)
key.Entity.PrimaryIdentity().Revoked(now) {
return key.Entity, errors.ErrKeyRevoked
}
if sig.SigExpired(now) {
return key.Entity, errors.ErrSignatureExpired
}
if key.PublicKey.KeyExpired(key.SelfSignature, now) ||
key.SelfSignature.SigExpired(now) {
return key.Entity, errors.ErrKeyExpired
}
return key.Entity, nil
return sig, key.Entity, checkSignatureDetails(&key, sig, config)
}
}
return nil, err
return nil, nil, err
}
// CheckArmoredDetachedSignature performs the same actions as
@@ -536,3 +532,61 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader,
return CheckDetachedSignature(keyring, signed, body, config)
}
// checkSignatureDetails returns an error if:
// - The signature (or one of the binding signatures mentioned below)
// has an unknown critical notation data subpacket
// - The primary key of the signing entity is revoked
// - The primary identity is revoked
// - The signature is expired
// - The primary key of the signing entity is expired according to the
// primary identity binding signature
//
// ... or, if the signature was signed by a subkey and:
// - The signing subkey is revoked
// - The signing subkey is expired according to the subkey binding signature
// - The signing subkey binding signature is expired
// - The signing subkey cross-signature is expired
//
// NOTE: The order of these checks is important, as the caller may choose to
// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never
// ignore any other errors.
//
// TODO: Also return an error if:
// - The primary key is expired according to a direct-key signature
// - (For V5 keys only:) The direct-key signature (exists and) is expired
func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error {
now := config.Now()
primaryIdentity := key.Entity.PrimaryIdentity()
signedBySubKey := key.PublicKey != key.Entity.PrimaryKey
sigsToCheck := []*packet.Signature{signature, primaryIdentity.SelfSignature}
if signedBySubKey {
sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature)
}
for _, sig := range sigsToCheck {
for _, notation := range sig.Notations {
if notation.IsCritical && !config.KnownNotation(notation.Name) {
return errors.SignatureError("unknown critical notation: " + notation.Name)
}
}
}
if key.Entity.Revoked(now) || // primary key is revoked
(signedBySubKey && key.Revoked(now)) || // subkey is revoked
primaryIdentity.Revoked(now) { // primary identity is revoked
return errors.ErrKeyRevoked
}
if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired
return errors.ErrKeyExpired
}
if signedBySubKey {
if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired
return errors.ErrKeyExpired
}
}
for _, sig := range sigsToCheck {
if sig.SigExpired(now) { // any of the relevant signatures are expired
return errors.ErrSignatureExpired
}
}
return nil
}
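Given the note above that callers may choose to tolerate ErrSignatureExpired or ErrKeyExpired but must never ignore other errors, a caller-side sketch of that policy could look as follows (assuming the exported error values and the VerifyDetachedSignature signature shown in this diff; tolerating expiry is a deliberate application policy, not the default):

package example

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// verifyAllowingExpiry treats expiry as acceptable but keeps every other
// verification error fatal, following the ordering contract described above.
func verifyAllowingExpiry(keyring openpgp.KeyRing, signed, signature io.Reader, config *packet.Config) (*openpgp.Entity, error) {
	_, signer, err := openpgp.VerifyDetachedSignature(keyring, signed, signature, config)
	if err == pgperrors.ErrSignatureExpired || err == pgperrors.ErrKeyExpired {
		return signer, nil
	}
	return signer, err
}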

View File

@@ -1,8 +1,8 @@
package openpgp
const testKey1KeyId = 0xA34D7E18C20C31BB
const testKey3KeyId = 0x338934250CCC0360
const testKeyP256KeyId = 0xd44a2c495918513e
const testKey1KeyId uint64 = 0xA34D7E18C20C31BB
const testKey3KeyId uint64 = 0x338934250CCC0360
const testKeyP256KeyId uint64 = 0xd44a2c495918513e
const signedInput = "Signed message\nline 2\nline 3\n"
const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n"
@@ -106,7 +106,7 @@ const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6
const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101`
const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000`
@@ -171,3 +171,104 @@ y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv
UQdl5MlBka1QSNbMq2Bz7XwNPg4=
=6lbM
-----END PGP MESSAGE-----`
const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK-----
xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w
bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm
bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN
aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8
nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl
+bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J
BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK
chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG
ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw
FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln
cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+
s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh
6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ
sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz
dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt
YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ
1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i
aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr
fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF
gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q
Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj
jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF
qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH
dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI
zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/
0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8
9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x
Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH
UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7
/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ
nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl
owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM
GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu
a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0
M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD
lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5
01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb
hW1Hj9AO8lzggBQ=
=Nt+N
-----END PGP PUBLIC KEY BLOCK-----
`
const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE-----
wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8
N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3
AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI
NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv
KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y
EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN
AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb
UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB
ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+
7A5CBid5
=aQkm
-----END PGP SIGNATURE-----
`
const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE-----
owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x
U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM
f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10
2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n
vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA=
=fRXs
-----END PGP MESSAGE-----`
const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK-----
mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+
fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5
GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0
JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS
YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6
AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki
Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf
9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa
JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag
Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr
woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb
LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA
SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP
GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2
bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X
W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD
AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY
hz3tYjKhoFTKEIq3y3Pp
=h/aX
-----END PGP PUBLIC KEY BLOCK-----`

View File

@@ -3,7 +3,8 @@
// license that can be found in the LICENSE file.
// Package s2k implements the various OpenPGP string-to-key transforms as
// specified in RFC 4880 section 3.7.1.
// specified in RFC 4880 section 3.7.1, and Argon2 specified in
// draft-ietf-openpgp-crypto-refresh-08 section 3.7.1.4.
package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k"
import (
@@ -14,70 +15,47 @@ import (
"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
"golang.org/x/crypto/argon2"
)
// Config collects configuration parameters for s2k key-stretching
// transformations. A nil *Config is valid and results in all default
// values. Currently, Config is used only by the Serialize function in
// this package.
type Config struct {
// S2KMode is the mode of s2k function.
// It can be 0 (simple), 1(salted), 3(iterated)
// 2(reserved) 100-110(private/experimental).
S2KMode uint8
// Hash is the default hash function to be used. If
// nil, SHA256 is used.
Hash crypto.Hash
// S2KCount is only used for symmetric encryption. It
// determines the strength of the passphrase stretching when
// the said passphrase is hashed to produce a key. S2KCount
// should be between 65536 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 16777216 is used. Not all
// values in the above range can be represented. S2KCount will
// be rounded up to the next representable value if it cannot
// be encoded exactly. See RFC 4880 Section 3.7.1.3.
S2KCount int
}
type Mode uint8
// Defines the default S2KMode constants
//
// 0 (simple), 1(salted), 3(iterated), 4(argon2)
const (
SimpleS2K Mode = 0
SaltedS2K Mode = 1
IteratedSaltedS2K Mode = 3
Argon2S2K Mode = 4
GnuS2K Mode = 101
)
const Argon2SaltSize int = 16
// Params contains all the parameters of the s2k packet
type Params struct {
// mode is the mode of s2k function.
// It can be 0 (simple), 1(salted), 3(iterated)
// 2(reserved) 100-110(private/experimental).
mode uint8
mode Mode
// hashId is the ID of the hash function used in any of the modes
hashId byte
// salt is a byte array to use as a salt in hashing process
salt []byte
// salt is a byte array to use as a salt in hashing process or argon2
saltBytes [Argon2SaltSize]byte
// countByte is used to determine how many rounds of hashing are to
// be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
countByte byte
}
func (c *Config) hash() crypto.Hash {
if c == nil || uint(c.Hash) == 0 {
return crypto.SHA256
}
return c.Hash
}
// EncodedCount get encoded count
func (c *Config) EncodedCount() uint8 {
if c == nil || c.S2KCount == 0 {
return 224 // The common case. Corresponding to 16777216
}
i := c.S2KCount
switch {
case i < 65536:
i = 65536
case i > 65011712:
i = 65011712
}
return encodeCount(i)
// passes is a parameter in Argon2 to determine the number of iterations
// See the crypto refresh, Section 3.7.1.4.
passes byte
// parallelism is a parameter in Argon2 to determine the degree of parallelism
// See the crypto refresh, Section 3.7.1.4.
parallelism byte
// memoryExp is a parameter in Argon2 to determine the memory usage
// i.e., 2 ** memoryExp kibibytes
// See the crypto refresh, Section 3.7.1.4.
memoryExp byte
}
// encodeCount converts an iterative "count" in the range 1024 to
@@ -106,6 +84,31 @@ func decodeCount(c uint8) int {
return (16 + int(c&15)) << (uint32(c>>4) + 6)
}
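As a sanity check on the count encoding, the default octet 224 (the value EncodedCount falls back to) decodes like this:

package main

import "fmt"

func main() {
	// 224 = 0xE0: low nibble 0, high nibble 14, so the decoded count is
	// (16 + 0) << (14 + 6) = 16 << 20.
	c := uint8(224)
	fmt.Println((16 + int(c&15)) << (uint32(c>>4) + 6)) // 16777216
}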
// encodeMemory converts the Argon2 "memory" in the range parallelism*8 to
// 2**31, inclusive, to an encoded memory. The return value is the
// octet that is actually stored in the GPG file. encodeMemory panics
// if memory is not in the above range.
// See OpenPGP crypto refresh Section 3.7.1.4.
func encodeMemory(memory uint32, parallelism uint8) uint8 {
if memory < (8 * uint32(parallelism)) || memory > uint32(2147483648) {
panic("Memory argument memory is outside the required range")
}
for exp := 3; exp < 31; exp++ {
compare := decodeMemory(uint8(exp))
if compare >= memory {
return uint8(exp)
}
}
return 31
}
// decodeMemory computes the decoded memory in kibibytes as 2**memoryExponent
func decodeMemory(memoryExponent uint8) uint32 {
return uint32(1) << memoryExponent
}
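And the corresponding memory encoding, matching the 64 MiB default that Argon2Config uses further down in this change:

package main

import "fmt"

func main() {
	// decodeMemory(16) = 2**16 KiB = 65536 KiB = 64 MiB; encodeMemory walks
	// exponents 3..31 until 2**exp covers the requested amount.
	fmt.Println(uint32(1)<<16, "KiB") // 65536, i.e. 64 MiB
}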
// Simple writes to out the result of computing the Simple S2K function (RFC
// 4880, section 3.7.1.1) using the given hash and input passphrase.
func Simple(out []byte, h hash.Hash, in []byte) {
@@ -169,25 +172,53 @@ func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
}
}
// Argon2 writes to out the key derived from the password (in) with the Argon2
// function (the crypto refresh, section 3.7.1.4)
func Argon2(out []byte, in []byte, salt []byte, passes uint8, parallelism uint8, memoryExp uint8) {
key := argon2.IDKey(in, salt, uint32(passes), decodeMemory(memoryExp), parallelism, uint32(len(out)))
copy(out[:], key)
}
// Generate generates valid parameters from given configuration.
// It will enforce salted + hashed s2k method
// It will enforce the Iterated and Salted or Argon2 S2K method.
func Generate(rand io.Reader, c *Config) (*Params, error) {
hashId, ok := HashToHashId(c.Hash)
if !ok {
return nil, errors.UnsupportedError("no such hash")
}
var params *Params
if c != nil && c.Mode() == Argon2S2K {
// handle Argon2 case
argonConfig := c.Argon2()
params = &Params{
mode: Argon2S2K,
passes: argonConfig.Passes(),
parallelism: argonConfig.Parallelism(),
memoryExp: argonConfig.EncodedMemory(),
}
} else if c != nil && c.PassphraseIsHighEntropy && c.Mode() == SaltedS2K { // Allow SaltedS2K if PassphraseIsHighEntropy
hashId, ok := algorithm.HashToHashId(c.hash())
if !ok {
return nil, errors.UnsupportedError("no such hash")
}
params := &Params{
mode: 3, // Enforce iterated + salted method
hashId: hashId,
salt: make([]byte, 8),
countByte: c.EncodedCount(),
params = &Params{
mode: SaltedS2K,
hashId: hashId,
}
} else { // Enforce IteratedSaltedS2K method otherwise
hashId, ok := algorithm.HashToHashId(c.hash())
if !ok {
return nil, errors.UnsupportedError("no such hash")
}
if c != nil {
c.S2KMode = IteratedSaltedS2K
}
params = &Params{
mode: IteratedSaltedS2K,
hashId: hashId,
countByte: c.EncodedCount(),
}
}
if _, err := io.ReadFull(rand, params.salt); err != nil {
if _, err := io.ReadFull(rand, params.salt()); err != nil {
return nil, err
}
return params, nil
}
@@ -207,45 +238,60 @@ func Parse(r io.Reader) (f func(out, in []byte), err error) {
// ParseIntoParams reads a binary specification for a string-to-key
// transformation from r and returns a struct describing the s2k parameters.
func ParseIntoParams(r io.Reader) (params *Params, err error) {
var buf [9]byte
var buf [Argon2SaltSize + 3]byte
_, err = io.ReadFull(r, buf[:2])
_, err = io.ReadFull(r, buf[:1])
if err != nil {
return
}
params = &Params{
mode: buf[0],
hashId: buf[1],
mode: Mode(buf[0]),
}
switch params.mode {
case 0:
return params, nil
case 1:
_, err = io.ReadFull(r, buf[:8])
case SimpleS2K:
_, err = io.ReadFull(r, buf[:1])
if err != nil {
return nil, err
}
params.salt = buf[:8]
params.hashId = buf[0]
return params, nil
case 3:
case SaltedS2K:
_, err = io.ReadFull(r, buf[:9])
if err != nil {
return nil, err
}
params.salt = buf[:8]
params.countByte = buf[8]
params.hashId = buf[0]
copy(params.salt(), buf[1:9])
return params, nil
case 101:
// This is a GNU extension. See
// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
if _, err = io.ReadFull(r, buf[:4]); err != nil {
case IteratedSaltedS2K:
_, err = io.ReadFull(r, buf[:10])
if err != nil {
return nil, err
}
if buf[0] == 'G' && buf[1] == 'N' && buf[2] == 'U' && buf[3] == 1 {
params.hashId = buf[0]
copy(params.salt(), buf[1:9])
params.countByte = buf[9]
return params, nil
case Argon2S2K:
_, err = io.ReadFull(r, buf[:Argon2SaltSize+3])
if err != nil {
return nil, err
}
copy(params.salt(), buf[:Argon2SaltSize])
params.passes = buf[Argon2SaltSize]
params.parallelism = buf[Argon2SaltSize+1]
params.memoryExp = buf[Argon2SaltSize+2]
return params, nil
case GnuS2K:
// This is a GNU extension. See
// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
if _, err = io.ReadFull(r, buf[:5]); err != nil {
return nil, err
}
params.hashId = buf[0]
if buf[1] == 'G' && buf[2] == 'N' && buf[3] == 'U' && buf[4] == 1 {
return params, nil
}
return nil, errors.UnsupportedError("GNU S2K extension")
@@ -255,39 +301,56 @@ func ParseIntoParams(r io.Reader) (params *Params, err error) {
}
func (params *Params) Dummy() bool {
return params != nil && params.mode == 101
return params != nil && params.mode == GnuS2K
}
func (params *Params) salt() []byte {
switch params.mode {
case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8]
case Argon2S2K: return params.saltBytes[:Argon2SaltSize]
default: return nil
}
}
func (params *Params) Function() (f func(out, in []byte), err error) {
if params.Dummy() {
return nil, errors.ErrDummyPrivateKey("dummy key found")
}
hashObj, ok := HashIdToHash(params.hashId)
if !ok {
return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId)))
}
if !hashObj.Available() {
return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj)))
var hashObj crypto.Hash
if params.mode != Argon2S2K {
var ok bool
hashObj, ok = algorithm.HashIdToHashWithSha1(params.hashId)
if !ok {
return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId)))
}
if !hashObj.Available() {
return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj)))
}
}
switch params.mode {
case 0:
case SimpleS2K:
f := func(out, in []byte) {
Simple(out, hashObj.New(), in)
}
return f, nil
case 1:
case SaltedS2K:
f := func(out, in []byte) {
Salted(out, hashObj.New(), in, params.salt)
Salted(out, hashObj.New(), in, params.salt())
}
return f, nil
case 3:
case IteratedSaltedS2K:
f := func(out, in []byte) {
Iterated(out, hashObj.New(), in, params.salt, decodeCount(params.countByte))
Iterated(out, hashObj.New(), in, params.salt(), decodeCount(params.countByte))
}
return f, nil
case Argon2S2K:
f := func(out, in []byte) {
Argon2(out, in, params.salt(), params.passes, params.parallelism, params.memoryExp)
}
return f, nil
}
@@ -295,23 +358,28 @@ func (params *Params) Function() (f func(out, in []byte), err error) {
}
func (params *Params) Serialize(w io.Writer) (err error) {
if _, err = w.Write([]byte{params.mode}); err != nil {
if _, err = w.Write([]byte{uint8(params.mode)}); err != nil {
return
}
if _, err = w.Write([]byte{params.hashId}); err != nil {
return
if params.mode != Argon2S2K {
if _, err = w.Write([]byte{params.hashId}); err != nil {
return
}
}
if params.Dummy() {
_, err = w.Write(append([]byte("GNU"), 1))
return
}
if params.mode > 0 {
if _, err = w.Write(params.salt); err != nil {
if _, err = w.Write(params.salt()); err != nil {
return
}
if params.mode == 3 {
if params.mode == IteratedSaltedS2K {
_, err = w.Write([]byte{params.countByte})
}
if params.mode == Argon2S2K {
_, err = w.Write([]byte{params.passes, params.parallelism, params.memoryExp})
}
}
return
}
@@ -337,31 +405,3 @@ func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Co
f(key, passphrase)
return nil
}
// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
// hash id.
func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
if hash, ok := algorithm.HashById[id]; ok {
return hash.HashFunc(), true
}
return 0, false
}
// HashIdToString returns the name of the hash function corresponding to the
// given OpenPGP hash id.
func HashIdToString(id byte) (name string, ok bool) {
if hash, ok := algorithm.HashById[id]; ok {
return hash.String(), true
}
return "", false
}
// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash.
func HashToHashId(h crypto.Hash) (id byte, ok bool) {
for id, hash := range algorithm.HashById {
if hash.HashFunc() == h {
return id, true
}
}
return 0, false
}

View File

@@ -0,0 +1,26 @@
package s2k
// Cache stores keys derived with s2k functions from one passphrase
// to avoid recomputation if multiple items are encrypted with
// the same parameters.
type Cache map[Params][]byte
// GetOrComputeDerivedKey tries to retrieve the key
// for the given s2k parameters from the cache.
// If there is no hit, it derives the key with the s2k function from the passphrase,
// updates the cache, and returns the key.
func (c *Cache) GetOrComputeDerivedKey(passphrase []byte, params *Params, expectedKeySize int) ([]byte, error) {
key, found := (*c)[*params]
if !found || len(key) != expectedKeySize {
var err error
derivedKey := make([]byte, expectedKeySize)
s2k, err := params.Function()
if err != nil {
return nil, err
}
s2k(derivedKey, passphrase)
(*c)[*params] = derivedKey // store the freshly derived key so later lookups hit the cache
return derivedKey, nil
}
return key, nil
}
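A minimal usage sketch of the cache, assuming the exported Generate and Cache shown in this diff: two encryptions that share the same S2K parameters pay for the passphrase stretching only once.

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

func main() {
	params, err := s2k.Generate(rand.Reader, &s2k.Config{S2KCount: 65536})
	if err != nil {
		panic(err)
	}
	cache := s2k.Cache{}
	passphrase := []byte("correct horse battery staple")
	// The first call stretches the passphrase; the second reuses the cached key.
	k1, err := cache.GetOrComputeDerivedKey(passphrase, params, 32)
	if err != nil {
		panic(err)
	}
	k2, _ := cache.GetOrComputeDerivedKey(passphrase, params, 32)
	fmt.Println(len(k1), len(k2)) // 32 32
}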

View File

@@ -0,0 +1,129 @@
package s2k
import "crypto"
// Config collects configuration parameters for s2k key-stretching
// transformations. A nil *Config is valid and results in all default
// values.
type Config struct {
// S2K (String to Key) mode, used for key derivation in the context of secret key encryption
// and passphrase-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K may be used.
// If the passphrase is a high-entropy key, indicated by setting PassphraseIsHighEntropy to true,
// s2k.SaltedS2K can also be used.
// Note: Argon2 is the strongest option but not all OpenPGP implementations are compatible with it
// (pending standardisation).
// 0 (simple), 1(salted), 3(iterated), 4(argon2)
// 2(reserved) 100-110(private/experimental).
S2KMode Mode
// Only relevant if S2KMode is not set to s2k.Argon2S2K.
// Hash is the default hash function to be used. If
// nil, SHA256 is used.
Hash crypto.Hash
// Argon2 parameters for S2K (String to Key).
// Only relevant if S2KMode is set to s2k.Argon2S2K.
// If nil, default parameters are used.
// For more details on the choice of parameters, see https://tools.ietf.org/html/rfc9106#section-4.
Argon2Config *Argon2Config
// Only relevant if S2KMode is set to s2k.IteratedSaltedS2K.
// Iteration count for Iterated S2K (String to Key). It
// determines the strength of the passphrase stretching when
// the said passphrase is hashed to produce a key. S2KCount
// should be between 65536 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 16777216 is used. Not all
// values in the above range can be represented. S2KCount will
// be rounded up to the next representable value if it cannot
// be encoded exactly. When set, it is strongly encrouraged to
// use a value that is at least 65536. See RFC 4880 Section
// 3.7.1.3.
S2KCount int
// Indicates whether the passphrase passed by the application is a
// high-entropy key (e.g. it's randomly generated or derived from
// another passphrase using a strong key derivation function).
// When true, allows the S2KMode to be s2k.SaltedS2K.
// When the passphrase is not a high-entropy key, using SaltedS2K is
// insecure, and not allowed by draft-ietf-openpgp-crypto-refresh-08.
PassphraseIsHighEntropy bool
}
// Argon2Config stores the Argon2 parameters
// A nil *Argon2Config is valid and results in all default values.
type Argon2Config struct {
NumberOfPasses uint8
DegreeOfParallelism uint8
// The memory parameter for Argon2 specifies desired memory usage in kibibytes.
// For example memory=64*1024 sets the memory cost to ~64 MB.
Memory uint32
}
func (c *Config) Mode() Mode {
if c == nil {
return IteratedSaltedS2K
}
return c.S2KMode
}
func (c *Config) hash() crypto.Hash {
if c == nil || uint(c.Hash) == 0 {
return crypto.SHA256
}
return c.Hash
}
func (c *Config) Argon2() *Argon2Config {
if c == nil || c.Argon2Config == nil {
return nil
}
return c.Argon2Config
}
// EncodedCount get encoded count
func (c *Config) EncodedCount() uint8 {
if c == nil || c.S2KCount == 0 {
return 224 // The common case. Corresponding to 16777216
}
i := c.S2KCount
switch {
case i < 65536:
i = 65536
case i > 65011712:
i = 65011712
}
return encodeCount(i)
}
func (c *Argon2Config) Passes() uint8 {
if c == nil || c.NumberOfPasses == 0 {
return 3
}
return c.NumberOfPasses
}
func (c *Argon2Config) Parallelism() uint8 {
if c == nil || c.DegreeOfParallelism == 0 {
return 4
}
return c.DegreeOfParallelism
}
func (c *Argon2Config) EncodedMemory() uint8 {
if c == nil || c.Memory == 0 {
return 16 // 64 MiB of RAM
}
memory := c.Memory
lowerBound := uint32(c.Parallelism())*8
upperBound := uint32(2147483648)
switch {
case memory < lowerBound:
memory = lowerBound
case memory > upperBound:
memory = upperBound
}
return encodeMemory(memory, c.Parallelism())
}
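Pulling the Argon2 knobs together, a configuration that opts into Argon2-based S2K might look like the sketch below (parameter values are illustrative; RFC 9106 discusses how to choose them, and the defaults above apply when fields are left zero):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

func main() {
	cfg := &s2k.Config{
		S2KMode: s2k.Argon2S2K,
		Argon2Config: &s2k.Argon2Config{
			NumberOfPasses:      3,
			DegreeOfParallelism: 4,
			Memory:              64 * 1024, // in KiB, i.e. 64 MiB
		},
	}
	params, err := s2k.Generate(rand.Reader, cfg)
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := params.Serialize(&buf); err != nil {
		panic(err)
	}
	// mode (4), 16-byte salt, passes, parallelism, encoded memory exponent
	fmt.Printf("serialized S2K specifier: % x\n", buf.Bytes())
}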

View File

@@ -13,8 +13,8 @@ import (
"github.com/ProtonMail/go-crypto/openpgp/armor"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
"github.com/ProtonMail/go-crypto/openpgp/packet"
"github.com/ProtonMail/go-crypto/openpgp/s2k"
)
// DetachSign signs message with the private key from signer (which must
@@ -70,15 +70,11 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S
if signingKey.PrivateKey.Encrypted {
return errors.InvalidArgumentError("signing key is encrypted")
}
if _, ok := algorithm.HashToHashId(config.Hash()); !ok {
return errors.InvalidArgumentError("invalid hash function")
}
sig := new(packet.Signature)
sig.SigType = sigType
sig.PubKeyAlgo = signingKey.PrivateKey.PubKeyAlgo
sig.Hash = config.Hash()
sig.CreationTime = config.Now()
sigLifetimeSecs := config.SigLifetime()
sig.SigLifetimeSecs = &sigLifetimeSecs
sig.IssuerKeyId = &signingKey.PrivateKey.KeyId
sig := createSignaturePacket(signingKey.PublicKey, sigType, config)
h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
if err != nil {
@@ -125,16 +121,13 @@ func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHi
}
var w io.WriteCloser
if config.AEAD() != nil {
w, err = packet.SerializeAEADEncrypted(ciphertext, key, config.Cipher(), config.AEAD().Mode(), config)
if err != nil {
return
}
} else {
w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config)
if err != nil {
return
}
cipherSuite := packet.CipherSuite{
Cipher: config.Cipher(),
Mode: config.AEAD().Mode(),
}
w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config)
if err != nil {
return
}
literalData := w
@@ -173,8 +166,25 @@ func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
return a[:j]
}
// intersectCipherSuites mutates and returns a prefix of a that contains only
// the values in the intersection of a and b. The order of a is preserved.
func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) {
var j int
for _, v := range a {
for _, v2 := range b {
if v[0] == v2[0] && v[1] == v2[1] {
a[j] = v
j++
break
}
}
}
return a[:j]
}
func hashToHashId(h crypto.Hash) uint8 {
v, ok := s2k.HashToHashId(h)
v, ok := algorithm.HashToHashId(h)
if !ok {
panic("tried to convert unknown hash")
}
@@ -240,7 +250,7 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit
var hash crypto.Hash
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
hash = h
break
}
@@ -249,7 +259,7 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit
// If the hash specified by config is a candidate, we'll use that.
if configuredHash := config.Hash(); configuredHash.Available() {
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
hash = h
break
}
@@ -258,7 +268,7 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit
if hash == 0 {
hashId := candidateHashes[0]
name, ok := s2k.HashIdToString(hashId)
name, ok := algorithm.HashIdToString(hashId)
if !ok {
name = "#" + strconv.Itoa(int(hashId))
}
@@ -329,39 +339,39 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En
// These are the possible ciphers that we'll use for the message.
candidateCiphers := []uint8{
uint8(packet.CipherAES128),
uint8(packet.CipherAES256),
uint8(packet.CipherCAST5),
uint8(packet.CipherAES128),
}
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
hashToHashId(crypto.SHA3_256),
hashToHashId(crypto.SHA3_512),
}
candidateAeadModes := []uint8{
uint8(packet.AEADModeEAX),
uint8(packet.AEADModeOCB),
uint8(packet.AEADModeExperimentalGCM),
// Prefer GCM if everyone supports it
candidateCipherSuites := [][2]uint8{
{uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)},
{uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)},
{uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)},
{uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)},
{uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)},
{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)},
}
candidateCompression := []uint8{
uint8(packet.CompressionNone),
uint8(packet.CompressionZIP),
uint8(packet.CompressionZLIB),
}
// In the event that a recipient doesn't specify any supported ciphers
// or hash functions, these are the ones that we assume that every
// implementation supports.
defaultCiphers := candidateCiphers[0:1]
defaultHashes := candidateHashes[0:1]
defaultAeadModes := candidateAeadModes[0:1]
defaultCompression := candidateCompression[0:1]
encryptKeys := make([]Key, len(to))
// AEAD is used only if every key supports it.
aeadSupported := true
// AEAD is used only if config enables it and every key supports it
aeadSupported := config.AEAD() != nil
for i := range to {
var ok bool
@@ -371,38 +381,37 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En
}
sig := to[i].PrimaryIdentity().SelfSignature
if sig.AEAD == false {
if !sig.SEIPDv2 {
aeadSupported = false
}
preferredSymmetric := sig.PreferredSymmetric
if len(preferredSymmetric) == 0 {
preferredSymmetric = defaultCiphers
}
preferredHashes := sig.PreferredHash
if len(preferredHashes) == 0 {
preferredHashes = defaultHashes
}
preferredAeadModes := sig.PreferredAEAD
if len(preferredAeadModes) == 0 {
preferredAeadModes = defaultAeadModes
}
preferredCompression := sig.PreferredCompression
if len(preferredCompression) == 0 {
preferredCompression = defaultCompression
}
candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
candidateAeadModes = intersectPreferences(candidateAeadModes, preferredAeadModes)
candidateCompression = intersectPreferences(candidateCompression, preferredCompression)
candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric)
candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash)
candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites)
candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression)
}
if len(candidateCiphers) == 0 || len(candidateHashes) == 0 || len(candidateAeadModes) == 0 {
return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms")
// In the event that the intersection of supported algorithms is empty we use the ones
// labelled as MUST that every implementation supports.
if len(candidateCiphers) == 0 {
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3
candidateCiphers = []uint8{uint8(packet.CipherAES128)}
}
if len(candidateHashes) == 0 {
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos
candidateHashes = []uint8{hashToHashId(crypto.SHA256)}
}
if len(candidateCipherSuites) == 0 {
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}}
}
cipher := packet.CipherFunction(candidateCiphers[0])
mode := packet.AEADMode(candidateAeadModes[0])
aeadCipherSuite := packet.CipherSuite{
Cipher: packet.CipherFunction(candidateCipherSuites[0][0]),
Mode: packet.AEADMode(candidateCipherSuites[0][1]),
}
// If the cipher specified by config is a candidate, we'll use that.
configuredCipher := config.Cipher()
for _, c := range candidateCiphers {
@@ -425,17 +434,11 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En
}
var payload io.WriteCloser
if config.AEAD() != nil && aeadSupported {
payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config)
if err != nil {
return
}
} else {
payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, symKey, config)
if err != nil {
return
}
payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config)
if err != nil {
return
}
payload, err = handleCompression(payload, candidateCompression, config)
if err != nil {
return nil, err
@@ -458,8 +461,8 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
hashToHashId(crypto.SHA3_256),
hashToHashId(crypto.SHA3_512),
}
defaultHashes := candidateHashes[0:1]
preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash
@@ -502,15 +505,9 @@ func (s signatureWriter) Write(data []byte) (int, error) {
}
func (s signatureWriter) Close() error {
sig := &packet.Signature{
Version: s.signer.Version,
SigType: s.sigType,
PubKeyAlgo: s.signer.PubKeyAlgo,
Hash: s.hashType,
CreationTime: s.config.Now(),
IssuerKeyId: &s.signer.KeyId,
Metadata: s.metadata,
}
sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config)
sig.Hash = s.hashType
sig.Metadata = s.metadata
if err := sig.Sign(s.h, s.signer, s.config); err != nil {
return err
@@ -524,6 +521,21 @@ func (s signatureWriter) Close() error {
return s.encryptedData.Close()
}
func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature {
sigLifetimeSecs := config.SigLifetime()
return &packet.Signature{
Version: signer.Version,
SigType: sigType,
PubKeyAlgo: signer.PubKeyAlgo,
Hash: config.Hash(),
CreationTime: config.Now(),
IssuerKeyId: &signer.KeyId,
IssuerFingerprint: signer.Fingerprint,
Notations: config.Notations(),
SigLifetimeSecs: &sigLifetimeSecs,
}
}
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
// TODO: we have two of these in OpenPGP packages alone. This probably needs
// to be promoted somewhere more common.
@@ -545,6 +557,9 @@ func handleCompression(compressed io.WriteCloser, candidateCompression []uint8,
if confAlgo == packet.CompressionNone {
return
}
// Set algorithm labelled as MUST as fallback
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4
finalAlgo := packet.CompressionNone
// if compression specified by config available we will use it
for _, c := range candidateCompression {


@@ -1,42 +0,0 @@
# bufpipe: Buffered Pipe
[![CircleCI](https://img.shields.io/circleci/build/github/acomagu/bufpipe.svg?style=flat-square)](https://circleci.com/gh/acomagu/bufpipe) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/acomagu/bufpipe)
The buffered version of io.Pipe. It's safe for concurrent use.
## How does it differ from io.Pipe?
Writes never block because the pipe has a variable-sized buffer.
```Go
r, w := bufpipe.New(nil)
io.WriteString(w, "abc") // No blocking.
io.WriteString(w, "def") // No blocking, too.
w.Close()
io.Copy(os.Stdout, r)
// Output: abcdef
```
[Playground](https://play.golang.org/p/PdyBAS3pVob)
## How does it differ from bytes.Buffer?
Reads block when the internal buffer is empty, until the writer is closed.
```Go
r, w := bufpipe.New(nil)
done := make(chan struct{})
go func() {
io.Copy(os.Stdout, r) // The reads block until the writer is closed.
done <- struct{}{}
}()
io.WriteString(w, "abc")
io.WriteString(w, "def")
w.Close()
<-done
// Output: abcdef
```
[Playground](https://play.golang.org/p/UppmyLeRgX6)


@@ -1,128 +0,0 @@
package bufpipe
import (
"bytes"
"errors"
"io"
"sync"
)
// ErrClosedPipe is the error used for read or write operations on a closed pipe.
var ErrClosedPipe = errors.New("bufpipe: read/write on closed pipe")
type pipe struct {
cond *sync.Cond
buf *bytes.Buffer
rerr, werr error
}
// A PipeReader is the read half of a pipe.
type PipeReader struct {
*pipe
}
// A PipeWriter is the write half of a pipe.
type PipeWriter struct {
*pipe
}
// New creates a synchronous pipe using buf as its initial contents. It can be
// used to connect code expecting an io.Reader with code expecting an io.Writer.
//
// Unlike io.Pipe, writes never block because the internal buffer has variable
// size. Reads block only when the buffer is empty.
//
// It is safe to call Read and Write in parallel with each other or with Close.
// Parallel calls to Read and parallel calls to Write are also safe: the
// individual calls will be gated sequentially.
//
// The new pipe takes ownership of buf, and the caller should not use buf after
// this call. New is intended to prepare a PipeReader to read existing data. It
// can also be used to set the initial size of the internal buffer for writing.
// To do that, buf should have the desired capacity but a length of zero.
func New(buf []byte) (*PipeReader, *PipeWriter) {
p := &pipe{
buf: bytes.NewBuffer(buf),
cond: sync.NewCond(new(sync.Mutex)),
}
return &PipeReader{
pipe: p,
}, &PipeWriter{
pipe: p,
}
}
// Read implements the standard Read interface: it reads data from the pipe,
// reading from the internal buffer, otherwise blocking until a writer arrives
// or the write end is closed. If the write end is closed with an error, that
// error is returned as err; otherwise err is io.EOF.
func (r *PipeReader) Read(data []byte) (int, error) {
r.cond.L.Lock()
defer r.cond.L.Unlock()
RETRY:
n, err := r.buf.Read(data)
// If not closed and no read, wait for writing.
if err == io.EOF && r.rerr == nil && n == 0 {
r.cond.Wait()
goto RETRY
}
if err == io.EOF {
return n, r.rerr
}
return n, err
}
// Close closes the reader; subsequent writes from the write half of the pipe
// will return error ErrClosedPipe.
func (r *PipeReader) Close() error {
return r.CloseWithError(nil)
}
// CloseWithError closes the reader; subsequent writes to the write half of the
// pipe will return the error err.
func (r *PipeReader) CloseWithError(err error) error {
r.cond.L.Lock()
defer r.cond.L.Unlock()
if err == nil {
err = ErrClosedPipe
}
r.werr = err
return nil
}
// Write implements the standard Write interface: it writes data to the internal
// buffer. If the read end is closed with an error, that err is returned as err;
// otherwise err is ErrClosedPipe.
func (w *PipeWriter) Write(data []byte) (int, error) {
w.cond.L.Lock()
defer w.cond.L.Unlock()
if w.werr != nil {
return 0, w.werr
}
n, err := w.buf.Write(data)
w.cond.Signal()
return n, err
}
// Close closes the writer; subsequent reads from the read half of the pipe will
// return io.EOF once the internal buffer is empty.
func (w *PipeWriter) Close() error {
return w.CloseWithError(nil)
}
// CloseWithError closes the writer; subsequent reads from the read half of the
// pipe will return err once the internal buffer is empty.
func (w *PipeWriter) CloseWithError(err error) error {
w.cond.L.Lock()
defer w.cond.L.Unlock()
if err == nil {
err = io.EOF
}
w.rerr = err
return nil
}


@@ -1,2 +0,0 @@
// Package bufpipe provides an IO pipe with a variable-sized buffer.
package bufpipe

vendor/github.com/cyphar/filepath-securejoin/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
Copyright (C) 2017 SUSE LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/cyphar/filepath-securejoin/README.md generated vendored Normal file

@@ -0,0 +1,79 @@
## `filepath-securejoin` ##
[![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml)
An implementation of `SecureJoin`, a [candidate for inclusion in the Go
standard library][go#20126]. The purpose of this function is to be a "secure"
alternative to `filepath.Join`, and in particular it provides certain
guarantees that are not provided by `filepath.Join`.
> **NOTE**: This code is *only* safe if you are not at risk of other processes
> modifying path components after you've used `SecureJoin`. If it is possible
> for a malicious process to modify path components of the resolved path, then
> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There
> are some Linux kernel patches I'm working on which might allow for a better
> solution.][lwn-obeneath]
>
> In addition, with a slightly modified API it might be possible to use
> `O_PATH` and verify that the opened path is actually the resolved one -- but
> I have not done that yet. I might add it in the future as a helper function
> to help users verify the path (we can't just return `/proc/self/fd/<foo>`
> because that doesn't always work transparently for all users).
This is the function prototype:
```go
func SecureJoin(root, unsafePath string) (string, error)
```
This library **guarantees** the following:
* If no error is set, the resulting string **must** be a child path of
`root` and will not contain any symlink path components (they will all be
expanded).
* When expanding symlinks, all symlink path components **must** be resolved
relative to the provided root. In particular, this can be considered a
userspace implementation of how `chroot(2)` operates on file paths. Note that
these symlinks will **not** be expanded lexically (`filepath.Clean` is not
called on the input before processing).
* Non-existent path components are unaffected by `SecureJoin` (similar to
`filepath.EvalSymlinks`'s semantics).
* The returned path will always be `filepath.Clean`ed and thus not contain any
`..` components.
A (trivial) implementation of this function on GNU/Linux systems could be done
with the following (note that this requires root privileges and is far more
opaque than the implementation in this library, and also requires that
`readlink` is inside the `root` path):
```go
package securejoin
import (
"os/exec"
"path/filepath"
)
func SecureJoin(root, unsafePath string) (string, error) {
unsafePath = string(filepath.Separator) + unsafePath
cmd := exec.Command("chroot", root,
"readlink", "--canonicalize-missing", "--no-newline", unsafePath)
output, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
expanded := string(output)
return filepath.Join(root, expanded), nil
}
```
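As a point of comparison, here is a minimal, hypothetical usage sketch of the pure-Go `SecureJoin` described above (the root path and inputs are illustrative):

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// ".." components and symlinks are resolved relative to the root,
	// so the result stays inside /var/lib/app.
	p, err := securejoin.SecureJoin("/var/lib/app", "../../etc/passwd")
	if err != nil {
		fmt.Println("join failed:", err) // e.g. ELOOP after too many symlink expansions
		return
	}
	fmt.Println(p) // /var/lib/app/etc/passwd
}
```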
[lwn-obeneath]: https://lwn.net/Articles/767547/
[go#20126]: https://github.com/golang/go/issues/20126
### License ###
The license of this project is the same as Go, which is a BSD 3-clause license
available in the `LICENSE` file.

vendor/github.com/cyphar/filepath-securejoin/VERSION generated vendored Normal file

@@ -0,0 +1 @@
0.2.4

vendor/github.com/cyphar/filepath-securejoin/join.go generated vendored Normal file

@@ -0,0 +1,125 @@
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
// Copyright (C) 2017 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package securejoin is an implementation of the hopefully-soon-to-be-included
// SecureJoin helper that is meant to be part of the "path/filepath" package.
// The purpose of this project is to provide a PoC implementation to make the
// SecureJoin proposal (https://github.com/golang/go/issues/20126) more
// tangible.
package securejoin
import (
"bytes"
"errors"
"os"
"path/filepath"
"strings"
"syscall"
)
// IsNotExist tells you if err is an error that implies that either the path
// accessed does not exist (or path components don't exist). This is
// effectively a more broad version of os.IsNotExist.
func IsNotExist(err error) bool {
// Check that it's not actually an ENOTDIR, which in some cases is a more
// convoluted case of ENOENT (usually involving weird paths).
return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
}
// SecureJoinVFS joins the two given path components (similar to Join) except
// that the returned path is guaranteed to be scoped inside the provided root
// path (when evaluated). Any symbolic links in the path are evaluated with the
// given root treated as the root of the filesystem, similar to a chroot. The
// filesystem state is evaluated through the given VFS interface (if nil, the
// standard os.* family of functions are used).
//
// Note that the guarantees provided by this function only apply if the path
// components in the returned string are not modified (in other words are not
// replaced with symlinks on the filesystem) after this function has returned.
// Such a symlink race is necessarily out-of-scope of SecureJoin.
//
// Volume names in unsafePath are always discarded, regardless if they are
// provided via direct input or when evaluating symlinks. Therefore:
//
// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
// Use the os.* VFS implementation if none was specified.
if vfs == nil {
vfs = osVFS{}
}
unsafePath = filepath.FromSlash(unsafePath)
var path bytes.Buffer
n := 0
for unsafePath != "" {
if n > 255 {
return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP}
}
if v := filepath.VolumeName(unsafePath); v != "" {
unsafePath = unsafePath[len(v):]
}
// Next path component, p.
i := strings.IndexRune(unsafePath, filepath.Separator)
var p string
if i == -1 {
p, unsafePath = unsafePath, ""
} else {
p, unsafePath = unsafePath[:i], unsafePath[i+1:]
}
// Create a cleaned path, using the lexical semantics of /../a, to
// create a "scoped" path component which can safely be joined to fullP
// for evaluation. At this point, path.String() doesn't contain any
// symlink components.
cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p)
if cleanP == string(filepath.Separator) {
path.Reset()
continue
}
fullP := filepath.Clean(root + cleanP)
// Figure out whether the path is a symlink.
fi, err := vfs.Lstat(fullP)
if err != nil && !IsNotExist(err) {
return "", err
}
// Treat non-existent path components the same as non-symlinks (we
// can't do any better here).
if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 {
path.WriteString(p)
path.WriteRune(filepath.Separator)
continue
}
// Only increment when we actually dereference a link.
n++
// It's a symlink, expand it by prepending it to the yet-unparsed path.
dest, err := vfs.Readlink(fullP)
if err != nil {
return "", err
}
// Absolute symlinks reset any work we've already done.
if filepath.IsAbs(dest) {
path.Reset()
}
unsafePath = dest + string(filepath.Separator) + unsafePath
}
// We have to clean path.String() here because it may contain '..'
// components that are entirely lexical, but would be misleading otherwise.
// And finally do a final clean to ensure that root is also lexically
// clean.
fullP := filepath.Clean(string(filepath.Separator) + path.String())
return filepath.Clean(root + fullP), nil
}
// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library
// of functions as the VFS. If in doubt, use this function over SecureJoinVFS.
func SecureJoin(root, unsafePath string) (string, error) {
return SecureJoinVFS(root, unsafePath, nil)
}

vendor/github.com/cyphar/filepath-securejoin/vfs.go generated vendored Normal file

@@ -0,0 +1,41 @@
// Copyright (C) 2017 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
import "os"
// In the future this should be moved into a separate package, since several
// projects (umoci and go-mtree) are already using this sort of interface.
// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is
// equivalent to using the standard os.* family of functions. This is mainly
// used for mock testing, but can also be used to run SecureJoin against a
// VFS-like system.
type VFS interface {
// Lstat returns a FileInfo describing the named file. If the file is a
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
// makes no attempt to follow the link. These semantics are identical to
// os.Lstat.
Lstat(name string) (os.FileInfo, error)
// Readlink returns the destination of the named symbolic link. These
// semantics are identical to os.Readlink.
Readlink(name string) (string, error)
}
// osVFS is the "nil" VFS, in that it just passes everything through to the os
// module.
type osVFS struct{}
// Lstat returns a FileInfo describing the named file. If the file is a
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
// makes no attempt to follow the link. These semantics are identical to
// os.Lstat.
func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
// Readlink returns the destination of the named symbolic link. These
// semantics are identical to os.Readlink.
func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) }

vendor/github.com/go-git/gcfg/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
coverage.out

vendor/github.com/go-git/gcfg/Makefile generated vendored Normal file

@@ -0,0 +1,17 @@
# General
WORKDIR = $(PWD)
# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test
# Coverage
COVERAGE_REPORT = coverage.out
COVERAGE_MODE = count
test:
$(GOTEST) ./...
test-coverage:
echo "" > $(COVERAGE_REPORT); \
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...


@@ -1,7 +0,0 @@
// +build !go1.2
package gcfg
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}


@@ -1,9 +0,0 @@
// +build go1.2
package gcfg
import (
"encoding"
)
type textUnmarshaler encoding.TextUnmarshaler


@@ -3,16 +3,16 @@ package gcfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"gopkg.in/warnings.v0"
"github.com/go-git/gcfg/scanner"
"github.com/go-git/gcfg/token"
"gopkg.in/warnings.v0"
)
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'}
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b', '\n': '\n'}
// no error: invalid literals should be caught by scanner
func unquote(s string) string {
@@ -224,7 +224,7 @@ func readInto(config interface{}, fset *token.FileSet, file *token.File,
//
// If callback returns an error, ReadWithCallback terminates with an error too.
func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
src, err := ioutil.ReadAll(reader)
src, err := io.ReadAll(reader)
if err != nil {
return err
}
@@ -239,7 +239,7 @@ func ReadWithCallback(reader io.Reader, callback func(string, string, string, st
// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
src, err := ioutil.ReadAll(reader)
src, err := io.ReadAll(reader)
if err != nil {
return err
}
@@ -263,7 +263,7 @@ func ReadFileInto(config interface{}, filename string) error {
return err
}
defer f.Close()
src, err := ioutil.ReadAll(f)
src, err := io.ReadAll(f)
if err != nil {
return err
}


@@ -8,7 +8,6 @@
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner
import (
@@ -16,9 +15,7 @@ import (
"path/filepath"
"unicode"
"unicode/utf8"
)
import (
"github.com/go-git/gcfg/token"
)
@@ -26,13 +23,11 @@ import (
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
// immutable state
file *token.File // source file handle
@@ -54,7 +49,6 @@ type Scanner struct {
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
if s.rdOffset < len(s.src) {
s.offset = s.rdOffset
@@ -87,7 +81,6 @@ func (s *Scanner) next() {
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
const (
@@ -108,7 +101,6 @@ const (
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
@@ -163,12 +155,13 @@ func (s *Scanner) scanIdentifier() string {
return string(s.src[offs:s.offset])
}
// val indicates whether we are scanning a value (vs a header)
func (s *Scanner) scanEscape(val bool) {
offs := s.offset
ch := s.ch
s.next() // always make progress
switch ch {
case '\\', '"':
case '\\', '"', '\n':
// ok
case 'n', 't', 'b':
if val {
@@ -289,7 +282,6 @@ func (s *Scanner) skipWhitespace() {
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
s.skipWhitespace()


@@ -2,6 +2,7 @@ package gcfg
import (
"bytes"
"encoding"
"encoding/gob"
"fmt"
"math/big"
@@ -10,8 +11,9 @@ import (
"unicode"
"unicode/utf8"
"github.com/go-git/gcfg/types"
"gopkg.in/warnings.v0"
"github.com/go-git/gcfg/types"
)
type tag struct {
@@ -65,7 +67,7 @@ var setters = []setter{
}
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
dtu, ok := d.(textUnmarshaler)
dtu, ok := d.(encoding.TextUnmarshaler)
if !ok {
return errUnsupportedType
}

vendor/github.com/go-git/go-billy/v5/Makefile generated vendored Normal file

@@ -0,0 +1,11 @@
# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test
.PHONY: test
test:
$(GOTEST) -race ./...
test-coverage:
echo "" > $(COVERAGE_REPORT); \
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...


@@ -310,14 +310,14 @@ func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File
flag: flag,
}
if isAppend(flag) {
new.position = int64(new.content.Len())
}
if isTruncate(flag) {
new.content.Truncate()
}
if isAppend(flag) {
new.position = int64(new.content.Len())
}
return new
}


@@ -6,6 +6,7 @@ import (
"io"
"os"
"path/filepath"
"sync"
)
type storage struct {
@@ -174,6 +175,8 @@ func clean(path string) string {
type content struct {
name string
bytes []byte
m sync.RWMutex
}
func (c *content) WriteAt(p []byte, off int64) (int, error) {
@@ -185,6 +188,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) {
}
}
c.m.Lock()
prev := len(c.bytes)
diff := int(off) - prev
@@ -196,6 +200,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) {
if len(c.bytes) < prev {
c.bytes = c.bytes[:prev]
}
c.m.Unlock()
return len(p), nil
}
@@ -209,8 +214,10 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
}
}
c.m.RLock()
size := int64(len(c.bytes))
if off >= size {
c.m.RUnlock()
return 0, io.EOF
}
@@ -220,10 +227,12 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
}
btr := c.bytes[off : off+l]
n = copy(b, btr)
if len(btr) < len(b) {
err = io.EOF
}
n = copy(b, btr)
c.m.RUnlock()
return
}


@@ -1,140 +1,123 @@
//go:build !js
// +build !js
// Package osfs provides a billy filesystem for the OS.
package osfs // import "github.com/go-git/go-billy/v5/osfs"
package osfs
import (
"io/ioutil"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/helper/chroot"
)
const (
defaultDirectoryMode = 0755
defaultCreateMode = 0666
defaultDirectoryMode = 0o755
defaultCreateMode = 0o666
)
// Default Filesystem representing the root of the os filesystem.
var Default = &OS{}
// OS is a filesystem based on the os filesystem.
type OS struct{}
var Default = &ChrootOS{}
// New returns a new OS filesystem.
func New(baseDir string) billy.Filesystem {
return chroot.New(Default, baseDir)
// By default paths are deduplicated, but still enforced
// under baseDir. For more info refer to WithDeduplicatePath.
func New(baseDir string, opts ...Option) billy.Filesystem {
o := &options{
deduplicatePath: true,
}
for _, opt := range opts {
opt(o)
}
if o.Type == BoundOSFS {
return newBoundOS(baseDir, o.deduplicatePath)
}
return newChrootOS(baseDir)
}
func (fs *OS) Create(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
// WithBoundOS returns the option of using a Bound filesystem OS.
func WithBoundOS() Option {
return func(o *options) {
o.Type = BoundOSFS
}
}
func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
if flag&os.O_CREATE != 0 {
if err := fs.createDir(filename); err != nil {
// WithChrootOS returns the option of using a Chroot filesystem OS.
func WithChrootOS() Option {
return func(o *options) {
o.Type = ChrootOSFS
}
}
// WithDeduplicatePath toggles the deduplication of the base dir in the path.
// This occurs when absolute links are being used.
// Assuming base dir /base/dir and an absolute symlink /base/dir/target:
//
// With DeduplicatePath (default): /base/dir/target
// Without DeduplicatePath: /base/dir/base/dir/target
//
// This option is only used by the BoundOS OS type.
func WithDeduplicatePath(enabled bool) Option {
return func(o *options) {
o.deduplicatePath = enabled
}
}
type options struct {
Type
deduplicatePath bool
}
type Type int
const (
ChrootOSFS Type = iota
BoundOSFS
)
func readDir(dir string) ([]os.FileInfo, error) {
entries, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
infos := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
fi, err := entry.Info()
if err != nil {
return nil, err
}
infos = append(infos, fi)
}
f, err := os.OpenFile(filename, flag, perm)
if err != nil {
return nil, err
}
return &file{File: f}, err
return infos, nil
}
func (fs *OS) createDir(fullpath string) error {
dir := filepath.Dir(fullpath)
if dir != "." {
if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
return err
}
}
return nil
}
func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) {
l, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
var s = make([]os.FileInfo, len(l))
for i, f := range l {
s[i] = f
}
return s, nil
}
func (fs *OS) Rename(from, to string) error {
if err := fs.createDir(to); err != nil {
return err
}
return rename(from, to)
}
func (fs *OS) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, defaultDirectoryMode)
}
func (fs *OS) Open(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDONLY, 0)
}
func (fs *OS) Stat(filename string) (os.FileInfo, error) {
return os.Stat(filename)
}
func (fs *OS) Remove(filename string) error {
return os.Remove(filename)
}
func (fs *OS) TempFile(dir, prefix string) (billy.File, error) {
if err := fs.createDir(dir + string(os.PathSeparator)); err != nil {
return nil, err
}
f, err := ioutil.TempFile(dir, prefix)
func tempFile(dir, prefix string) (billy.File, error) {
f, err := os.CreateTemp(dir, prefix)
if err != nil {
return nil, err
}
return &file{File: f}, nil
}
func (fs *OS) Join(elem ...string) string {
return filepath.Join(elem...)
}
func (fs *OS) RemoveAll(path string) error {
return os.RemoveAll(filepath.Clean(path))
}
func (fs *OS) Lstat(filename string) (os.FileInfo, error) {
return os.Lstat(filepath.Clean(filename))
}
func (fs *OS) Symlink(target, link string) error {
if err := fs.createDir(link); err != nil {
return err
func openFile(fn string, flag int, perm os.FileMode, createDir func(string) error) (billy.File, error) {
if flag&os.O_CREATE != 0 {
if createDir == nil {
return nil, fmt.Errorf("createDir func cannot be nil if file needs to be opened in create mode")
}
if err := createDir(fn); err != nil {
return nil, err
}
}
return os.Symlink(target, link)
}
func (fs *OS) Readlink(link string) (string, error) {
return os.Readlink(link)
}
// Capabilities implements the Capable interface.
func (fs *OS) Capabilities() billy.Capability {
return billy.DefaultCapabilities
f, err := os.OpenFile(fn, flag, perm)
if err != nil {
return nil, err
}
return &file{File: f}, err
}
// file is a wrapper for an os.File which adds support for file locking.
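For context, a minimal sketch of the reworked constructor above: callers now pick the filesystem backend through functional options (the base dir below is illustrative):

```go
package main

import (
	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	// Default behaviour: a chroot-style filesystem rooted at the base dir.
	legacy := osfs.New("/tmp/repo")

	// Opt in to the newer BoundOS backend; absolute-symlink path
	// deduplication stays enabled by default.
	bound := osfs.New("/tmp/repo", osfs.WithBoundOS())

	// BoundOS with path deduplication switched off.
	raw := osfs.New("/tmp/repo", osfs.WithBoundOS(), osfs.WithDeduplicatePath(false))

	_, _, _ = legacy, bound, raw
}
```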

vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go generated vendored Normal file

@@ -0,0 +1,261 @@
//go:build !js
// +build !js
/*
Copyright 2022 The Flux authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osfs
import (
"fmt"
"os"
"path/filepath"
"strings"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/go-git/go-billy/v5"
)
// BoundOS is a fs implementation based on the OS filesystem which is bound to
// a base dir.
// Prefer this fs implementation over ChrootOS.
//
// Behaviours of note:
// 1. Read and write operations can only be directed to files which descend
// from the base dir.
// 2. Symlinks don't have their targets modified, and therefore can point
// to locations outside the base dir or to non-existent paths.
// 3. Readlink and Lstat ensure that the link file is located within the base
// dir, evaluating any symlinks that file or base dir may contain.
type BoundOS struct {
baseDir string
deduplicatePath bool
}
func newBoundOS(d string, deduplicatePath bool) billy.Filesystem {
return &BoundOS{baseDir: d, deduplicatePath: deduplicatePath}
}
func (fs *BoundOS) Create(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
}
func (fs *BoundOS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
fn, err := fs.abs(filename)
if err != nil {
return nil, err
}
return openFile(fn, flag, perm, fs.createDir)
}
func (fs *BoundOS) ReadDir(path string) ([]os.FileInfo, error) {
dir, err := fs.abs(path)
if err != nil {
return nil, err
}
return readDir(dir)
}
func (fs *BoundOS) Rename(from, to string) error {
f, err := fs.abs(from)
if err != nil {
return err
}
t, err := fs.abs(to)
if err != nil {
return err
}
// MkdirAll for target name.
if err := fs.createDir(t); err != nil {
return err
}
return os.Rename(f, t)
}
func (fs *BoundOS) MkdirAll(path string, perm os.FileMode) error {
dir, err := fs.abs(path)
if err != nil {
return err
}
return os.MkdirAll(dir, perm)
}
func (fs *BoundOS) Open(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDONLY, 0)
}
func (fs *BoundOS) Stat(filename string) (os.FileInfo, error) {
filename, err := fs.abs(filename)
if err != nil {
return nil, err
}
return os.Stat(filename)
}
func (fs *BoundOS) Remove(filename string) error {
fn, err := fs.abs(filename)
if err != nil {
return err
}
return os.Remove(fn)
}
// TempFile creates a temporary file. If dir is empty, the file
// will be created within the OS Temporary dir. If dir is provided
// it must descend from the current base dir.
func (fs *BoundOS) TempFile(dir, prefix string) (billy.File, error) {
if dir != "" {
var err error
dir, err = fs.abs(dir)
if err != nil {
return nil, err
}
}
return tempFile(dir, prefix)
}
func (fs *BoundOS) Join(elem ...string) string {
return filepath.Join(elem...)
}
func (fs *BoundOS) RemoveAll(path string) error {
dir, err := fs.abs(path)
if err != nil {
return err
}
return os.RemoveAll(dir)
}
func (fs *BoundOS) Symlink(target, link string) error {
ln, err := fs.abs(link)
if err != nil {
return err
}
// MkdirAll for containing dir.
if err := fs.createDir(ln); err != nil {
return err
}
return os.Symlink(target, ln)
}
func (fs *BoundOS) Lstat(filename string) (os.FileInfo, error) {
filename = filepath.Clean(filename)
if !filepath.IsAbs(filename) {
filename = filepath.Join(fs.baseDir, filename)
}
if ok, err := fs.insideBaseDirEval(filename); !ok {
return nil, err
}
return os.Lstat(filename)
}
func (fs *BoundOS) Readlink(link string) (string, error) {
if !filepath.IsAbs(link) {
link = filepath.Clean(filepath.Join(fs.baseDir, link))
}
if ok, err := fs.insideBaseDirEval(link); !ok {
return "", err
}
return os.Readlink(link)
}
// Chroot returns a new OS filesystem, with the base dir set to the
// result of joining the provided path with the underlying base dir.
func (fs *BoundOS) Chroot(path string) (billy.Filesystem, error) {
joined, err := securejoin.SecureJoin(fs.baseDir, path)
if err != nil {
return nil, err
}
return New(joined), nil
}
// Root returns the current base dir of the billy.Filesystem.
// This is required in order for this implementation to be a drop-in
// replacement for other upstream implementations (e.g. memory and osfs).
func (fs *BoundOS) Root() string {
return fs.baseDir
}
func (fs *BoundOS) createDir(fullpath string) error {
dir := filepath.Dir(fullpath)
if dir != "." {
if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
return err
}
}
return nil
}
// abs transforms filename to an absolute path, taking into account the base dir.
// Relative paths won't be allowed to ascend the base dir, so `../file` will become
// `/working-dir/file`.
//
// Note that if filename is a symlink, the returned address will be the target of the
// symlink.
func (fs *BoundOS) abs(filename string) (string, error) {
if filename == fs.baseDir {
filename = string(filepath.Separator)
}
path, err := securejoin.SecureJoin(fs.baseDir, filename)
if err != nil {
return "", nil
}
if fs.deduplicatePath {
vol := filepath.VolumeName(fs.baseDir)
dup := filepath.Join(fs.baseDir, fs.baseDir[len(vol):])
if strings.HasPrefix(path, dup+string(filepath.Separator)) {
return fs.abs(path[len(dup):])
}
}
return path, nil
}
// insideBaseDir checks whether filename is located within
// the fs.baseDir.
func (fs *BoundOS) insideBaseDir(filename string) (bool, error) {
if filename == fs.baseDir {
return true, nil
}
if !strings.HasPrefix(filename, fs.baseDir+string(filepath.Separator)) {
return false, fmt.Errorf("path outside base dir")
}
return true, nil
}
// insideBaseDirEval checks whether filename is contained within
// a dir that is within the fs.baseDir, by first evaluating any symlinks
// that either filename or fs.baseDir may contain.
func (fs *BoundOS) insideBaseDirEval(filename string) (bool, error) {
dir, err := filepath.EvalSymlinks(filepath.Dir(filename))
if dir == "" || os.IsNotExist(err) {
dir = filepath.Dir(filename)
}
wd, err := filepath.EvalSymlinks(fs.baseDir)
if wd == "" || os.IsNotExist(err) {
wd = fs.baseDir
}
if filename != wd && dir != wd && !strings.HasPrefix(dir, wd+string(filepath.Separator)) {
return false, fmt.Errorf("path outside base dir")
}
return true, nil
}
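A small, hypothetical sketch of the confinement behaviour documented above: relative paths handed to a BoundOS cannot escape the base dir (the directory names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	fs := osfs.New("/srv/data", osfs.WithBoundOS())

	// "../escape.txt" is joined securely against the base dir, so the file
	// is created at /srv/data/escape.txt rather than /srv/escape.txt.
	f, err := fs.Create("../escape.txt")
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	defer f.Close()

	// Lstat (like Readlink) refuses paths that resolve outside the base dir
	// and returns a "path outside base dir" error.
	if _, err := fs.Lstat("/etc/passwd"); err != nil {
		fmt.Println("lstat rejected:", err)
	}
}
```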

vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go generated vendored Normal file

@@ -0,0 +1,112 @@
//go:build !js
// +build !js
package osfs
import (
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/helper/chroot"
)
// ChrootOS is a legacy filesystem based on a "soft chroot" of the os filesystem.
// Although this is still the default os filesystem, consider using BoundOS instead.
//
// Behaviours of note:
// 1. A "soft chroot" translates the base dir to "/" for the purposes of the
// fs abstraction.
// 2. Symlinks targets may be modified to be kept within the chroot bounds.
// 3. Some file modes do not pass through the fs abstraction.
// 4. The combination of 1 and 2 may cause go-git to think that a Git repository
// is dirty, when in fact it isn't.
type ChrootOS struct{}
func newChrootOS(baseDir string) billy.Filesystem {
return chroot.New(&ChrootOS{}, baseDir)
}
func (fs *ChrootOS) Create(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
}
func (fs *ChrootOS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
return openFile(filename, flag, perm, fs.createDir)
}
func (fs *ChrootOS) createDir(fullpath string) error {
dir := filepath.Dir(fullpath)
if dir != "." {
if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
return err
}
}
return nil
}
func (fs *ChrootOS) ReadDir(dir string) ([]os.FileInfo, error) {
return readDir(dir)
}
func (fs *ChrootOS) Rename(from, to string) error {
if err := fs.createDir(to); err != nil {
return err
}
return rename(from, to)
}
func (fs *ChrootOS) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, defaultDirectoryMode)
}
func (fs *ChrootOS) Open(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDONLY, 0)
}
func (fs *ChrootOS) Stat(filename string) (os.FileInfo, error) {
return os.Stat(filename)
}
func (fs *ChrootOS) Remove(filename string) error {
return os.Remove(filename)
}
func (fs *ChrootOS) TempFile(dir, prefix string) (billy.File, error) {
if err := fs.createDir(dir + string(os.PathSeparator)); err != nil {
return nil, err
}
return tempFile(dir, prefix)
}
func (fs *ChrootOS) Join(elem ...string) string {
return filepath.Join(elem...)
}
func (fs *ChrootOS) RemoveAll(path string) error {
return os.RemoveAll(filepath.Clean(path))
}
func (fs *ChrootOS) Lstat(filename string) (os.FileInfo, error) {
return os.Lstat(filepath.Clean(filename))
}
func (fs *ChrootOS) Symlink(target, link string) error {
if err := fs.createDir(link); err != nil {
return err
}
return os.Symlink(target, link)
}
func (fs *ChrootOS) Readlink(link string) (string, error) {
return os.Readlink(link)
}
// Capabilities implements the Capable interface.
func (fs *ChrootOS) Capabilities() billy.Capability {
return billy.DefaultCapabilities
}


@@ -1,3 +1,4 @@
//go:build js
// +build js
package osfs
@@ -16,6 +17,9 @@ var globalMemFs = memfs.New()
var Default = memfs.New()
// New returns a new OS filesystem.
func New(baseDir string) billy.Filesystem {
func New(baseDir string, _ ...Option) billy.Filesystem {
return chroot.New(Default, Default.Join("/", baseDir))
}
type options struct {
}


@@ -0,0 +1,3 @@
package osfs
type Option func(*options)


@@ -1,3 +1,4 @@
//go:build plan9
// +build plan9
package osfs
@@ -83,3 +84,8 @@ func dirwstat(name string, d *syscall.Dir) error {
}
return nil
}
func umask(new int) func() {
return func() {
}
}


@@ -1,9 +1,11 @@
//go:build !plan9 && !windows && !js
// +build !plan9,!windows,!js
package osfs
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
@@ -25,3 +27,12 @@ func (f *file) Unlock() error {
func rename(from, to string) error {
return os.Rename(from, to)
}
// umask sets umask to a new value, and returns a func which allows the
// caller to reset it back to what it was originally.
func umask(new int) func() {
old := syscall.Umask(new)
return func() {
syscall.Umask(old)
}
}


@@ -1,3 +1,4 @@
//go:build windows
// +build windows
package osfs
@@ -10,15 +11,6 @@ import (
"golang.org/x/sys/windows"
)
type fileInfo struct {
os.FileInfo
name string
}
func (fi *fileInfo) Name() string {
return fi.name
}
var (
kernel32DLL = windows.NewLazySystemDLL("kernel32.dll")
lockFileExProc = kernel32DLL.NewProc("LockFileEx")
@@ -59,3 +51,8 @@ func (f *file) Unlock() error {
func rename(from, to string) error {
return os.Rename(from, to)
}
func umask(new int) func() {
return func() {
}
}

vendor/github.com/go-git/go-billy/v5/util/walk.go generated vendored Normal file

@@ -0,0 +1,72 @@
package util
import (
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
)
// walk recursively descends path, calling walkFn
// adapted from https://golang.org/src/path/filepath/path.go
func walk(fs billy.Filesystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
if !info.IsDir() {
return walkFn(path, info, nil)
}
names, err := readdirnames(fs, path)
err1 := walkFn(path, info, err)
// If err != nil, walk can't walk into this directory.
// err1 != nil means walkFn wants walk to skip this directory or stop walking.
// Therefore, if one of err and err1 isn't nil, walk will return.
if err != nil || err1 != nil {
// The caller's behavior is controlled by the return value, which is decided
// by walkFn. walkFn may ignore err and return nil.
// If walkFn returns SkipDir, it will be handled by the caller.
// So walk should return whatever walkFn returns.
return err1
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := fs.Lstat(filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walk(fs, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
// Walk walks the file tree rooted at root, calling fn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by fn: see the WalkFunc documentation for
// details.
//
// The files are walked in lexical order, which makes the output deterministic
// but requires Walk to read an entire directory into memory before proceeding
// to walk that directory. Walk does not follow symbolic links.
//
// Function adapted from https://github.com/golang/go/blob/3b770f2ccb1fa6fecc22ea822a19447b10b70c5c/src/path/filepath/path.go#L500
func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error {
info, err := fs.Lstat(root)
if err != nil {
err = walkFn(root, nil, err)
} else {
err = walk(fs, root, info, walkFn)
}
if err == filepath.SkipDir {
return nil
}
return err
}
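A brief, hypothetical usage sketch of `Walk` over an in-memory billy filesystem (assuming the go-billy memfs backend; the file names are illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	_ = fs.MkdirAll("a/b", 0o755)
	if f, err := fs.Create("a/b/file.txt"); err == nil {
		f.Close()
	}

	// Walk visits every path under "a" in lexical order, including "a" itself.
	err := util.Walk(fs, "a", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path)
		return nil
	})
	if err != nil {
		fmt.Println("walk error:", err)
	}
}
```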


@@ -2,3 +2,6 @@ coverage.out
*~
coverage.txt
profile.out
.tmp/
.git-dist/
.vscode


@@ -1,111 +1,233 @@
Supported Capabilities
======================
# Supported Features
Here is a non-comprehensive table of git commands and features whose equivalent
is supported by go-git.
Here is a non-comprehensive table of git commands and features and their
compatibility status with go-git.
| Feature | Status | Notes |
|---------------------------------------|--------|-------|
| **config** |
| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. |
| **getting and creating repositories** |
| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. |
| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. |
| **basic snapshotting** |
| add | ✔ | Plain add is supported. Any other flags aren't supported |
| status | ✔ |
| commit | ✔ |
| reset | ✔ |
| rm | ✔ |
| mv | ✔ |
| **branching and merging** |
| branch | ✔ |
| checkout | ✔ | Basic usages of checkout are supported. |
| merge | ✖ |
| mergetool | ✖ |
| stash | ✖ |
| tag | |
| **sharing and updating projects** |
| fetch | ✔ |
| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. |
| push | ✔ |
| remote | ✔ |
| submodule | ✔ |
| **inspection and comparison** |
| show | ✔ |
| log | ✔ |
| shortlog | (see log) |
| describe | |
| **patching** |
| apply | ✖ |
| cherry-pick | ✖ |
| diff | ✔ | Patch object with UnifiedDiff output representation |
| rebase | ✖ |
| revert | ✖ |
| **debugging** |
| bisect | ✖ |
| blame | ✔ |
| grep | ✔ |
| **email** ||
| am | ✖ |
| apply | ✖ |
| format-patch | ✖ |
| send-email | ✖ |
| request-pull | ✖ |
| **external systems** |
| svn | |
| fast-import | ✖ |
| **administration** |
| clean | ✔ |
| gc | ✖ |
| fsck | ✖ |
| reflog | |
| filter-branch | ✖ |
| instaweb | |
| archive | |
| bundle | ✖ |
| prune | ✖ |
| repack | |
| **server admin** |
| daemon | |
| update-server-info | |
| **advanced** |
| notes | ✖ |
| replace | ✖ |
| worktree | |
| annotate | (see blame) |
| **gpg** |
| git-verify-commit | ✔ |
| git-verify-tag | ✔ |
| **plumbing commands** |
| cat-file | ✔ |
| check-ignore | |
| commit-tree | |
| count-objects | |
| diff-index | |
| for-each-ref | |
| hash-object | ✔ |
| ls-files | ✔ |
| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. |
| read-tree | |
| rev-list | ✔ |
| rev-parse | |
| show-ref | ✔ |
| symbolic-ref | |
| update-index | |
| update-ref | |
| verify-pack | |
| write-tree | |
| **protocols** |
| http(s):// (dumb) | |
| http(s):// (smart) | |
| git:// | |
| ssh:// | |
| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. |
| custom | |
| **other features** |
| gitignore | |
| gitattributes | |
| index version | |
| packfile version | |
| push-certs | ✖ |
## Getting and creating repositories
| Feature | Sub-feature | Status | Notes | Examples |
| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `init` | | ✅ | | |
| `init` | `--bare` | ✅ | | |
| `init` | `--template` <br/> `--separate-git-dir` <br/> `--shared` | ❌ | | |
| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
| `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` <br/>`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) |
## Basic snapshotting
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ |
| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | |
| `status` | | ✅ | | |
| `commit` | | ✅ | | - [commit](_examples/commit/main.go) |
| `reset` | | ✅ | | |
| `rm` | | ✅ | | |
| `mv` | | ✅ | | |
## Branching and merging
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
| `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
| `merge` | | ❌ | | |
| `mergetool` | | ❌ | | |
| `stash` | | ❌ | | |
| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |
## Sharing and updating projects
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ |
| `fetch` | | ✅ | | |
| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) |
| `push` | | ✅ | | - [push](_examples/push/main.go) |
| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) |
| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) |
| `submodule` | deinit | ❌ | | |
## Inspection and comparison
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | --------- | ----- | ------------------------------ |
| `show` | | ✅ | | |
| `log` | | ✅ | | - [log](_examples/log/main.go) |
| `shortlog` | | (see log) | | |
| `describe` | | ❌ | | |
## Patching
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- |
| `apply` | | ❌ | | |
| `cherry-pick` | | ❌ | | |
| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | |
| `rebase` | | ❌ | | |
| `revert` | | ❌ | | |
## Debugging
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ----- | ---------------------------------- |
| `bisect` | | ❌ | | |
| `blame` | | ✅ | | - [blame](_examples/blame/main.go) |
| `grep` | | ✅ | | |
## Email
| Feature | Sub-feature | Status | Notes | Examples |
| -------------- | ----------- | ------ | ----- | -------- |
| `am` | | ❌ | | |
| `apply` | | ❌ | | |
| `format-patch` | | ❌ | | |
| `send-email` | | ❌ | | |
| `request-pull` | | ❌ | | |
## External systems
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ----- | -------- |
| `svn` | | ❌ | | |
| `fast-import` | | ❌ | | |
| `lfs` | | ❌ | | |
## Administration
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ----------- | ------ | ----- | -------- |
| `clean` | | ✅ | | |
| `gc` | | ❌ | | |
| `fsck` | | ❌ | | |
| `reflog` | | ❌ | | |
| `filter-branch` | | ❌ | | |
| `instaweb` | | ❌ | | |
| `archive` | | ❌ | | |
| `bundle` | | ❌ | | |
| `prune` | | ❌ | | |
| `repack` | | ❌ | | |
## Server admin
| Feature | Sub-feature | Status | Notes | Examples |
| -------------------- | ----------- | ------ | ----- | ----------------------------------------- |
| `daemon` | | ❌ | | |
| `update-server-info` | | ✅ | | [cli](./cli/go-git/update_server_info.go) |
## Advanced
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | ----------- | ----- | -------- |
| `notes` | | ❌ | | |
| `replace` | | ❌ | | |
| `worktree` | | ❌ | | |
| `annotate` | | (see blame) | | |
## GPG
| Feature | Sub-feature | Status | Notes | Examples |
| ------------------- | ----------- | ------ | ----- | -------- |
| `git-verify-commit` | | ✅ | | |
| `git-verify-tag` | | ✅ | | |
## Plumbing commands
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- |
| `cat-file` | | ✅ | | |
| `check-ignore` | | ❌ | | |
| `commit-tree` | | ❌ | | |
| `count-objects` | | ❌ | | |
| `diff-index` | | ❌ | | |
| `for-each-ref` | | ✅ | | |
| `hash-object` | | ✅ | | |
| `ls-files` | | ✅ | | |
| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) |
| `merge-base` | `--independent` <br/> `--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) |
| `merge-base` | `--fork-point` <br/> `--octopus` | ❌ | | |
| `read-tree` | | ❌ | | |
| `rev-list` | | ✅ | | |
| `rev-parse` | | ❌ | | |
| `show-ref` | | ✅ | | |
| `symbolic-ref` | | ✅ | | |
| `update-index` | | ❌ | | |
| `update-ref` | | ❌ | | |
| `verify-pack` | | ❌ | | |
| `write-tree` | | ❌ | | |
## Indexes and Git Protocols
| Feature | Version | Status | Notes |
| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- |
| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | |
| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | |
| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | |
| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| cruft packs | | ❌ | |
## Capabilities
| Feature | Status | Notes |
| ------------------------------ | ------------ | ----- |
| `multi_ack` | ❌ | |
| `multi_ack_detailed` | ❌ | |
| `no-done` | ❌ | |
| `thin-pack` | ❌ | |
| `side-band` | ⚠️ (partial) | |
| `side-band-64k` | ⚠️ (partial) | |
| `ofs-delta` | ✅ | |
| `agent` | ✅ | |
| `object-format` | ❌ | |
| `symref` | ✅ | |
| `shallow` | ✅ | |
| `deepen-since` | ✅ | |
| `deepen-not` | ❌ | |
| `deepen-relative` | ❌ | |
| `no-progress` | ✅ | |
| `include-tag` | ✅ | |
| `report-status` | ✅ | |
| `report-status-v2` | ❌ | |
| `delete-refs` | ✅ | |
| `quiet` | ❌ | |
| `atomic` | ✅ | |
| `push-options` | ✅ | |
| `allow-tip-sha1-in-want` | ✅ | |
| `allow-reachable-sha1-in-want` | ❌ | |
| `push-cert=<nonce>` | ❌ | |
| `filter` | ❌ | |
| `session-id=<session id>` | ❌ | |
## Transport Schemes
| Scheme | Status | Notes | Examples |
| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- |
| `http(s)://` (dumb) | ❌ | | |
| `http(s)://` (smart) | ✅ | | |
| `git://` | ✅ | | |
| `ssh://` | ✅ | | |
| `file://` | ⚠️ (partial) | Warning: this is not pure Go; it shells out to the `git` binary. | |
| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) |
## SHA256
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ |
| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) |
| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) |
| `pull` | | ❌ | | |
| `fetch` | | ❌ | | |
| `push` | | ❌ | | |
## Other features
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- |
| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | |
| `config` | `--global` <br/> `--system` | ✅ | Read-only. | |
| `gitignore` | | ✅ | | |
| `gitattributes` | | ✅ | | |
| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |

78
vendor/github.com/go-git/go-git/v5/EXTENDING.md generated vendored Normal file
View File

@@ -0,0 +1,78 @@
# Extending go-git
`go-git` was built in a highly extensible manner, which enables some of its functionality to be changed or extended without changing its codebase. Here are the key extensibility features:
## Dot Git Storers
Dot git storers are the components responsible for storing the Git internal files, including objects and references.
The built-in storer implementations include [memory](storage/memory) and [filesystem](storage/filesystem). The `memory` storer stores all the data in memory, and its use looks like this:
```go
r, err := git.Init(memory.NewStorage(), nil)
```
The `filesystem` storer stores the data in the OS filesystem; it also takes an object cache (see the Cache section below) and can be used as follows:
```go
r, err := git.Init(filesystem.NewStorage(osfs.New("/tmp/foo"), cache.NewObjectLRUDefault()), nil)
```
New implementations can be created by implementing the [storage.Storer interface](storage/storer.go#L16).
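As a hedged sketch (not part of the upstream documentation), a custom storer can be built by wrapping an existing one: embedding `*memory.Storage` keeps the full `storage.Storer` interface satisfied while individual methods are intercepted. The `loggingStorage` type below is purely illustrative:
```go
package main

import (
	"log"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/storage/memory"
)

// loggingStorage embeds the in-memory storer and logs every object written,
// delegating the actual storage to the embedded implementation.
type loggingStorage struct {
	*memory.Storage
}

// SetEncodedObject intercepts object writes before handing them to the
// embedded memory storage.
func (s *loggingStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
	log.Printf("storing %s object %s", obj.Type(), obj.Hash())
	return s.Storage.SetEncodedObject(obj)
}

func main() {
	// The wrapper is used exactly like the built-in storers.
	r, err := git.Init(&loggingStorage{memory.NewStorage()}, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = r
}
```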
## Filesystem
Git repository worktrees are managed using a filesystem abstraction based on [go-billy](https://github.com/go-git/go-billy). The Git operations will take place against the specific filesystem implementation. Initialising a repository in memory can be done as follows:
```go
fs := memfs.New()
r, err := git.Init(memory.NewStorage(), fs)
```
The same operation can be done against the OS filesystem:
```go
fs := osfs.New("/tmp/foo")
r, err := git.Init(memory.NewStorage(), fs)
```
New filesystems (e.g. cloud-based storage) could be created by implementing `go-billy`'s [Filesystem interface](https://github.com/go-git/go-billy/blob/326c59f064021b821a55371d57794fbfb86d4cb3/fs.go#L52).
## Transport Schemes
Git supports various transport schemes, including `http`, `https`, `ssh`, `git`, `file`. `go-git` defines the [transport.Transport interface](plumbing/transport/common.go#L48) to represent them.
The built-in implementations can be replaced by calling `client.InstallProtocol`.
An example of changing the built-in `https` implementation to skip TLS could look like this:
```go
customClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
client.InstallProtocol("https", githttp.NewClient(customClient))
```
Some internal implementations enable code reuse amongst the different transport implementations. Some of these may be made public in the future (e.g. `plumbing/transport/internal/common`).
## Cache
Several different operations across `go-git` lean on caching of objects in order to achieve optimal performance. The caching functionality is defined by the [cache.Object interface](plumbing/cache/common.go#L17).
Two built-in implementations are `cache.ObjectLRU` and `cache.BufferLRU`. However, the caching functionality can be customized by implementing the `cache.Object` interface.
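As an illustrative sketch (assuming the `osfs`, `cache`, `filesystem` and `git` packages from go-billy and go-git are imported, in the same fragment style as the examples above; the 256 MiB size is an arbitrary example), a custom-sized LRU object cache can be handed to the filesystem storer:
```go
// Build a filesystem storer over an existing .git directory, backed by
// a 256 MiB LRU object cache instead of the default size.
fs := osfs.New("/tmp/foo/.git")
objectCache := cache.NewObjectLRU(256 * cache.MiByte)
storer := filesystem.NewStorage(fs, objectCache)

// Open the repository using the custom storer and its worktree.
r, err := git.Open(storer, osfs.New("/tmp/foo"))
```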
## Hash
`go-git` uses Go's `crypto.Hash` to identify hash functions. The built-in implementations are `github.com/pjbgf/sha1cd` for SHA1 and Go's `crypto/sha256` for SHA256.
The default hash functions can be changed by calling `hash.RegisterHash`.
```go
func init() {
hash.RegisterHash(crypto.SHA1, sha1.New)
}
```
New `SHA1` or `SHA256` implementations of the standard library's `hash.Hash` interface can be registered by calling `hash.RegisterHash`.

View File

@@ -27,7 +27,13 @@ build-git:
test:
@echo "running against `git version`"; \
$(GOTEST) ./...
$(GOTEST) -race ./...
TEMP_REPO := $(shell mktemp)
test-sha256:
$(GOCMD) run -tags sha256 _examples/sha256/main.go $(TEMP_REPO)
cd $(TEMP_REPO) && git fsck
rm -rf $(TEMP_REPO)
test-coverage:
@echo "running against `git version`"; \
@@ -35,4 +41,13 @@ test-coverage:
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
clean:
rm -rf $(GIT_DIST_PATH)
rm -rf $(GIT_DIST_PATH)
fuzz:
@go test -fuzz=FuzzParser $(PWD)/internal/revision
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config
@go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile
@go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object
@go test -fuzz=FuzzDecode $(PWD)/plumbing/object
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp
@go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport

38
vendor/github.com/go-git/go-git/v5/SECURITY.md generated vendored Normal file
View File

@@ -0,0 +1,38 @@
# go-git Security Policy
The purpose of this security policy is to outline `go-git`'s process
for reporting, handling and disclosing security-sensitive information.
## Supported Versions
The project follows a version support policy where only the latest minor
release is actively supported. Therefore, only issues that impact the latest
minor release will be fixed. Users are encouraged to upgrade to the latest
minor/patch release to benefit from the most up-to-date features, bug fixes,
and security enhancements.
The supported versions policy applies to both the `go-git` library and its
associated repositories within the `go-git` org.
## Reporting Security Issues
Please report any security vulnerabilities or potential weaknesses in `go-git`
privately via go-git-security@googlegroups.com. Do not publicly disclose the
details of the vulnerability until a fix has been implemented and released.
During the process the project maintainers will investigate the report, so please
provide detailed information, including steps to reproduce, affected versions, and any mitigations if known.
The project maintainers will acknowledge the receipt of the report and work with
the reporter to validate and address the issue.
Please note that `go-git` does not have any bounty programs, and therefore does
not provide financial compensation for disclosures.
## Security Disclosure Process
The project maintainers will make every effort to promptly address security issues.
Once a security vulnerability is fixed, a security advisory will be published to notify users and provide appropriate mitigation measures.
All `go-git` advisories can be found at https://github.com/go-git/go-git/security/advisories.

View File

@@ -2,16 +2,18 @@ package git
import (
"bytes"
"container/heap"
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/utils/diff"
"github.com/sergi/go-diff/diffmatchpatch"
)
// BlameResult represents the result of a Blame operation.
@@ -29,53 +31,26 @@ type BlameResult struct {
func Blame(c *object.Commit, path string) (*BlameResult, error) {
// The file to blame is identified by the input arguments:
// commit and path. commit is a Commit object obtained from a Repository. Path
// represents a path to a specific file contained into the repository.
// represents a path to a specific file contained in the repository.
//
// Blaming a file is a two step process:
// Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified.
//
// 1. Create a linear history of the commits affecting a file. We use
// revlist.New for that.
// When a diff is found it cannot immediately be assumed to have come from that commit, as it may have come from one
// of its parents. It will first try to resolve those diffs from its parents; only if the change cannot be found in
// its parents is it assigned to the commit itself.
//
// 2. Then build a graph with a node for every line in every file in
// the history of the file.
// When encountering 2 parents that have made the same change to a file it will choose the parent that was merged
// into the current branch first (this is determined by the order of the parents inside the commit).
//
// Each node is assigned a commit: Start by the nodes in the first
// commit. Assign that commit as the creator of all its lines.
//
// Then jump to the nodes in the next commit, and calculate the diff
// between the two files. Newly created lines get
// assigned the new commit as its origin. Modified lines also get
// this new commit. Untouched lines retain the old commit.
//
// All this work is done in the assignOrigin function which holds all
// the internal relevant data in a "blame" struct, that is not
// exported.
//
// TODO: ways to improve the efficiency of this function:
// 1. Improve revlist
// 2. Improve how to traverse the history (example a backward traversal will
// be much more efficient)
//
// TODO: ways to improve the function in general:
// 1. Add memoization between revlist and assign.
// 2. It is using much more memory than needed, see the TODOs below.
// This currently works on a line-by-line basis; if performance becomes an issue it could be changed to work with
// hunks rather than lines. It would then need to split diff hunks where necessary.
b := new(blame)
b.fRev = c
b.path = path
b.q = new(priorityQueue)
// get all the file revisions
if err := b.fillRevs(); err != nil {
return nil, err
}
// calculate the line tracking graph and fill in
// file contents in data.
if err := b.fillGraphAndData(); err != nil {
return nil, err
}
file, err := b.fRev.File(b.path)
file, err := b.fRev.File(path)
if err != nil {
return nil, err
}
@@ -83,13 +58,59 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
if err != nil {
return nil, err
}
finalLength := len(finalLines)
// Each node (line) holds the commit where it was introduced or
// last modified. To achieve that we use the FORWARD algorithm
// described in Zimmermann, et al. "Mining Version Archives for
// Co-changed Lines", in proceedings of the Mining Software
// Repositories workshop, Shanghai, May 22-23, 2006.
lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1))
needsMap := make([]lineMap, finalLength)
for i := range needsMap {
needsMap[i] = lineMap{i, i, nil, -1}
}
contents, err := file.Contents()
if err != nil {
return nil, err
}
b.q.Push(&queueItem{
nil,
nil,
c,
path,
contents,
needsMap,
0,
false,
0,
})
items := make([]*queueItem, 0)
for {
items = items[:0]
for {
if b.q.Len() == 0 {
return nil, errors.New("invalid state: no items left on the blame queue")
}
item := b.q.Pop()
items = append(items, item)
next := b.q.Peek()
if next == nil || next.Hash != item.Commit.Hash {
break
}
}
finished, err := b.addBlames(items)
if err != nil {
return nil, err
}
if finished == true {
break
}
}
if err != nil {
return nil, err
}
b.lineToCommit = make([]*object.Commit, finalLength)
for i := range needsMap {
b.lineToCommit[i] = needsMap[i].Commit
}
lines, err := newLines(finalLines, b.lineToCommit)
if err != nil {
return nil, err
}
@@ -105,6 +126,8 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
type Line struct {
// Author is the email address of the last author that modified the line.
Author string
// AuthorName is the name of the last author that modified the line.
AuthorName string
// Text is the original text of the line.
Text string
// Date is when the original text of the line was introduced
@@ -113,31 +136,21 @@ type Line struct {
Hash plumbing.Hash
}
func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line {
return &Line{
Author: author,
Text: text,
Hash: hash,
Date: date,
Author: author,
AuthorName: authorName,
Text: text,
Hash: hash,
Date: date,
}
}
func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
lcontents := len(contents)
lcommits := len(commits)
if lcontents != lcommits {
if lcontents == lcommits-1 && contents[lcontents-1] != "\n" {
contents = append(contents, "\n")
} else {
return nil, errors.New("contents and commits have different length")
}
}
result := make([]*Line, 0, lcontents)
result := make([]*Line, 0, len(contents))
for i := range contents {
result = append(result, newLine(
commits[i].Author.Email, contents[i],
commits[i].Author.Email, commits[i].Author.Name, contents[i],
commits[i].Author.When, commits[i].Hash,
))
}
@@ -152,151 +165,426 @@ type blame struct {
path string
// the commit of the final revision of the file to blame
fRev *object.Commit
// the chain of revisions affecting the the file to blame
revs []*object.Commit
// the contents of the file across all its revisions
data []string
// the graph of the lines in the file across all the revisions
graph [][]*object.Commit
// resolved lines
lineToCommit []*object.Commit
// queue of commits that need resolving
q *priorityQueue
}
// calculate the history of a file "path", starting from commit "from", sorted by commit date.
func (b *blame) fillRevs() error {
var err error
b.revs, err = references(b.fRev, b.path)
return err
type lineMap struct {
Orig, Cur int
Commit *object.Commit
FromParentNo int
}
// build graph of a file from its revision history
func (b *blame) fillGraphAndData() error {
//TODO: not all commits are needed, only the current rev and the prev
b.graph = make([][]*object.Commit, len(b.revs))
b.data = make([]string, len(b.revs)) // file contents in all the revisions
// for every revision of the file, starting with the first
// one...
for i, rev := range b.revs {
func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
curItem := curItems[0]
// Simple optimisation to merge paths, there is potential to go a bit further here and check for any duplicates
// not only if they are all the same.
if len(curItems) == 1 {
curItems = nil
} else if curItem.IdenticalToChild {
allSame := true
lenCurItems := len(curItems)
lowestParentNo := curItem.ParentNo
for i := 1; i < lenCurItems; i++ {
if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child {
allSame = false
break
}
lowestParentNo = min(lowestParentNo, curItems[i].ParentNo)
}
if allSame {
curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1
curItems = nil // free the memory
curItem.ParentNo = lowestParentNo
// Now check if we can remove the parent completely
for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 {
oldChild := curItem.Child
curItem.Child = oldChild.Child
curItem.ParentNo = oldChild.ParentNo
}
}
}
// if we have more than 1 item for this commit, create a single needsMap
if len(curItems) > 1 {
curItem.MergedChildren = make([]childToNeedsMap, len(curItems))
for i, c := range curItems {
curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo}
}
newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap))
newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...)
for i := 1; i < len(curItems); i++ {
cur := curItems[i].NeedsMap
n := 0 // position in newNeedsMap
c := 0 // position in current list
for c < len(cur) {
if n == len(newNeedsMap) {
newNeedsMap = append(newNeedsMap, cur[c:]...)
break
} else if newNeedsMap[n].Cur == cur[c].Cur {
n++
c++
} else if newNeedsMap[n].Cur < cur[c].Cur {
n++
} else {
newNeedsMap = append(newNeedsMap, cur[c])
newPos := len(newNeedsMap) - 1
for newPos > n {
newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1]
newPos--
}
}
}
}
curItem.NeedsMap = newNeedsMap
curItem.IdenticalToChild = false
curItem.Child = nil
curItems = nil // free the memory
}
parents, err := parentsContainingPath(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
anyPushed := false
for parnetNo, prev := range parents {
currentHash, err := blobHash(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
prevHash, err := blobHash(prev.Path, prev.Commit)
if err != nil {
return false, err
}
if currentHash == prevHash {
if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild {
// commit that has 1 parent and 1 child and is the same as both, bypass it completely
b.q.Push(&queueItem{
Child: curItem.Child,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item
IdenticalToChild: true,
ParentNo: curItem.ParentNo,
})
} else {
b.q.Push(&queueItem{
Child: curItem,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy
IdenticalToChild: true,
ParentNo: parnetNo,
})
curItem.numParentsNeedResolving++
}
anyPushed = true
continue
}
// get the contents of the file
file, err := rev.File(b.path)
file, err := prev.Commit.File(prev.Path)
if err != nil {
return nil
return false, err
}
b.data[i], err = file.Contents()
prevContents, err := file.Contents()
if err != nil {
return err
return false, err
}
nLines := countLines(b.data[i])
// create a node for each line
b.graph[i] = make([]*object.Commit, nLines)
// assign a commit to each node
// if this is the first revision, then the node is assigned to
// this first commit.
if i == 0 {
for j := 0; j < nLines; j++ {
b.graph[i][j] = b.revs[i]
hunks := diff.Do(prevContents, curItem.Contents)
prevl := -1
curl := -1
need := 0
getFromParent := make([]lineMap, 0)
out:
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch {
case hunks[h].Type == diffmatchpatch.DiffEqual:
prevl++
curl++
if curl == curItem.NeedsMap[need].Cur {
// add to needs
getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1})
// move to next need
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case hunks[h].Type == diffmatchpatch.DiffInsert:
curl++
if curl == curItem.NeedsMap[need].Cur {
// the line we want is added, it may have been added here (or by another parent), skip it for now
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case hunks[h].Type == diffmatchpatch.DiffDelete:
prevl += hLines
continue out
default:
return false, errors.New("invalid state: invalid hunk Type")
}
}
} else {
// if this is not the first commit, then assign to the old
// commit or to the new one, depending on what the diff
// says.
b.assignOrigin(i, i-1)
}
if len(getFromParent) > 0 {
b.q.Push(&queueItem{
curItem,
nil,
prev.Commit,
prev.Path,
prevContents,
getFromParent,
0,
false,
parnetNo,
})
curItem.numParentsNeedResolving++
anyPushed = true
}
}
return nil
}
// sliceGraph returns a slice of commits (one per line) for a particular
// revision of a file (0=first revision).
func (b *blame) sliceGraph(i int) []*object.Commit {
fVs := b.graph[i]
result := make([]*object.Commit, 0, len(fVs))
for _, v := range fVs {
c := *v
result = append(result, &c)
curItem.Contents = "" // no longer needed, free the memory
if !anyPushed {
return finishNeeds(curItem)
}
return result
return false, nil
}
// Assigns origin to vertexes in current (c) rev from data in its previous (p)
// revision
func (b *blame) assignOrigin(c, p int) {
// assign origin based on diff info
hunks := diff.Do(b.data[p], b.data[c])
sl := -1 // source line
dl := -1 // destination line
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch {
case hunks[h].Type == 0:
sl++
dl++
b.graph[c][dl] = b.graph[p][sl]
case hunks[h].Type == 1:
dl++
b.graph[c][dl] = b.revs[c]
case hunks[h].Type == -1:
sl++
default:
panic("unreachable")
func finishNeeds(curItem *queueItem) (bool, error) {
// any needs left in the needsMap must have come from this revision
for i := range curItem.NeedsMap {
if curItem.NeedsMap[i].Commit == nil {
curItem.NeedsMap[i].Commit = curItem.Commit
curItem.NeedsMap[i].FromParentNo = -1
}
}
if curItem.Child == nil && curItem.MergedChildren == nil {
return true, nil
}
if curItem.MergedChildren == nil {
return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo)
}
for _, ctn := range curItem.MergedChildren {
m := 0 // position in merged needs map
p := 0 // position in parent needs map
for p < len(ctn.NeedsMap) {
if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur {
ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit
m++
p++
} else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur {
p++
} else {
m++
}
}
finished, err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo)
if finished || err != nil {
return finished, err
}
}
return false, nil
}
func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) {
if identicalToChild {
for i := range child.NeedsMap {
l := &child.NeedsMap[i]
if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig {
return false, errors.New("needsMap isn't the same? Why not??")
}
if l.Commit == nil || parentNo < l.FromParentNo {
l.Commit = needsMap[i].Commit
l.FromParentNo = parentNo
}
}
} else {
i := 0
out:
for j := range child.NeedsMap {
l := &child.NeedsMap[j]
for needsMap[i].Orig < l.Cur {
i++
if i == len(needsMap) {
break out
}
}
if l.Cur == needsMap[i].Orig {
if l.Commit == nil || parentNo < l.FromParentNo {
l.Commit = needsMap[i].Commit
l.FromParentNo = parentNo
}
}
}
}
child.numParentsNeedResolving--
if child.numParentsNeedResolving == 0 {
finished, err := finishNeeds(child)
if finished || err != nil {
return finished, err
}
}
return false, nil
}
// GoString prints the results of a Blame using git-blame's style.
func (b *blame) GoString() string {
// String prints the results of a Blame using git-blame's style.
func (b BlameResult) String() string {
var buf bytes.Buffer
file, err := b.fRev.File(b.path)
if err != nil {
panic("PrettyPrint: internal error in repo.Data")
}
contents, err := file.Contents()
if err != nil {
panic("PrettyPrint: internal error in repo.Data")
}
lines := strings.Split(contents, "\n")
// max line number length
mlnl := len(strconv.Itoa(len(lines)))
mlnl := len(strconv.Itoa(len(b.Lines)))
// max author length
mal := b.maxAuthorLength()
format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n",
mal, mlnl)
format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl)
fVs := b.graph[len(b.graph)-1]
for ln, v := range fVs {
fmt.Fprintf(&buf, format, v.Hash.String()[:8],
prettyPrintAuthor(fVs[ln]), ln+1, lines[ln])
for ln := range b.Lines {
_, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8],
b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text)
}
return buf.String()
}
// utility function to pretty print the author.
func prettyPrintAuthor(c *object.Commit) string {
return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02"))
}
// utility function to calculate the number of runes needed
// to print the longest author name in the blame of a file.
func (b *blame) maxAuthorLength() int {
memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1)
fVs := b.graph[len(b.graph)-1]
func (b BlameResult) maxAuthorLength() int {
m := 0
for ln := range fVs {
if _, ok := memo[fVs[ln].Hash]; ok {
continue
}
memo[fVs[ln].Hash] = struct{}{}
m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln])))
for ln := range b.Lines {
m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName))
}
return m
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
type childToNeedsMap struct {
Child *queueItem
NeedsMap []lineMap
IdenticalToChild bool
ParentNo int
}
type queueItem struct {
Child *queueItem
MergedChildren []childToNeedsMap
Commit *object.Commit
path string
Contents string
NeedsMap []lineMap
numParentsNeedResolving int
IdenticalToChild bool
ParentNo int
}
type priorityQueueImp []*queueItem
func (pq *priorityQueueImp) Len() int { return len(*pq) }
func (pq *priorityQueueImp) Less(i, j int) bool {
return !(*pq)[i].Commit.Less((*pq)[j].Commit)
}
func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] }
func (pq *priorityQueueImp) Push(x any) { *pq = append(*pq, x.(*queueItem)) }
func (pq *priorityQueueImp) Pop() any {
n := len(*pq)
ret := (*pq)[n-1]
(*pq)[n-1] = nil // avoid memory leak
*pq = (*pq)[0 : n-1]
return ret
}
func (pq *priorityQueueImp) Peek() *object.Commit {
if len(*pq) == 0 {
return nil
}
return (*pq)[0].Commit
}
type priorityQueue priorityQueueImp
func (pq *priorityQueue) Init() { heap.Init((*priorityQueueImp)(pq)) }
func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() }
func (pq *priorityQueue) Push(c *queueItem) {
heap.Push((*priorityQueueImp)(pq), c)
}
func (pq *priorityQueue) Pop() *queueItem {
return heap.Pop((*priorityQueueImp)(pq)).(*queueItem)
}
func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() }
type parentCommit struct {
Commit *object.Commit
Path string
}
func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) {
// TODO: benchmark this method making git.object.Commit.parent public instead of using
// an iterator
var result []parentCommit
iter := c.Parents()
for {
parent, err := iter.Next()
if err == io.EOF {
return result, nil
}
if err != nil {
return nil, err
}
if _, err := parent.File(path); err == nil {
result = append(result, parentCommit{parent, path})
} else {
// look for renames
patch, err := parent.Patch(c)
if err != nil {
return nil, err
} else if patch != nil {
for _, fp := range patch.FilePatches() {
from, to := fp.Files()
if from != nil && to != nil && to.Path() == path {
result = append(result, parentCommit{parent, from.Path()})
break
}
}
}
}
}
}
func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) {
file, err := commit.File(path)
if err != nil {
return plumbing.ZeroHash, err
}
return file.Hash, nil
}

View File

@@ -2,6 +2,7 @@ package config
import (
"errors"
"strings"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
@@ -26,6 +27,12 @@ type Branch struct {
// "true" and "interactive". "false" is undocumented and
// typically represented by the non-existence of this field
Rebase string
// Description explains what the branch is for.
// Multi-line explanations may be used.
//
// Original git command to edit:
// git branch --edit-description
Description string
raw *format.Subsection
}
@@ -47,7 +54,7 @@ func (b *Branch) Validate() error {
return errBranchInvalidRebase
}
return nil
return plumbing.NewBranchReferenceName(b.Name).Validate()
}
func (b *Branch) marshal() *format.Subsection {
@@ -75,9 +82,27 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(rebaseKey, b.Rebase)
}
if b.Description == "" {
b.raw.RemoveOption(descriptionKey)
} else {
desc := quoteDescription(b.Description)
b.raw.SetOption(descriptionKey, desc)
}
return b.raw
}
// hack to trigger conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
//
// Current Encoder implementation uses Go %q format if value contains a backslash character,
// which is not consistent with reference git implementation.
// git just replaces newline characters with \n, while Encoder prints them directly.
// Until value quoting fix, we should escape description value by replacing newline characters with \n.
func quoteDescription(desc string) string {
return strings.ReplaceAll(desc, "\n", `\n`)
}
func (b *Branch) unmarshal(s *format.Subsection) error {
b.raw = s
@@ -85,6 +110,14 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
b.Rebase = b.raw.Options.Get(rebaseKey)
b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey))
return b.Validate()
}
// hack to enable conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
// goto quoteDescription for details.
func unquoteDescription(desc string) string {
return strings.ReplaceAll(desc, `\n`, "\n")
}

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -14,8 +13,8 @@ import (
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/mitchellh/go-homedir"
)
const (
@@ -60,12 +59,14 @@ type Config struct {
// CommentChar is the character indicating the start of a
// comment for commands like commit and tag
CommentChar string
// RepositoryFormatVersion identifies the repository format and layout version.
RepositoryFormatVersion format.RepositoryFormatVersion
}
User struct {
// Name is the personal name of the author and the commiter of a commit.
// Name is the personal name of the author and the committer of a commit.
Name string
// Email is the email of the author and the commiter of a commit.
// Email is the email of the author and the committer of a commit.
Email string
}
@@ -77,9 +78,9 @@ type Config struct {
}
Committer struct {
// Name is the personal name of the commiter of a commit.
// Name is the personal name of the committer of a commit.
Name string
// Email is the email of the the commiter of a commit.
// Email is the email of the committer of a commit.
Email string
}
@@ -97,6 +98,17 @@ type Config struct {
DefaultBranch string
}
Extensions struct {
// ObjectFormat specifies the hash algorithm to use. The
// acceptable values are sha1 and sha256. If not specified,
// sha1 is assumed. It is an error to specify this key unless
// core.repositoryFormatVersion is 1.
//
// This setting must not be changed after repository initialization
// (e.g. clone or init).
ObjectFormat format.ObjectFormat
}
// Remotes list of repository remotes, the key of the map is the name
// of the remote, should equal to RemoteConfig.Name.
Remotes map[string]*RemoteConfig
@@ -132,7 +144,7 @@ func NewConfig() *Config {
// ReadConfig reads a config file from a io.Reader.
func ReadConfig(r io.Reader) (*Config, error) {
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@@ -146,11 +158,11 @@ func ReadConfig(r io.Reader) (*Config, error) {
}
// LoadConfig loads a config file from a given scope. The returned Config,
// contains exclusively information fom the given scope. If couldn't find a
// config file to the given scope, a empty one is returned.
// contains exclusively information from the given scope. If it couldn't find a
// config file to the given scope, an empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.")
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer")
}
files, err := Paths(scope)
@@ -185,7 +197,7 @@ func Paths(scope Scope) ([]string, error) {
files = append(files, filepath.Join(xdg, "git/config"))
}
home, err := homedir.Dir()
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
@@ -227,27 +239,32 @@ func (c *Config) Validate() error {
}
const (
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
defaultBranchKey = "defaultBranch"
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
extensionsSection = "extensions"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
repositoryFormatVersionKey = "repositoryformatversion"
objectFormat = "objectformat"
mirrorKey = "mirror"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
@@ -391,6 +408,7 @@ func (c *Config) unmarshalInit() {
// Marshal returns Config encoded as a git-config file.
func (c *Config) Marshal() ([]byte, error) {
c.marshalCore()
c.marshalExtensions()
c.marshalUser()
c.marshalPack()
c.marshalRemotes()
@@ -410,12 +428,24 @@ func (c *Config) Marshal() ([]byte, error) {
func (c *Config) marshalCore() {
s := c.Raw.Section(coreSection)
s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
if string(c.Core.RepositoryFormatVersion) != "" {
s.SetOption(repositoryFormatVersionKey, string(c.Core.RepositoryFormatVersion))
}
if c.Core.Worktree != "" {
s.SetOption(worktreeKey, c.Core.Worktree)
}
}
func (c *Config) marshalExtensions() {
// Extensions are only supported on Version 1, therefore
// ignore them otherwise.
if c.Core.RepositoryFormatVersion == format.Version_1 {
s := c.Raw.Section(extensionsSection)
s.SetOption(objectFormat, string(c.Extensions.ObjectFormat))
}
}
func (c *Config) marshalUser() {
s := c.Raw.Section(userSection)
if c.User.Name != "" {
@@ -549,6 +579,8 @@ type RemoteConfig struct {
// URLs the URLs of a remote repository. It must be non-empty. Fetch will
// always use the first URL, while push will use all of them.
URLs []string
// Mirror indicates that the repository is a mirror of remote.
Mirror bool
// insteadOfRulesApplied have urls been modified
insteadOfRulesApplied bool
@@ -583,7 +615,7 @@ func (c *RemoteConfig) Validate() error {
c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
}
return nil
return plumbing.NewRemoteHEADReferenceName(c.Name).Validate()
}
func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
@@ -602,6 +634,7 @@ func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
c.Name = c.raw.Name
c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
c.Fetch = fetch
c.Mirror = c.raw.Options.Get(mirrorKey) == "true"
return nil
}
@@ -634,6 +667,10 @@ func (c *RemoteConfig) marshal() *format.Subsection {
c.raw.SetOption(fetchKey, values...)
}
if c.Mirror {
c.raw.SetOption(mirrorKey, strconv.FormatBool(c.Mirror))
}
return c.raw
}

View File

@@ -64,7 +64,7 @@ func (s RefSpec) IsExactSHA1() bool {
return plumbing.IsHash(s.Src())
}
// Src return the src side.
// Src returns the src side.
func (s RefSpec) Src() string {
spec := string(s)

View File

@@ -0,0 +1,29 @@
package path_util
import (
"os"
"os/user"
"strings"
)
func ReplaceTildeWithHome(path string) (string, error) {
if strings.HasPrefix(path, "~") {
firstSlash := strings.Index(path, "/")
if firstSlash == 1 {
home, err := os.UserHomeDir()
if err != nil {
return path, err
}
return strings.Replace(path, "~", home, 1), nil
} else if firstSlash > 1 {
username := path[1:firstSlash]
userAccount, err := user.Lookup(username)
if err != nil {
return path, err
}
return strings.Replace(path, path[:firstSlash], userAccount.HomeDir, 1), nil
}
}
return path, nil
}

View File

@@ -322,6 +322,8 @@ func (p *Parser) parseAt() (Revisioner, error) {
}
return AtDate{t}, nil
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in @{<data>} structure`}
default:
date += lit
}
@@ -424,6 +426,8 @@ func (p *Parser) parseCaretBraces() (Revisioner, error) {
p.unscan()
case tok != slash && start:
return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in ^{<data>} structure`}
case tok != cbrace:
p.unscan()
re += lit

View File

@@ -5,8 +5,10 @@ import (
)
var (
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`)
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
// Ref: https://github.com/git/git/blob/master/Documentation/urls.txt#L37
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5}):)?(?P<path>[^\\].*)$`)
)
// MatchesScheme returns true if the given string matches a URL-like

Some files were not shown because too many files have changed in this diff Show More