Mirror of https://github.com/opencloud-eu/opencloud.git (synced 2025-12-24 14:50:39 -05:00)

Compare commits: benchmark-... → v3.6.0 (15 commits)
| SHA1 |
|---|
| e62e2e0f12 |
| 9cb973baac |
| 9e16bb9e29 |
| 641dac0a88 |
| 570ec0bf97 |
| aaaf5cf5c4 |
| fb8af22073 |
| 8c9f266ded |
| f04f6ad470 |
| c887947a85 |
| ac8be264f0 |
| 2c18d5b010 |
| 44ee182aa3 |
| d76cacd99f |
| fb94f34a1f |
@@ -338,6 +338,7 @@ config = {
                 "FRONTEND_READONLY_USER_ATTRIBUTES": "user.onPremisesSamAccountName,user.displayName,user.mail,user.passwordProfile,user.accountEnabled,user.appRoleAssignments",
                 "OC_LDAP_SERVER_WRITE_ENABLED": False,
                 "OC_EXCLUDE_RUN_SERVICES": "idm",
+                "OC_LDAP_USER_ENABLED_ATTRIBUTE": "",
             },
         },
     },
@@ -350,7 +351,7 @@ config = {
     "part": {
         "skip": False,
         "totalParts": 4,  # divide and run all suites in parts (divide pipelines)
-        "xsuites": ["search", "app-provider", "app-provider-onlyOffice", "app-store", "keycloak", "oidc", "ocm", "a11y", "mobile-view"],  # suites to skip
+        "xsuites": ["search", "app-provider", "app-provider-onlyOffice", "app-store", "keycloak", "oidc", "ocm", "a11y", "mobile-view", "navigation"],  # suites to skip
     },
     "search": {
         "skip": False,
@@ -1076,6 +1077,7 @@ def localApiTests(name, suites, storage = "decomposed", extra_environment = {},
         "WITH_REMOTE_PHP": with_remote_php,
         "COLLABORATION_SERVICE_URL": "http://wopi-fakeoffice:9300",
         "OC_STORAGE_PATH": "$HOME/.opencloud/storage/users",
+        "USE_BEARER_TOKEN": True,
     }

     for item in extra_environment:
CHANGELOG.md (75 changed lines)
@@ -1,5 +1,80 @@
 # Changelog
 
+## [3.6.0](https://github.com/opencloud-eu/opencloud/releases/tag/v3.6.0) - 2025-10-27
+
+### ❤️ Thanks to all contributors! ❤️
+
+@AlexAndBear, @ScharfViktor, @butonic, @dragonchaser, @fschade, @micbar, @prashant-gurung899, @rhafer, @schweigisito, @tammi-23
+
+### 📈 Enhancement
+
+- allow specifying a shutdown order [[#1622](https://github.com/opencloud-eu/opencloud/pull/1622)]
+- change: use 404 as status when thumbnail can not be fetched [[#1582](https://github.com/opencloud-eu/opencloud/pull/1582)]
+- feat: add dedicated logo (web) for mobile view to theme [[#1579](https://github.com/opencloud-eu/opencloud/pull/1579)]
+- feat: make it possible to start the collaboration service in the single process [[#1569](https://github.com/opencloud-eu/opencloud/pull/1569)]
+- introduce AppURLs helper for atomic background updates [[#1542](https://github.com/opencloud-eu/opencloud/pull/1542)]
+- chore: add config for capability CheckForUpdates [[#1556](https://github.com/opencloud-eu/opencloud/pull/1556)]
+
+### ✅ Tests
+
+- [full-ci] feat: implement OIDC authentication option [[#1676](https://github.com/opencloud-eu/opencloud/pull/1676)]
+- apiTest-coverage for #1523 [[#1660](https://github.com/opencloud-eu/opencloud/pull/1660)]
+- [full-ci] deleted unused step definitions [[#1639](https://github.com/opencloud-eu/opencloud/pull/1639)]
+- check thumbnails in the share with me response [[#1605](https://github.com/opencloud-eu/opencloud/pull/1605)]
+- [full-ci][tests-only] fix restore browsers cache workflow [[#1615](https://github.com/opencloud-eu/opencloud/pull/1615)]
+- [full-ci] Enhance getSpaceByName: check local cache before Graph API calls [[#1574](https://github.com/opencloud-eu/opencloud/pull/1574)]
+- [full-ci] getting personal space by userId instead of userName [[#1553](https://github.com/opencloud-eu/opencloud/pull/1553)]
+- apiTest-flaky: sync share before checking [[#1550](https://github.com/opencloud-eu/opencloud/pull/1550)]
+- [decomposed] use Alpine for opencloud starting [[#1547](https://github.com/opencloud-eu/opencloud/pull/1547)]
+
+### 🐛 Bug Fixes
+
+- fix: apply changes from other fixes in compose repo [[#1707](https://github.com/opencloud-eu/opencloud/pull/1707)]
+- fix(settings): env var precedence [[#1625](https://github.com/opencloud-eu/opencloud/pull/1625)]
+- fix(antivirus): update icap-client library which fixes tcp socket reuse [[#1589](https://github.com/opencloud-eu/opencloud/pull/1589)]
+- fix: use valid autocomplete values (axe autocomplete-valid) [[#1588](https://github.com/opencloud-eu/opencloud/pull/1588)]
+- Fix collaboration service name [[#1577](https://github.com/opencloud-eu/opencloud/pull/1577)]
+- let the runtime always create a cancel context [[#1565](https://github.com/opencloud-eu/opencloud/pull/1565)]
+- Bump reva and cs3apis [[#1538](https://github.com/opencloud-eu/opencloud/pull/1538)]
+- use correct endpoint in nats check [[#1533](https://github.com/opencloud-eu/opencloud/pull/1533)]
+
+### 📚 Documentation
+
+- adr: use education api for multi-tenancy provisioning [[#1548](https://github.com/opencloud-eu/opencloud/pull/1548)]
+- fix: remove deprecated web ui feature "OpenAppsInTab" [[#1575](https://github.com/opencloud-eu/opencloud/pull/1575)]
+
+### 📦️ Dependencies
+
+- build(deps): bump github.com/onsi/ginkgo/v2 from 2.26.0 to 2.27.1 [[#1705](https://github.com/opencloud-eu/opencloud/pull/1705)]
+- [decomposed] bump-version-v3.6.0 [[#1719](https://github.com/opencloud-eu/opencloud/pull/1719)]
+- revaBump-2.39.1 [[#1718](https://github.com/opencloud-eu/opencloud/pull/1718)]
+- chore: bump reva [[#1701](https://github.com/opencloud-eu/opencloud/pull/1701)]
+- build(deps): bump github.com/kovidgoyal/imaging from 1.6.4 to 1.7.2 [[#1696](https://github.com/opencloud-eu/opencloud/pull/1696)]
+- build(deps): bump github.com/blevesearch/bleve/v2 from 2.5.3 to 2.5.4 [[#1697](https://github.com/opencloud-eu/opencloud/pull/1697)]
+- build(deps): bump golang.org/x/oauth2 from 0.31.0 to 0.32.0 [[#1634](https://github.com/opencloud-eu/opencloud/pull/1634)]
+- build(deps): bump golang.org/x/net from 0.44.0 to 0.46.0 [[#1638](https://github.com/opencloud-eu/opencloud/pull/1638)]
+- revaBump: add groupware capabilities [[#1689](https://github.com/opencloud-eu/opencloud/pull/1689)]
+- revaUpdate: adding groupware capabilities [[#1659](https://github.com/opencloud-eu/opencloud/pull/1659)]
+- chore/bump-web-4.1.0 [[#1652](https://github.com/opencloud-eu/opencloud/pull/1652)]
+- build(deps): bump google.golang.org/grpc from 1.75.1 to 1.76.0 [[#1628](https://github.com/opencloud-eu/opencloud/pull/1628)]
+- build(deps): bump github.com/coreos/go-oidc/v3 from 3.15.0 to 3.16.0 [[#1627](https://github.com/opencloud-eu/opencloud/pull/1627)]
+- build(deps): bump github.com/grpc-ecosystem/grpc-gateway/v2 from 2.27.2 to 2.27.3 [[#1608](https://github.com/opencloud-eu/opencloud/pull/1608)]
+- build(deps): bump github.com/go-ldap/ldap/v3 from 3.4.11 to 3.4.12 [[#1609](https://github.com/opencloud-eu/opencloud/pull/1609)]
+- build(deps): bump google.golang.org/protobuf from 1.36.9 to 1.36.10 [[#1604](https://github.com/opencloud-eu/opencloud/pull/1604)]
+- build(deps): bump github.com/onsi/ginkgo/v2 from 2.25.3 to 2.26.0 [[#1603](https://github.com/opencloud-eu/opencloud/pull/1603)]
+- build(deps): bump github.com/nats-io/nats.go from 1.46.0 to 1.46.1 [[#1590](https://github.com/opencloud-eu/opencloud/pull/1590)]
+- build(deps): bump github.com/olekukonko/tablewriter from 1.0.9 to 1.1.0 [[#1584](https://github.com/opencloud-eu/opencloud/pull/1584)]
+- build(deps): bump github.com/open-policy-agent/opa from 1.8.0 to 1.9.0 [[#1576](https://github.com/opencloud-eu/opencloud/pull/1576)]
+- build(deps): bump github.com/nats-io/nats-server/v2 from 2.11.9 to 2.12.0 [[#1568](https://github.com/opencloud-eu/opencloud/pull/1568)]
+- build(deps): bump golang.org/x/net from 0.43.0 to 0.44.0 [[#1567](https://github.com/opencloud-eu/opencloud/pull/1567)]
+- reva bump. getting #327 [[#1555](https://github.com/opencloud-eu/opencloud/pull/1555)]
+- build(deps): bump golang.org/x/image from 0.30.0 to 0.31.0 [[#1552](https://github.com/opencloud-eu/opencloud/pull/1552)]
+- build(deps): bump github.com/nats-io/nats.go from 1.45.0 to 1.46.0 [[#1551](https://github.com/opencloud-eu/opencloud/pull/1551)]
+- build(deps): bump golang.org/x/crypto from 0.41.0 to 0.42.0 [[#1545](https://github.com/opencloud-eu/opencloud/pull/1545)]
+- build(deps): bump github.com/testcontainers/testcontainers-go/modules/opensearch from 0.38.0 to 0.39.0 [[#1544](https://github.com/opencloud-eu/opencloud/pull/1544)]
+- build(deps): bump github.com/open-policy-agent/opa from 1.6.0 to 1.8.0 [[#1510](https://github.com/opencloud-eu/opencloud/pull/1510)]
+- build(deps): bump google.golang.org/grpc from 1.75.0 to 1.75.1 [[#1534](https://github.com/opencloud-eu/opencloud/pull/1534)]
+
 ## [3.5.0](https://github.com/opencloud-eu/opencloud/releases/tag/v3.5.0) - 2025-09-22
 
 ### ❤️ Thanks to all contributors! ❤️
@@ -663,6 +663,7 @@
         "profile",
         "roles",
         "groups",
         "OpenCloudUnique_ID",
-        "basic"
+        "basic",
+        "email"
     ],
@@ -2308,7 +2309,7 @@
         "always"
     ],
     "usePasswordModifyExtendedOp": [
-        "false"
+        "true"
     ],
     "trustEmail": [
         "false"
go.mod (19 changed lines)
@@ -11,7 +11,7 @@ require (
 	github.com/Nerzal/gocloak/v13 v13.9.0
 	github.com/bbalet/stopwords v1.0.0
 	github.com/beevik/etree v1.6.0
-	github.com/blevesearch/bleve/v2 v2.5.3
+	github.com/blevesearch/bleve/v2 v2.5.4
 	github.com/cenkalti/backoff v2.2.1+incompatible
 	github.com/coreos/go-oidc/v3 v3.16.0
 	github.com/cs3org/go-cs3apis v0.0.0-20250908152307-4ca807afe54e
@@ -48,7 +48,7 @@ require (
 	github.com/jellydator/ttlcache/v3 v3.4.0
 	github.com/jinzhu/now v1.1.5
 	github.com/justinas/alice v1.2.0
-	github.com/kovidgoyal/imaging v1.6.4
+	github.com/kovidgoyal/imaging v1.7.2
 	github.com/leonelquinteros/gotext v1.7.2
 	github.com/libregraph/idm v0.5.0
 	github.com/libregraph/lico v0.66.0
@@ -60,12 +60,12 @@ require (
 	github.com/oklog/run v1.2.0
 	github.com/olekukonko/tablewriter v1.1.0
 	github.com/onsi/ginkgo v1.16.5
-	github.com/onsi/ginkgo/v2 v2.26.0
+	github.com/onsi/ginkgo/v2 v2.27.1
 	github.com/onsi/gomega v1.38.2
 	github.com/open-policy-agent/opa v1.9.0
 	github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89
 	github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76
-	github.com/opencloud-eu/reva/v2 v2.39.1-0.20251020192555-e3aa6a7d6d43
+	github.com/opencloud-eu/reva/v2 v2.39.1
 	github.com/opensearch-project/opensearch-go/v4 v4.5.0
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/pkg/errors v0.9.1
@@ -104,7 +104,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.38.0
 	golang.org/x/crypto v0.43.0
 	golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
-	golang.org/x/image v0.31.0
+	golang.org/x/image v0.32.0
 	golang.org/x/net v0.46.0
 	golang.org/x/oauth2 v0.32.0
 	golang.org/x/sync v0.17.0
@@ -140,13 +140,13 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bitly/go-simplejson v0.5.0 // indirect
 	github.com/bits-and-blooms/bitset v1.22.0 // indirect
-	github.com/blevesearch/bleve_index_api v1.2.8 // indirect
+	github.com/blevesearch/bleve_index_api v1.2.10 // indirect
 	github.com/blevesearch/geo v0.2.4 // indirect
 	github.com/blevesearch/go-faiss v1.0.25 // indirect
 	github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
 	github.com/blevesearch/gtreap v0.1.1 // indirect
 	github.com/blevesearch/mmap-go v1.0.4 // indirect
-	github.com/blevesearch/scorch_segment_api/v2 v2.3.10 // indirect
+	github.com/blevesearch/scorch_segment_api/v2 v2.3.12 // indirect
 	github.com/blevesearch/segment v0.9.1 // indirect
 	github.com/blevesearch/snowballstem v0.9.0 // indirect
 	github.com/blevesearch/upsidedown_store_api v1.0.2 // indirect
@@ -156,7 +156,7 @@ require (
 	github.com/blevesearch/zapx/v13 v13.4.2 // indirect
 	github.com/blevesearch/zapx/v14 v14.4.2 // indirect
 	github.com/blevesearch/zapx/v15 v15.4.2 // indirect
-	github.com/blevesearch/zapx/v16 v16.2.4 // indirect
+	github.com/blevesearch/zapx/v16 v16.2.6 // indirect
 	github.com/bluele/gcache v0.0.2 // indirect
 	github.com/bombsimon/logrusr/v3 v3.1.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
@@ -257,6 +257,7 @@ require (
 	github.com/kevinburke/ssh_config v1.2.0 // indirect
 	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.11 // indirect
+	github.com/kovidgoyal/go-parallel v1.0.1 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/lestrrat-go/blackmagic v1.0.4 // indirect
 	github.com/lestrrat-go/dsig v1.0.0 // indirect
@@ -327,6 +328,7 @@ require (
 	github.com/rs/xid v1.6.0 // indirect
 	github.com/russellhaering/goxmldsig v1.5.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect
 	github.com/segmentio/asm v1.2.0 // indirect
 	github.com/segmentio/kafka-go v0.4.49 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
@@ -378,7 +380,6 @@ require (
 	golang.org/x/sys v0.37.0 // indirect
 	golang.org/x/time v0.13.0 // indirect
 	golang.org/x/tools v0.37.0 // indirect
-	golang.org/x/tools/godoc v0.1.0-deprecated // indirect
 	google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
 	gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
go.sum (40 changed lines)
@@ -151,10 +151,10 @@ github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6
 github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4=
 github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/blevesearch/bleve/v2 v2.5.3 h1:9l1xtKaETv64SZc1jc4Sy0N804laSa/LeMbYddq1YEM=
-github.com/blevesearch/bleve/v2 v2.5.3/go.mod h1:Z/e8aWjiq8HeX+nW8qROSxiE0830yQA071dwR3yoMzw=
-github.com/blevesearch/bleve_index_api v1.2.8 h1:Y98Pu5/MdlkRyLM0qDHostYo7i+Vv1cDNhqTeR4Sy6Y=
-github.com/blevesearch/bleve_index_api v1.2.8/go.mod h1:rKQDl4u51uwafZxFrPD1R7xFOwKnzZW7s/LSeK4lgo0=
+github.com/blevesearch/bleve/v2 v2.5.4 h1:1iur8e+PHsxtncV2xIVuqlQme/V8guEDO2uV6Wll3lQ=
+github.com/blevesearch/bleve/v2 v2.5.4/go.mod h1:yB4PnV4N2q5rTEpB2ndG8N2ISexBQEFIYgwx4ztfvoo=
+github.com/blevesearch/bleve_index_api v1.2.10 h1:FMFmZCmTX6PdoLLvwUnKF2RsmILFFwO3h0WPevXY9fE=
+github.com/blevesearch/bleve_index_api v1.2.10/go.mod h1:rKQDl4u51uwafZxFrPD1R7xFOwKnzZW7s/LSeK4lgo0=
 github.com/blevesearch/geo v0.2.4 h1:ECIGQhw+QALCZaDcogRTNSJYQXRtC8/m8IKiA706cqk=
 github.com/blevesearch/geo v0.2.4/go.mod h1:K56Q33AzXt2YExVHGObtmRSFYZKYGv0JEN5mdacJJR8=
 github.com/blevesearch/go-faiss v1.0.25 h1:lel1rkOUGbT1CJ0YgzKwC7k+XH0XVBHnCVWahdCXk4U=
@@ -165,8 +165,8 @@ github.com/blevesearch/gtreap v0.1.1 h1:2JWigFrzDMR+42WGIN/V2p0cUvn4UP3C4Q5nmaZG
 github.com/blevesearch/gtreap v0.1.1/go.mod h1:QaQyDRAT51sotthUWAH4Sj08awFSSWzgYICSZ3w0tYk=
 github.com/blevesearch/mmap-go v1.0.4 h1:OVhDhT5B/M1HNPpYPBKIEJaD0F3Si+CrEKULGCDPWmc=
 github.com/blevesearch/mmap-go v1.0.4/go.mod h1:EWmEAOmdAS9z/pi/+Toxu99DnsbhG1TIxUoRmJw/pSs=
-github.com/blevesearch/scorch_segment_api/v2 v2.3.10 h1:Yqk0XD1mE0fDZAJXTjawJ8If/85JxnLd8v5vG/jWE/s=
-github.com/blevesearch/scorch_segment_api/v2 v2.3.10/go.mod h1:Z3e6ChN3qyN35yaQpl00MfI5s8AxUJbpTR/DL8QOQ+8=
+github.com/blevesearch/scorch_segment_api/v2 v2.3.12 h1:GGZc2qwbyRBwtckPPkHkLyXw64mmsLJxdturBI1cM+c=
+github.com/blevesearch/scorch_segment_api/v2 v2.3.12/go.mod h1:JBRGAneqgLSI2+jCNjtwMqp2B7EBF3/VUzgDPIU33MM=
 github.com/blevesearch/segment v0.9.1 h1:+dThDy+Lvgj5JMxhmOVlgFfkUtZV2kw49xax4+jTfSU=
 github.com/blevesearch/segment v0.9.1/go.mod h1:zN21iLm7+GnBHWTao9I+Au/7MBiL8pPFtJBJTsk6kQw=
 github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
@@ -185,8 +185,8 @@ github.com/blevesearch/zapx/v14 v14.4.2 h1:2SGHakVKd+TrtEqpfeq8X+So5PShQ5nW6GNxT
 github.com/blevesearch/zapx/v14 v14.4.2/go.mod h1:rz0XNb/OZSMjNorufDGSpFpjoFKhXmppH9Hi7a877D8=
 github.com/blevesearch/zapx/v15 v15.4.2 h1:sWxpDE0QQOTjyxYbAVjt3+0ieu8NCE0fDRaFxEsp31k=
 github.com/blevesearch/zapx/v15 v15.4.2/go.mod h1:1pssev/59FsuWcgSnTa0OeEpOzmhtmr/0/11H0Z8+Nw=
-github.com/blevesearch/zapx/v16 v16.2.4 h1:tGgfvleXTAkwsD5mEzgM3zCS/7pgocTCnO1oyAUjlww=
-github.com/blevesearch/zapx/v16 v16.2.4/go.mod h1:Rti/REtuuMmzwsI8/C/qIzRaEoSK/wiFYw5e5ctUKKs=
+github.com/blevesearch/zapx/v16 v16.2.6 h1:OHuUl2GhM+FpBq9RwNsJ4k/QodqbMMHoQEgn/IHYpu8=
+github.com/blevesearch/zapx/v16 v16.2.6/go.mod h1:cuAPB+YoIyRngNhno1S1GPr9SfMk+x/SgAHBLXSIq3k=
 github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
 github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
@@ -361,8 +361,8 @@ github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BN
 github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
 github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
 github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
-github.com/gkampitakis/go-snaps v0.5.14 h1:3fAqdB6BCPKHDMHAKRwtPUwYexKtGrNuw8HX/T/4neo=
-github.com/gkampitakis/go-snaps v0.5.14/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
+github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
+github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
 github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
 github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
 github.com/go-acme/lego/v4 v4.4.0 h1:uHhU5LpOYQOdp3aDU+XY2bajseu8fuExphTL1Ss6/Fc=
@@ -729,8 +729,10 @@ github.com/kolo/xmlrpc v0.0.0-20200310150728-e0350524596b/go.mod h1:o03bZfuBwAXH
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kovidgoyal/imaging v1.6.4 h1:K0idhRPXnRrJBKnBYcTfI1HTWSNDeAn7hYDvf9I0dCk=
-github.com/kovidgoyal/imaging v1.6.4/go.mod h1:bEIgsaZmXlvFfkv/CUxr9rJook6AQkJnpB5EPosRfRY=
+github.com/kovidgoyal/go-parallel v1.0.1 h1:nYUjN+EdpbmQjTg3N5eTUInuXTB3/1oD2vHdaMfuHoI=
+github.com/kovidgoyal/go-parallel v1.0.1/go.mod h1:BJNIbe6+hxyFWv7n6oEDPj3PA5qSw5OCtf0hcVxWJiw=
+github.com/kovidgoyal/imaging v1.7.2 h1:mmT6k6Az3mC6dbqdZ6Q9KQCdZFWTAQ+q97NyGZgJ/2c=
+github.com/kovidgoyal/imaging v1.7.2/go.mod h1:GdkCORjfZMMGFY0Pb7TDmRhj7PDhxF/QShKukSCj0VU=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -931,8 +933,8 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.26.0 h1:1J4Wut1IlYZNEAWIV3ALrT9NfiaGW2cDCJQSFQMs/gE=
-github.com/onsi/ginkgo/v2 v2.26.0/go.mod h1:qhEywmzWTBUY88kfO0BRvX4py7scov9yR+Az2oavUzw=
+github.com/onsi/ginkgo/v2 v2.27.1 h1:0LJC8MpUSQnfnp4n/3W3GdlmJP3ENGF0ZPzjQGLPP7s=
+github.com/onsi/ginkgo/v2 v2.27.1/go.mod h1:wmy3vCqiBjirARfVhAqFpYt8uvX0yaFe+GudAqqcCqA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
@@ -946,8 +948,8 @@ github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89 h1:W1ms+l
 github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89/go.mod h1:vigJkNss1N2QEceCuNw/ullDehncuJNFB6mEnzfq9UI=
 github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76 h1:vD/EdfDUrv4omSFjrinT8Mvf+8D7f9g4vgQ2oiDrVUI=
 github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76/go.mod h1:pzatilMEHZFT3qV7C/X3MqOa3NlRQuYhlRhZTL+hN6Q=
-github.com/opencloud-eu/reva/v2 v2.39.1-0.20251020192555-e3aa6a7d6d43 h1:GQWk2gk8BcbIVoEysI3QRVfARaZJG9jMOpNTKSIr/hY=
-github.com/opencloud-eu/reva/v2 v2.39.1-0.20251020192555-e3aa6a7d6d43/go.mod h1:rWCkqbdtVGVcZLZ2uw2kLGGjGnK8NTXfy9y0+rMyL8M=
+github.com/opencloud-eu/reva/v2 v2.39.1 h1:nJ6he/geQcS7EES6WmyZ5TWOA9EkqgL9MgXU1Nns/to=
+github.com/opencloud-eu/reva/v2 v2.39.1/go.mod h1:4CgDhO6Pc+HLdNI7+Rep8N0bc7qP9novdcv764IMpmM=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -1079,6 +1081,8 @@ github.com/russellhaering/goxmldsig v1.5.0/go.mod h1:x98CjQNFJcWfMxeOrMnMKg70lvD
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
+github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sacloud/libsacloud v1.36.2/go.mod h1:P7YAOVmnIn3DKHqCZcUKYUXmSwGBm3yS7IBEjKVSrjg=
 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210127161313-bd30bebeac4f/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
@@ -1360,8 +1364,8 @@ golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScy
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
-golang.org/x/image v0.31.0 h1:mLChjE2MV6g1S7oqbXC0/UcKijjm5fnJLUYKIYrLESA=
-golang.org/x/image v0.31.0/go.mod h1:R9ec5Lcp96v9FTF+ajwaH3uGxPH4fKfHHAVbUILxghA=
+golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ=
+golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -2,7 +2,6 @@ package command

 import (
 	"bytes"
-	"context"
 	"crypto/tls"
 	"encoding/base64"
 	"errors"
@@ -11,11 +10,8 @@ import (
 	"net/http"
 	"os"
 	"os/exec"
-	"os/signal"
 	"strconv"
 	"strings"
 	"sync"
-	"syscall"
 	"time"

 	"github.com/olekukonko/tablewriter"
@@ -68,15 +64,8 @@ func BenchmarkClientCommand(cfg *config.Config) *cli.Command {
-		&cli.StringFlag{
-			Name:    "data",
-			Aliases: []string{"d"},
-			Usage:   "Sends the specified data in a POST request to the HTTP server, in the same way that a browser does when a user has filled in an HTML form and presses the submit button. If you start the data with the letter @, the rest should be a file name to read the data from, or - if you want to read the data from stdin. When -d, --data is told to read from a file like that, carriage returns and newlines are stripped out. If you do not want the @ character to have a special interpretation use --data-raw instead.",
-		},
-		&cli.StringFlag{
-			Name:  "data-binary",
-			Usage: "This posts data exactly as specified with no extra processing whatsoever. If you start the data with the letter @, the rest should be a file name to read the data from, or - if you want to read the data from stdin.",
-		},
 		&cli.StringFlag{
 			Name:  "data-raw",
 			Usage: "Sends the specified data in a request to the HTTP server.",
+			// TODO: support multiple data flags, support data-binary, data-raw
 		},
 		&cli.StringSliceFlag{
 			Name: "header",
@@ -118,97 +107,14 @@ func BenchmarkClientCommand(cfg *config.Config) *cli.Command {
 		},
 		Category: "benchmark",
 		Action: func(c *cli.Context) error {
-			// Set up signal handling for Ctrl+C
-			ctx, cancel := context.WithCancel(c.Context)
-			defer cancel()
-
-			sigChan := make(chan os.Signal, 1)
-			signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
-			go func() {
-				<-sigChan
-				fmt.Println("\nReceived interrupt signal, shutting down...")
-				cancel()
-			}()
-
 			opt := clientOptions{
 				request:  c.String("request"),
 				url:      c.Args().First(),
 				insecure: c.Bool("insecure"),
 				jobs:     c.Int("jobs"),
 				headers:  make(map[string]string),
 				data:     []byte(c.String("data")),
 			}

-			if d := c.String("data-raw"); d != "" {
-				opt.request = "POST"
-				opt.headers["Content-Type"] = "application/x-www-form-urlencoded"
-				opt.data = []byte(d)
-			}
-
-			if d := c.String("data"); d != "" {
-				opt.request = "POST"
-				opt.headers["Content-Type"] = "application/x-www-form-urlencoded"
-				if strings.HasPrefix(d, "@") {
-					filePath := strings.TrimPrefix(d, "@")
-					var data []byte
-					var err error
-
-					// read from file or stdin and trim trailing newlines
-					if filePath == "-" {
-						data, err = os.ReadFile("/dev/stdin")
-					} else {
-						data, err = os.ReadFile(filePath)
-					}
-					if err != nil {
-						log.Fatal(errors.New("could not read data from file '" + filePath + "': " + err.Error()))
-					}
-
-					// clean the byte array similar to curl's --data parameter:
-					// remove leading/trailing whitespace and convert line breaks to spaces
-
-					// Trim leading and trailing whitespace
-					data = bytes.TrimSpace(data)
-
-					// Replace newlines and carriage returns with spaces
-					data = bytes.ReplaceAll(data, []byte("\r\n"), []byte(" "))
-					data = bytes.ReplaceAll(data, []byte("\n"), []byte(" "))
-					data = bytes.ReplaceAll(data, []byte("\r"), []byte(" "))
-
-					// Replace multiple spaces with a single space
-					for bytes.Contains(data, []byte("  ")) {
-						data = bytes.ReplaceAll(data, []byte("  "), []byte(" "))
-					}
-
-					opt.data = data
-				} else {
-					opt.data = []byte(d)
-				}
-			}
-
-			if d := c.String("data-binary"); d != "" {
-				opt.request = "POST"
-				opt.headers["Content-Type"] = "application/x-www-form-urlencoded"
-				if strings.HasPrefix(d, "@") {
-					filePath := strings.TrimPrefix(d, "@")
-					var data []byte
-					var err error
-					if filePath == "-" {
-						data, err = os.ReadFile("/dev/stdin")
-					} else {
-						data, err = os.ReadFile(filePath)
-					}
-					if err != nil {
-						log.Fatal(errors.New("could not read data from file '" + filePath + "': " + err.Error()))
-					}
-					opt.data = data
-				} else {
-					opt.data = []byte(d)
-				}
-			}
-
-			// override method if specified
-			if request := c.String("request"); request != "" {
-				opt.request = request
-			}
-
 			if opt.url == "" {
 				log.Fatal(errors.New("no URL specified"))
 			}
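The removed `--data` handling mirrors curl: an `@` prefix reads the payload from a file (`@-` reads stdin), then line breaks are stripped and runs of spaces collapsed. A self-contained sketch of that normalization, assuming nothing beyond the standard library; `readCurlData` is a hypothetical name, and stdin is read portably via `io.ReadAll` instead of `/dev/stdin`:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// readCurlData mimics the removed -d/--data logic: "@path" reads a file,
// "@-" reads stdin, and line breaks are normalized away like curl does.
func readCurlData(d string) ([]byte, error) {
	if len(d) == 0 || d[0] != '@' {
		return []byte(d), nil
	}
	var raw []byte
	var err error
	if d[1:] == "-" {
		raw, err = io.ReadAll(os.Stdin)
	} else {
		raw, err = os.ReadFile(d[1:])
	}
	if err != nil {
		return nil, err
	}
	// Trim leading/trailing whitespace, turn line breaks into spaces,
	// then collapse repeated spaces, as the command above did.
	raw = bytes.TrimSpace(raw)
	for _, nl := range []string{"\r\n", "\n", "\r"} {
		raw = bytes.ReplaceAll(raw, []byte(nl), []byte(" "))
	}
	for bytes.Contains(raw, []byte("  ")) {
		raw = bytes.ReplaceAll(raw, []byte("  "), []byte(" "))
	}
	return raw, nil
}

func main() {
	data, err := readCurlData("@-") // e.g. echo "a=b" | ./prog
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", data)
}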
@@ -273,7 +179,7 @@ func BenchmarkClientCommand(cfg *config.Config) *cli.Command {
 				defer opt.ticker.Stop()
 			}

-			return client(ctx, opt)
+			return client(opt)
 		},
 	}
@@ -291,19 +197,16 @@ type clientOptions struct {
 	jobs int
 }

-func client(ctx context.Context, o clientOptions) error {
+func client(o clientOptions) error {

 	type stat struct {
 		job      int
 		duration time.Duration
 		status   int
 	}
 	stats := make(chan stat)
 	var wg sync.WaitGroup

 	for i := 0; i < o.jobs; i++ {
 		wg.Add(1)
 		go func(i int) {
 			defer wg.Done()
 			tr := &http.Transport{
 				TLSClientConfig: &tls.Config{
 					MinVersion: tls.VersionTLS12,
@@ -314,13 +217,6 @@ func client(ctx context.Context, o clientOptions) error {

 			cookies := map[string]*http.Cookie{}
 			for {
-				// Check if context is cancelled
-				select {
-				case <-ctx.Done():
-					return
-				default:
-				}
-
 				req, err := http.NewRequest(o.request, o.url, bytes.NewReader(o.data))
 				if err != nil {
 					log.Printf("client %d: could not create request: %s\n", i, err)
@@ -338,35 +234,20 @@ func client(ctx context.Context, o clientOptions) error {
 				res, err := client.Do(req)
 				duration := -time.Until(start)
 				if err != nil {
-					// Check if error is due to context cancellation
-					if ctx.Err() != nil {
-						return
-					}
 					log.Printf("client %d: could not create request: %s\n", i, err)
 					time.Sleep(time.Second)
 				} else {
 					res.Body.Close()
-					select {
-					case stats <- stat{
+					stats <- stat{
 						job:      i,
 						duration: duration,
 						status:   res.StatusCode,
-					}:
-					case <-ctx.Done():
-						return
-					}
+					}
 					for _, c := range res.Cookies() {
 						cookies[c.Name] = c
 					}
 				}
-				// Sleep with context awareness
-				if o.rateDelay > duration {
-					select {
-					case <-time.After(o.rateDelay - duration):
-					case <-ctx.Done():
-						return
-					}
-				}
+				time.Sleep(o.rateDelay - duration)
 			}
 		}(i)
 	}
@@ -375,15 +256,9 @@ func client(ctx context.Context, o clientOptions) error {
 	if o.ticker == nil {
 		// no ticker, just write every request
 		for {
-			select {
-			case stat := <-stats:
-				numRequests++
-				fmt.Printf("req %d took %v and returned status %d\n", numRequests, stat.duration, stat.status)
-			case <-ctx.Done():
-				fmt.Println("\nShutting down...")
-				wg.Wait()
-				return nil
-			}
+			stat := <-stats
+			numRequests++
+			fmt.Printf("req %d took %v and returned status %d\n", numRequests, stat.duration, stat.status)
 		}
 	}

@@ -399,13 +274,6 @@ func client(ctx context.Context, o clientOptions) error {
 				numRequests = 0
 				duration = 0
 			}
-		case <-ctx.Done():
-			if numRequests > 0 {
-				fmt.Printf("\n%d req at %v/req\n", numRequests, duration/time.Duration(numRequests))
-			}
-			fmt.Println("Shutting down...")
-			wg.Wait()
-			return nil
 		}
 	}
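The minus side of these hunks guards every blocking point (channel send, rate-limit sleep, result read) with a `select` on `ctx.Done()`, so an interrupt can stop workers that would otherwise block forever on an unread `stats` channel. A minimal, runnable sketch of that pattern, with illustrative names and timings:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	stats := make(chan int)
	go func() {
		for i := 0; ; i++ {
			select {
			case stats <- i: // consumer is still listening
			case <-ctx.Done(): // shutdown requested; exit instead of blocking
				return
			}
			time.Sleep(10 * time.Millisecond) // stand-in for rateDelay
		}
	}()

	for {
		select {
		case s := <-stats:
			fmt.Println("stat", s)
		case <-ctx.Done():
			fmt.Println("shutting down")
			return
		}
	}
}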
@@ -521,6 +521,24 @@ func trapShutdownCtx(s *Service, srv *http.Server, ctx context.Context) error {
 		s.Log.Debug().Msg("runtime listener shutdown done")
 	}()

+	// shutdown services in the order defined in the config
+	// any services not listed will be shutdown in parallel afterwards
+	for _, sName := range s.cfg.Runtime.ShutdownOrder {
+		if _, ok := s.serviceToken[sName]; !ok {
+			s.Log.Warn().Str("service", sName).Msg("unknown service for ordered shutdown, skipping")
+			continue
+		}
+		for i := range s.serviceToken[sName] {
+			if err := s.Supervisor.RemoveAndWait(s.serviceToken[sName][i], _defaultShutdownTimeoutDuration); err != nil && !errors.Is(err, suture.ErrSupervisorNotRunning) {
+				s.Log.Error().Err(err).Str("service", sName).Msg("could not shutdown service in order, skipping to next")
+				// continue shutting down other services
+				continue
+			}
+			s.Log.Debug().Str("service", sName).Msg("graceful ordered shutdown for service done")
+		}
+		delete(s.serviceToken, sName)
+	}
+
 	for sName := range s.serviceToken {
 		for i := range s.serviceToken[sName] {
 			wg.Add(1)
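The added block implements a two-phase shutdown: services named in `ShutdownOrder` are stopped sequentially first, then whatever remains is stopped concurrently. A simplified, runnable sketch of that pattern, with a placeholder `stop` standing in for `Supervisor.RemoveAndWait` and made-up service names:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// services maps a service name to its running instances,
	// mirroring s.serviceToken in the runtime.
	services := map[string][]string{
		"proxy": {"proxy-0"},
		"idm":   {"idm-0"},
		"nats":  {"nats-0"},
	}
	shutdownOrder := []string{"proxy"} // e.g. OC_SHUTDOWN_ORDER=proxy

	// stop is a stand-in for Supervisor.RemoveAndWait with a timeout.
	stop := func(token string) {
		time.Sleep(10 * time.Millisecond)
		fmt.Println("stopped", token)
	}

	// Phase 1: stop the listed services strictly in order.
	for _, name := range shutdownOrder {
		for _, tok := range services[name] {
			stop(tok)
		}
		delete(services, name)
	}

	// Phase 2: stop everything left in parallel.
	var wg sync.WaitGroup
	for _, toks := range services {
		for _, tok := range toks {
			wg.Add(1)
			go func(t string) {
				defer wg.Done()
				stop(t)
			}(tok)
		}
	}
	wg.Wait()
}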
@@ -50,11 +50,12 @@ type Mode int

 // Runtime configures the OpenCloud runtime when running in supervised mode.
 type Runtime struct {
-	Port       string   `yaml:"port" env:"OC_RUNTIME_PORT" desc:"The TCP port at which OpenCloud will be available" introductionVersion:"1.0.0"`
-	Host       string   `yaml:"host" env:"OC_RUNTIME_HOST" desc:"The host at which OpenCloud will be available" introductionVersion:"1.0.0"`
-	Services   []string `yaml:"services" env:"OC_RUN_EXTENSIONS;OC_RUN_SERVICES" desc:"A comma-separated list of service names. Will start only the listed services." introductionVersion:"1.0.0"`
-	Disabled   []string `yaml:"disabled_services" env:"OC_EXCLUDE_RUN_SERVICES" desc:"A comma-separated list of service names. Will start all default services except of the ones listed. Has no effect when OC_RUN_SERVICES is set." introductionVersion:"1.0.0"`
-	Additional []string `yaml:"add_services" env:"OC_ADD_RUN_SERVICES" desc:"A comma-separated list of service names. Will add the listed services to the default configuration. Has no effect when OC_RUN_SERVICES is set. Note that one can add services not started by the default list and exclude services from the default list by using both envvars at the same time." introductionVersion:"1.0.0"`
+	Port          string   `yaml:"port" env:"OC_RUNTIME_PORT" desc:"The TCP port at which OpenCloud will be available" introductionVersion:"1.0.0"`
+	Host          string   `yaml:"host" env:"OC_RUNTIME_HOST" desc:"The host at which OpenCloud will be available" introductionVersion:"1.0.0"`
+	Services      []string `yaml:"services" env:"OC_RUN_EXTENSIONS;OC_RUN_SERVICES" desc:"A comma-separated list of service names. Will start only the listed services." introductionVersion:"1.0.0"`
+	Disabled      []string `yaml:"disabled_services" env:"OC_EXCLUDE_RUN_SERVICES" desc:"A comma-separated list of service names. Will start all default services except of the ones listed. Has no effect when OC_RUN_SERVICES is set." introductionVersion:"1.0.0"`
+	Additional    []string `yaml:"add_services" env:"OC_ADD_RUN_SERVICES" desc:"A comma-separated list of service names. Will add the listed services to the default configuration. Has no effect when OC_RUN_SERVICES is set. Note that one can add services not started by the default list and exclude services from the default list by using both envvars at the same time." introductionVersion:"1.0.0"`
+	ShutdownOrder []string `yaml:"shutdown_order" env:"OC_SHUTDOWN_ORDER" desc:"A comma-separated list of service names defining the order in which services are shut down. Services not listed will be stopped after the listed ones in random order." introductionVersion:"%%NEXT%%"`
 }

 // Config combines all available configuration parts.
@@ -50,8 +50,9 @@ func DefaultConfig() *Config {
 	return &Config{
 		OpenCloudURL: "https://localhost:9200",
 		Runtime: Runtime{
-			Port: "9250",
-			Host: "localhost",
+			Port:          "9250",
+			Host:          "localhost",
+			ShutdownOrder: []string{"proxy"},
 		},
 		Reva: &shared.Reva{
 			Address: "eu.opencloud.api.gateway",
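With the default above, the proxy is stopped first, presumably so it stops accepting traffic before the backend services go away. The env override is a plain comma-separated list; a tiny sketch of how such a value is supplied and split (the parsing shown is illustrative, not the runtime's actual loader):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Services named here stop first, in order; the rest stop afterwards
	// in parallel (random order).
	os.Setenv("OC_SHUTDOWN_ORDER", "proxy,frontend")

	order := strings.Split(os.Getenv("OC_SHUTDOWN_ORDER"), ",")
	fmt.Println(order) // [proxy frontend]
}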
@@ -16,7 +16,7 @@ var (
 	// LatestTag is the latest released version plus the dev meta version.
 	// Will be overwritten by the release pipeline
 	// Needs a manual change for every tagged release
-	LatestTag = "3.5.0+dev"
+	LatestTag = "3.6.0+dev"

 	// Date indicates the build date.
 	// This has been removed, it looks like you can only replace static strings with recent go versions
@@ -11,7 +11,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: \n"
 "Report-Msgid-Bugs-To: EMAIL\n"
-"POT-Creation-Date: 2025-10-05 00:01+0000\n"
+"POT-Creation-Date: 2025-10-26 00:00+0000\n"
 "PO-Revision-Date: 2025-01-27 10:17+0000\n"
 "Last-Translator: Stephan Paternotte <stephan@paternottes.net>, 2025\n"
 "Language-Team: Dutch (https://app.transifex.com/opencloud-eu/teams/204053/nl/)\n"

@@ -11,7 +11,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: \n"
 "Report-Msgid-Bugs-To: EMAIL\n"
-"POT-Creation-Date: 2025-10-05 00:01+0000\n"
+"POT-Creation-Date: 2025-10-26 00:00+0000\n"
 "PO-Revision-Date: 2025-01-27 10:17+0000\n"
 "Last-Translator: Stephan Paternotte <stephan@paternottes.net>, 2025\n"
 "Language-Team: Dutch (https://app.transifex.com/opencloud-eu/teams/204053/nl/)\n"

@@ -12,7 +12,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: \n"
 "Report-Msgid-Bugs-To: EMAIL\n"
-"POT-Creation-Date: 2025-10-07 00:00+0000\n"
+"POT-Creation-Date: 2025-10-27 00:01+0000\n"
 "PO-Revision-Date: 2025-01-27 10:17+0000\n"
 "Last-Translator: Lulufox, 2025\n"
 "Language-Team: Russian (https://app.transifex.com/opencloud-eu/teams/204053/ru/)\n"

@@ -11,7 +11,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: \n"
 "Report-Msgid-Bugs-To: EMAIL\n"
-"POT-Creation-Date: 2025-10-05 00:01+0000\n"
+"POT-Creation-Date: 2025-10-26 00:00+0000\n"
 "PO-Revision-Date: 2025-01-27 10:17+0000\n"
 "Last-Translator: Stephan Paternotte <stephan@paternottes.net>, 2025\n"
 "Language-Team: Dutch (https://app.transifex.com/opencloud-eu/teams/204053/nl/)\n"
@@ -197,34 +197,6 @@ class GraphHelper {
 		return $baseUrl . '/graph/v1beta1/' . $path;
 	}

-	/**
-	 * @param string $baseUrl
-	 * @param string $xRequestId
-	 * @param string $method
-	 * @param string $path
-	 * @param string|null $body
-	 * @param array|null $headers
-	 *
-	 * @return RequestInterface
-	 */
-	public static function createRequest(
-		string $baseUrl,
-		string $xRequestId,
-		string $method,
-		string $path,
-		?string $body = null,
-		?array $headers = []
-	): RequestInterface {
-		$fullUrl = self::getFullUrl($baseUrl, $path);
-		return HttpRequestHelper::createRequest(
-			$fullUrl,
-			$xRequestId,
-			$method,
-			$headers,
-			$body
-		);
-	}
-
 	/**
 	 * @param string $baseUrl
 	 * @param string $xRequestId
@@ -1908,7 +1880,7 @@ class GraphHelper {
 		string $permissionsId
 	): ResponseInterface {
 		$url = self::getBetaFullUrl($baseUrl, "drives/$spaceId/items/$itemId/permissions/$permissionsId");
-		return HttpRequestHelper::sendRequestOnce(
+		return HttpRequestHelper::sendRequest(
 			$url,
 			$xRequestId,
 			'PATCH',
@@ -2264,7 +2236,7 @@ class GraphHelper {
 	): ResponseInterface {
 		$url = self::getBetaFullUrl($baseUrl, "drives/$spaceId/root/permissions/$permissionsId");

-		return HttpRequestHelper::sendRequestOnce(
+		return HttpRequestHelper::sendRequest(
 			$url,
 			$xRequestId,
 			'PATCH',
@@ -74,6 +74,7 @@ class HttpRequestHelper {
 	 *                            than download it all up-front.
 	 * @param int|null $timeout
 	 * @param Client|null $client
+	 * @param string|null $bearerToken
 	 *
 	 * @return ResponseInterface
 	 * @throws GuzzleException
@@ -90,7 +91,8 @@ class HttpRequestHelper {
 		?CookieJar $cookies = null,
 		bool $stream = false,
 		?int $timeout = 0,
-		?Client $client = null
+		?Client $client = null,
+		?string $bearerToken = null
 	): ResponseInterface {
 		if ($client === null) {
 			$client = self::createClient(
@@ -99,7 +101,8 @@ class HttpRequestHelper {
 				$config,
 				$cookies,
 				$stream,
-				$timeout
+				$timeout,
+				$bearerToken
 			);
 		}

@@ -200,6 +203,13 @@ class HttpRequestHelper {
 		} else {
 			$debugResponses = false;
 		}
+		// use basic auth for the 'public' user or when no user is given
+		if ($user === 'public' || $user === null || $user === '') {
+			$bearerToken = null;
+		} else {
+			$useBearerToken = TokenHelper::useBearerToken();
+			$bearerToken = $useBearerToken ? TokenHelper::getTokens($user, $password, $url)['access_token'] : null;
+		}

 		$sendRetryLimit = self::numRetriesOnHttpTooEarly();
 		$sendCount = 0;
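The added block picks basic auth for the public/anonymous cases and a bearer token otherwise, gated on the same `USE_BEARER_TOKEN` switch the CI config sets. A compact Go rendering of that decision, under the assumption that the env var uses the literal string "true"; the function name and token argument are illustrative:

package main

import (
	"encoding/base64"
	"fmt"
	"os"
)

// authHeader mirrors the selection above: named users get a bearer token
// when USE_BEARER_TOKEN=true, everyone else falls back to basic auth.
func authHeader(user, password, accessToken string) string {
	useBearer := os.Getenv("USE_BEARER_TOKEN") == "true"
	if user == "" || user == "public" || !useBearer {
		cred := base64.StdEncoding.EncodeToString([]byte(user + ":" + password))
		return "Basic " + cred
	}
	return "Bearer " + accessToken
}

func main() {
	os.Setenv("USE_BEARER_TOKEN", "true")
	fmt.Println(authHeader("alice", "secret", "eyJ...")) // Bearer eyJ...
}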
@@ -217,7 +227,8 @@ class HttpRequestHelper {
 				$cookies,
 				$stream,
 				$timeout,
-				$client
+				$client,
+				$bearerToken,
 			);

 			if ($response->getStatusCode() >= 400
@@ -348,6 +359,7 @@ class HttpRequestHelper {
 	 * @param bool $stream Set to true to stream a response rather
 	 *                     than download it all up-front.
 	 * @param int|null $timeout
+	 * @param string|null $bearerToken
 	 *
 	 * @return Client
 	 */
@@ -357,10 +369,13 @@ class HttpRequestHelper {
 		?array $config = null,
 		?CookieJar $cookies = null,
 		?bool $stream = false,
-		?int $timeout = 0
+		?int $timeout = 0,
+		?string $bearerToken = null
 	): Client {
 		$options = [];
-		if ($user !== null) {
+		if ($bearerToken !== null) {
+			$options['headers']['Authorization'] = 'Bearer ' . $bearerToken;
+		} elseif ($user !== null) {
 			$options['auth'] = [$user, $password];
 		}
 		if ($config !== null) {
tests/acceptance/TestHelpers/TokenHelper.php (new file, 403 lines)
@@ -0,0 +1,403 @@
<?php
/**
 * @author Viktor Scharf <v.scharf@opencloud.eu>
 * @copyright Copyright (c) 2025 Viktor Scharf <v.scharf@opencloud.eu>
 *
 * This code is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License,
 * as published by the Free Software Foundation;
 * either version 3 of the License, or any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>
 *
 */

namespace TestHelpers;

use GuzzleHttp\Client;
use GuzzleHttp\Cookie\CookieJar;
use GuzzleHttp\Exception\GuzzleException;
use Exception;

/**
 * Helper for obtaining bearer tokens for users
 */
class TokenHelper {
	private const LOGON_URL = '/signin/v1/identifier/_/logon';
	private const REDIRECT_URL = '/oidc-callback.html';
	private const TOKEN_URL = '/konnect/v1/token';

	// Static cache [username => token_data]
	private static array $tokenCache = [];

	/**
	 * @return bool
	 */
	public static function useBearerToken(): bool {
		return \getenv('USE_BEARER_TOKEN') === 'true';
	}

	/**
	 * Extracts the base URL from a full URL
	 *
	 * @param string $url
	 *
	 * @return string the base URL
	 */
	private static function extractBaseUrl(string $url): string {
		return preg_replace('#(https?://[^/]+).*#', '$1', $url);
	}

	/**
	 * Get access and refresh tokens for a user.
	 * Uses a cache to avoid unnecessary requests.
	 *
	 * @param string $username
	 * @param string $password
	 * @param string $url
	 *
	 * @return array ['access_token' => string, 'refresh_token' => string, 'expires_at' => int]
	 * @throws GuzzleException
	 * @throws Exception
	 */
	public static function getTokens(string $username, string $password, string $url): array {
		// Extract the base URL. $url is passed in so that the correct server
		// is targeted when multiple servers are in play (ocm suite).
		$baseUrl = self::extractBaseUrl($url);
		$cacheKey = $username . '|' . $baseUrl;

		// Check cache
		if (isset(self::$tokenCache[$cacheKey])) {
			$cachedToken = self::$tokenCache[$cacheKey];

			// Check if the access token has expired
			if (time() < $cachedToken['expires_at']) {
				return $cachedToken;
			}

			$refreshedToken = self::refreshToken($cachedToken['refresh_token'], $baseUrl);
			$tokenData = [
				'access_token' => $refreshedToken['access_token'],
				'refresh_token' => $refreshedToken['refresh_token'],
				'expires_at' => time() + 300 // 5 minutes
			];
			self::$tokenCache[$cacheKey] = $tokenData;
			return $tokenData;
		}

		// Get new tokens
		$cookieJar = new CookieJar();

		$continueUrl = self::getAuthorizedEndPoint($username, $password, $baseUrl, $cookieJar);
		$code = self::getCode($continueUrl, $baseUrl, $cookieJar);
		$tokens = self::getToken($code, $baseUrl, $cookieJar);

		$tokenData = [
			'access_token' => $tokens['access_token'],
			'refresh_token' => $tokens['refresh_token'],
			'expires_at' => time() + 290 // set expiry to 290 seconds to allow for some buffer
		];

		// Save to cache
		self::$tokenCache[$cacheKey] = $tokenData;

		return $tokenData;
	}

	/**
	 * Refresh token
	 *
	 * @param string $refreshToken
	 * @param string $baseUrl
	 *
	 * @return array
	 * @throws GuzzleException
	 * @throws Exception
	 */
	private static function refreshToken(string $refreshToken, string $baseUrl): array {
		$client = new Client(
			[
				'verify' => false,
				'http_errors' => false,
				'allow_redirects' => false
			]
		);

		$response = $client->post(
			$baseUrl . self::TOKEN_URL,
			[
				'form_params' => [
					'client_id' => 'web',
					'refresh_token' => $refreshToken,
					'grant_type' => 'refresh_token'
				]
			]
		);

		if ($response->getStatusCode() !== 200) {
			throw new Exception(
				\sprintf(
					'Token refresh failed: Expected status code 200 but received %d. Message: %s',
					$response->getStatusCode(),
					$response->getReasonPhrase()
				)
			);
		}

		$data = json_decode($response->getBody()->getContents(), true);

		if (!isset($data['access_token']) || !isset($data['refresh_token'])) {
			throw new Exception('Missing tokens in refresh response');
		}

		return [
			'access_token' => $data['access_token'],
			'refresh_token' => $data['refresh_token']
		];
	}

	/**
	 * Clear cached tokens for a specific user
	 *
	 * @param string $username
	 * @param string $url
	 *
	 * @return void
	 */
	public static function clearUserTokens(string $username, string $url): void {
		$baseUrl = self::extractBaseUrl($url);
		$cacheKey = $username . '|' . $baseUrl;
		unset(self::$tokenCache[$cacheKey]);
	}

	/**
	 * Clear all cached tokens
	 *
	 * @return void
	 */
	public static function clearAllTokens(): void {
		self::$tokenCache = [];
	}

	/**
	 * @param string $username
	 * @param string $password
	 * @param string $baseUrl
	 * @param CookieJar $cookieJar
	 *
	 * @return \Psr\Http\Message\ResponseInterface
	 * @throws GuzzleException
	 */
	public static function makeLoginRequest(
		string $username,
		string $password,
		string $baseUrl,
		CookieJar $cookieJar
	): \Psr\Http\Message\ResponseInterface {
		$client = new Client(
			[
				'verify' => false,
				'http_errors' => false,
				'allow_redirects' => false,
				'cookies' => $cookieJar
			]
		);

		return $client->post(
			$baseUrl . self::LOGON_URL,
			[
				'headers' => [
					'Kopano-Konnect-XSRF' => '1',
					'Referer' => $baseUrl,
					'Content-Type' => 'application/json'
				],
				'json' => [
					'params' => [$username, $password, '1'],
					'hello' => [
						'scope' => 'openid profile offline_access email',
						'client_id' => 'web',
						'redirect_uri' => $baseUrl . self::REDIRECT_URL,
						'flow' => 'oidc'
					]
				]
			]
		);
	}

	/**
	 * Step 1: Log in and get the continue_uri
	 *
	 * @param string $username
	 * @param string $password
	 * @param string $baseUrl
	 * @param CookieJar $cookieJar
	 *
	 * @return string
	 * @throws GuzzleException
	 * @throws Exception
	 */
	private static function getAuthorizedEndPoint(
		string $username,
		string $password,
		string $baseUrl,
		CookieJar $cookieJar
	): string {
		$response = self::makeLoginRequest($username, $password, $baseUrl, $cookieJar);

		if ($response->getStatusCode() !== 200) {
			throw new Exception(
				\sprintf(
					'Logon failed: Expected status code 200 but received %d. Message: %s',
					$response->getStatusCode(),
					$response->getReasonPhrase()
				)
			);
		}

		$data = json_decode($response->getBody()->getContents(), true);

		if (!isset($data['hello']['continue_uri'])) {
			throw new Exception('Missing continue_uri in logon response');
		}

		return $data['hello']['continue_uri'];
	}

	/**
	 * Step 2: Authorize and get the code
	 *
	 * @param string $continueUrl
	 * @param string $baseUrl
	 * @param CookieJar $cookieJar
	 *
	 * @return string
	 * @throws GuzzleException
	 * @throws Exception
	 */
	private static function getCode(string $continueUrl, string $baseUrl, CookieJar $cookieJar): string {
		$client = new Client(
			[
				'verify' => false,
				'http_errors' => false,
				'allow_redirects' => false, // Disable automatic redirects
				'cookies' => $cookieJar
			]
		);

		$params = [
			'client_id' => 'web',
			'prompt' => 'none',
			'redirect_uri' => $baseUrl . self::REDIRECT_URL,
			'response_mode' => 'query',
			'response_type' => 'code',
			'scope' => 'openid profile offline_access email'
		];

		$response = $client->get(
			$continueUrl,
			[
				'query' => $params
			]
		);

		if ($response->getStatusCode() !== 302) {
			// Add debugging to understand what is happening
			$body = $response->getBody()->getContents();
			throw new Exception(
				\sprintf(
					'Authorization failed: Expected status code 302 but received %d. Message: %s. Body: %s',
					$response->getStatusCode(),
					$response->getReasonPhrase(),
					$body
				)
			);
		}

		$location = $response->getHeader('Location')[0] ?? '';

		if (empty($location)) {
			throw new Exception('Missing Location header in authorization response');
		}

		parse_str(parse_url($location, PHP_URL_QUERY), $queryParams);

		// Check for errors
		if (isset($queryParams['error'])) {
			throw new Exception(
				\sprintf(
					'Authorization error: %s - %s',
					$queryParams['error'],
					urldecode($queryParams['error_description'] ?? 'No description')
				)
			);
		}

		if (!isset($queryParams['code'])) {
			throw new Exception('Missing auth code in redirect URL. Location: ' . $location);
		}

		return $queryParams['code'];
	}

	/**
	 * Step 3: Get the token
	 *
	 * @param string $code
	 * @param string $baseUrl
	 * @param CookieJar $cookieJar
	 *
	 * @return array
	 *
	 * @throws GuzzleException
	 * @throws Exception
	 *
	 */
	private static function getToken(string $code, string $baseUrl, CookieJar $cookieJar): array {
		$client = new Client(
			[
				'verify' => false,
				'http_errors' => false,
				'allow_redirects' => false,
				'cookies' => $cookieJar
			]
		);

		$response = $client->post(
			$baseUrl . self::TOKEN_URL,
			[
				'form_params' => [
					'client_id' => 'web',
					'code' => $code,
					'redirect_uri' => $baseUrl . self::REDIRECT_URL,
					'grant_type' => 'authorization_code'
				]
			]
		);

		if ($response->getStatusCode() !== 200) {
			throw new Exception(
				\sprintf(
					'Token request failed: Expected status code 200 but received %d. Message: %s',
					$response->getStatusCode(),
					$response->getReasonPhrase()
				)
			);
		}

		$data = json_decode($response->getBody()->getContents(), true);

		if (!isset($data['access_token']) || !isset($data['refresh_token'])) {
			throw new Exception('Missing tokens in response');
		}

		return [
			'access_token' => $data['access_token'],
			'refresh_token' => $data['refresh_token']
		];
	}
}
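For readers porting the helper elsewhere, here is a hedged Go sketch of the same three-step flow (logon → authorize with prompt=none → token exchange). Endpoints, the `web` client_id, scopes, and headers are taken from the class above; the base URL and credentials are placeholder assumptions, not part of the source:

package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"strings"
)

func main() {
	base := "https://localhost:9200" // assumed local test server
	jar, _ := cookiejar.New(nil)
	c := &http.Client{
		Jar: jar,
		// mirrors 'verify' => false in the PHP helper
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
		// step 2 must observe the 302 itself, so redirects are not followed
		CheckRedirect: func(*http.Request, []*http.Request) error { return http.ErrUseLastResponse },
	}
	redirect := base + "/oidc-callback.html"
	scope := "openid profile offline_access email"

	// Step 1: logon against the Konnect identifier endpoint.
	logon := fmt.Sprintf(
		`{"params":["alice","secret","1"],"hello":{"scope":"%s","client_id":"web","redirect_uri":"%s","flow":"oidc"}}`,
		scope, redirect)
	req, _ := http.NewRequest("POST", base+"/signin/v1/identifier/_/logon", strings.NewReader(logon))
	req.Header.Set("Kopano-Konnect-XSRF", "1")
	req.Header.Set("Referer", base)
	req.Header.Set("Content-Type", "application/json")
	res, err := c.Do(req)
	if err != nil {
		panic(err)
	}
	var hello struct {
		Hello struct {
			ContinueURI string `json:"continue_uri"`
		} `json:"hello"`
	}
	json.NewDecoder(res.Body).Decode(&hello)
	res.Body.Close()

	// Step 2: hit continue_uri with prompt=none; the auth code arrives in
	// the Location header of a 302 redirect.
	q := url.Values{
		"client_id": {"web"}, "prompt": {"none"}, "redirect_uri": {redirect},
		"response_mode": {"query"}, "response_type": {"code"}, "scope": {scope},
	}
	res2, err := c.Get(hello.Hello.ContinueURI + "?" + q.Encode())
	if err != nil {
		panic(err)
	}
	res2.Body.Close()
	loc, _ := url.Parse(res2.Header.Get("Location"))
	code := loc.Query().Get("code")

	// Step 3: exchange the code for access and refresh tokens.
	form := url.Values{
		"client_id": {"web"}, "code": {code},
		"redirect_uri": {redirect}, "grant_type": {"authorization_code"},
	}
	res3, err := c.PostForm(base+"/konnect/v1/token", form)
	if err != nil {
		panic(err)
	}
	defer res3.Body.Close()
	var tok map[string]any
	json.NewDecoder(res3.Body).Decode(&tok)
	fmt.Println("got access_token:", tok["access_token"] != nil)
}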
@@ -25,6 +25,7 @@ use Behat\Behat\Context\Context;
 use Psr\Http\Message\ResponseInterface;
 use TestHelpers\HttpRequestHelper;
 use TestHelpers\BehatHelper;
+use TestHelpers\TokenHelper;
 use TestHelpers\WebDavHelper;

 /**
@@ -714,4 +715,27 @@ class AuthContext implements Context {
 		);
 		$this->featureContext->setResponse($response);
 	}
+
+	/**
+	 * @When user :user should not be able to log in with wrong password :password
+	 *
+	 * @param string $user
+	 * @param string $password
+	 *
+	 * @return void
+	 */
+	public function userShouldNotBeAbleToLogInWithWrongPassword(
+		string $user,
+		string $password
+	): void {
+		TokenHelper::clearUserTokens($user, $this->featureContext->getBaseUrl());
+		$response = TokenHelper::makeLoginRequest(
+			$user,
+			$password,
+			$this->featureContext->getBaseUrl(),
+			new \GuzzleHttp\Cookie\CookieJar()
+		);
+		// why is a 401 not returned here?
+		$this->featureContext->theHTTPStatusCodeShouldBe(204, 'should not be able to log in', $response);
+	}
 }
@@ -17,6 +17,7 @@ use TestHelpers\GraphHelper;
use TestHelpers\WebDavHelper;
use TestHelpers\HttpRequestHelper;
use TestHelpers\BehatHelper;
use TestHelpers\TokenHelper;

require_once 'bootstrap.php';

@@ -2864,6 +2865,7 @@ class GraphContext implements Context {
        );
        $this->featureContext->theHTTPStatusCodeShouldBe(200, '', $response);
        $this->featureContext->updateUsernameInCreatedUserList($byUser, $userName);
        TokenHelper::clearUserTokens($byUser, $this->featureContext->getBaseUrl());
    }

    /**
@@ -31,6 +31,7 @@ use TestHelpers\WebDavHelper;
use TestHelpers\GraphHelper;
use Laminas\Ldap\Exception\LdapException;
use Laminas\Ldap\Ldap;
use TestHelpers\TokenHelper;

/**
 * Functions for provisioning of users and groups
@@ -558,110 +559,65 @@ trait Provisioning {
     */
    public function usersHaveBeenCreated(
        TableNode $table,
        bool $useDefault=true,
        bool $initialize=true
        bool $useDefault = true,
        bool $initialize = true
    ) {
        $this->verifyTableNodeColumns($table, ['username'], ['displayname', 'email', 'password']);
        $table = $table->getColumnsHash();
        $users = $this->buildUsersAttributesArray($useDefault, $table);

        $requests = [];
        $client = HttpRequestHelper::createClient(
            $this->getAdminUsername(),
            $this->getAdminPassword()
        );

        foreach ($users as $userAttributes) {
            $userName = $userAttributes['userid'];
            $password = $userAttributes['password'];
            $displayName = $userAttributes['displayName'];
            $email = $userAttributes['email'];

            if ($this->isTestingWithLdap()) {
                $this->createLdapUser($userAttributes);
            } else {
                $attributesToCreateUser['userid'] = $userAttributes['userid'];
                $attributesToCreateUser['password'] = $userAttributes['password'];
                $attributesToCreateUser['displayname'] = $userAttributes['displayName'];
                if ($userAttributes['email'] === null) {
                    Assert::assertArrayHasKey(
                        'userid',
                        $userAttributes,
                        __METHOD__ . " userAttributes array does not have key 'userid'"
                try {
                    $this->createLdapUser($userAttributes);
                } catch (LdapException $exception) {
                    throw new Exception(
                        __METHOD__ . " cannot create a LDAP user with provided data. Error: $exception"
                    );
                    $attributesToCreateUser['email'] = $userAttributes['userid'] . '@opencloud.eu';
                } else {
                    $attributesToCreateUser['email'] = $userAttributes['email'];
                }
                $body = GraphHelper::prepareCreateUserPayload(
                    $attributesToCreateUser['userid'],
                    $attributesToCreateUser['password'],
                    $attributesToCreateUser['email'],
                    $attributesToCreateUser['displayname']
                );
                $request = GraphHelper::createRequest(
            } else {
                // Use the same logic as userHasBeenCreated for email generation
                if ($email === null) {
                    $email = $this->getEmailAddressForUser($userName);
                    if ($email === null) {
                        // escape @ & space if present in userId
                        $email = \str_replace(["@", " "], "", $userName) . '@opencloud.eu';
                    }
                }

                $userName = $this->getActualUsername($userName);
                $userName = \trim($userName);

                $response = GraphHelper::createUser(
                    $this->getBaseUrl(),
                    $this->getStepLineRef(),
                    "POST",
                    'users',
                    $body,
                    $this->getAdminUsername(),
                    $this->getAdminPassword(),
                    $userName,
                    $password,
                    $email,
                    $displayName,
                );
                // Add the request to the $requests array so that they can be sent in parallel.
                $requests[] = $request;

                Assert::assertEquals(
                    201,
                    $response->getStatusCode(),
                    __METHOD__ . " cannot create user '$userName' using Graph API.\nResponse:" .
                    json_encode($this->getJsonDecodedResponse($response))
                );

                $userId = $this->getJsonDecodedResponse($response)['id'];
            }
        }

        $exceptionToThrow = null;
        if (!$this->isTestingWithLdap()) {
            $results = HttpRequestHelper::sendBatchRequest($requests, $client);
            // Check all requests to inspect failures.
            foreach ($results as $key => $e) {
                if ($e instanceof ClientException) {
                    $responseBody = $this->getJsonDecodedResponse($e->getResponse());
                    $httpStatusCode = $e->getResponse()->getStatusCode();
                    $graphStatusCode = $responseBody['error']['code'];
                    $messageText = $responseBody['error']['message'];
                    $exceptionToThrow = new Exception(
                        __METHOD__ .
                        " Unexpected failure when creating the user '" .
                        $users[$key]['userid'] . "'" .
                        "\nHTTP status $httpStatusCode " .
                        "\nGraph status $graphStatusCode " .
                        "\nError message $messageText"
                    );
                }
            }
        }
        $this->addUserToCreatedUsersList($userName, $password, $displayName, $email, $userId ?? null);

        // Create requests for setting displayname and email for the newly created users.
        // These values cannot be set while creating the user, so we have to edit the newly created user to set these values.
        foreach ($users as $userAttributes) {
            if (!$this->isTestingWithLdap()) {
                // for graph api, we need to save the user id to be able to add it in some group
                // can be fetched with the "onPremisesSamAccountName" i.e. userid
                $response = $this->graphContext->adminHasRetrievedUserUsingTheGraphApi($userAttributes['userid']);
                $userAttributes['id'] = $this->getJsonDecodedResponse($response)['id'];
            } else {
                $userAttributes['id'] = null;
            }
            $this->addUserToCreatedUsersList(
                $userAttributes['userid'],
                $userAttributes['password'],
                $userAttributes['displayName'],
                $userAttributes['email'],
                $userAttributes['id']
            );
        }

        if (isset($exceptionToThrow)) {
            throw $exceptionToThrow;
        }

        foreach ($users as $user) {
            Assert::assertTrue(
                $this->userExists($user["userid"]),
                "User '" . $user["userid"] . "' should exist but does not exist"
            );
        }

        if ($initialize) {
            foreach ($users as $user) {
                $this->initializeUser($user['userid'], $user['password']);
        if ($initialize) {
            $this->initializeUser($userName, $password);
            }
        }
    }
@@ -841,45 +797,16 @@ trait Provisioning {
     */
    public function userHasBeenDeleted(string $user): void {
        $user = $this->getActualUsername($user);
        if ($this->userExists($user)) {
            if ($this->isTestingWithLdap() && \in_array($user, $this->ldapCreatedUsers)) {
                $this->deleteLdapUser($user);
            } else {
                $response = $this->deleteUser($user);
                $this->theHTTPStatusCodeShouldBe(204, "", $response);
                WebDavHelper::removeSpaceIdReferenceForUser($user);
            }
        if ($this->isTestingWithLdap() && \in_array($user, $this->ldapCreatedUsers)) {
            $this->deleteLdapUser($user);
        } else {
            $response = $this->deleteUser($user);
            $this->theHTTPStatusCodeShouldBe(204, "", $response);
            WebDavHelper::removeSpaceIdReferenceForUser($user);
        }
        Assert::assertFalse(
            $this->userExists($user),
            "User '$user' should not exist but does exist"
        );
        $this->rememberThatUserIsNotExpectedToExist($user);
    }

    /**
     * @Given these users have been initialized:
     * expects a table of users with the heading
     * "|username|password|"
     *
     * @param TableNode $table
     *
     * @return void
     */
    public function theseUsersHaveBeenInitialized(TableNode $table): void {
        foreach ($table as $row) {
            if (!isset($row ['password'])) {
                $password = $this->getPasswordForUser($row ['username']);
            } else {
                $password = $row ['password'];
            }
            $this->initializeUser(
                $row ['username'],
                $password
            );
        }
    }

    /**
     * get all the existing groups
     *
@@ -961,13 +888,14 @@ trait Provisioning {
            $url = $this->getBaseUrl()
                . "/ocs/v$this->ocsApiVersion.php/cloud/users/$user";
        }

        HttpRequestHelper::get(
            $url,
            $this->getStepLineRef(),
            $user,
            $password
        );
        if ($password !== '') {
            HttpRequestHelper::get(
                $url,
                $this->getStepLineRef(),
                $user,
                $password
            );
        }
    }

    /**
@@ -1162,12 +1090,6 @@ trait Provisioning {
        }

        $this->addUserToCreatedUsersList($user, $password, $displayName, $email, $userId);

        Assert::assertTrue(
            $this->userExists($user),
            "User '$user' should exist but does not exist"
        );

        $this->initializeUser($user, $password);
    }

@@ -1999,21 +1921,15 @@ trait Provisioning {
        $this->usingServer('LOCAL');
        foreach ($this->createdUsers as $userData) {
            $user = $userData['actualUsername'];
            TokenHelper::clearUserTokens($user, $this->getBaseUrl());
            $this->deleteUser($user);
            Assert::assertFalse(
                $this->userExists($user),
                "User '$user' should not exist but does exist"
            );
            $this->rememberThatUserIsNotExpectedToExist($user);
        }
        $this->usingServer('REMOTE');
        foreach ($this->createdRemoteUsers as $userData) {
            $user = $userData['actualUsername'];
            TokenHelper::clearUserTokens($user, $this->getBaseUrl());
            $this->deleteUser($user);
            Assert::assertFalse(
                $this->userExists($user),
                "User '$user' should not exist but does exist"
            );
            $this->rememberThatUserIsNotExpectedToExist($user);
        }
        $this->usingServer($previousServer);

@@ -741,6 +741,17 @@ trait WebDav {
        $this->setResponse($this->downloadFileWithRange($user, $fileSource, $range));
    }

    /**
     * @When the user waits for :time seconds for postprocessing to finish
     *
     * @param int $time
     *
     * @return void
     */
    public function waitForCertainSeconds(int $time): void {
        \sleep($time);
    }

    /**
     * @Then /^user "([^"]*)" using password "([^"]*)" should not be able to download file "([^"]*)"$/
     *

@@ -120,3 +120,26 @@ Feature: Propfind test
      | Manager      | RDNVWZP |
      | Space Editor | DNVW    |
      | Space Viewer |         |

  @issue-1523
  Scenario: propfind response contains a restored folder with correct name
    Given user "Alice" has created a folder "folderMain" in space "Personal"
    And user "Alice" has deleted folder "folderMain"
    And user "Alice" has created a folder "folderMain" in space "Personal"
    When user "Alice" restores the folder with original path "/folderMain" to "/folderMain (1)" using the trashbin API
    And user "Alice" sends PROPFIND request to space "Personal" using the WebDAV API
    Then the HTTP status code should be "207"
    And as user "Alice" the PROPFIND response should contain a resource "folderMain" with these key and value pairs:
      | key            | value             |
      | oc:fileid      | %file_id_pattern% |
      | oc:file-parent | %file_id_pattern% |
      | oc:name        | folderMain        |
      | oc:permissions | RDNVCKZP          |
      | oc:size        | 0                 |
    And as user "Alice" the PROPFIND response should contain a resource "folderMain (1)" with these key and value pairs:
      | key            | value             |
      | oc:fileid      | %file_id_pattern% |
      | oc:file-parent | %file_id_pattern% |
      | oc:name        | folderMain (1)    |
      | oc:permissions | RDNVCKZP          |
      | oc:size        | 0                 |

@@ -219,7 +219,7 @@ Feature: edit user
    When the user "Brian" resets the password of user "Carol" to "newpassword" using the Graph API
    Then the HTTP status code should be "403"
    And the content of file "resetpassword.txt" for user "Carol" using password "1234" should be "test file for reset password"
    But user "Carol" using password "newpassword" should not be able to download file "resetpassword.txt"
    And user "Carol" should not be able to log in with wrong password "newpassword"
    Examples:
      | user-role   | user-role-2 |
      | Space Admin | Space Admin |

@@ -542,6 +542,7 @@ Feature: enable or disable sync of incoming shares
      | sharee          | Brian  |
      | shareType       | user   |
      | permissionsRole | Viewer |
    And user "Brian" has a share "textfile0.txt" synced
    And the user "Admin" has deleted a user "Alice"
    When user "Brian" disables sync of share "textfile0.txt" using the Graph API
    Then the HTTP status code should be "204"
@@ -820,6 +821,7 @@ Feature: enable or disable sync of incoming shares
      | sharee          | Brian  |
      | shareType       | user   |
      | permissionsRole | Viewer |
    And user "Brian" has a share "<resource>" synced
    And user "Brian" has disabled sync of last shared resource
    When user "Brian" disables sync of share "<resource>" using the Graph API
    Then the HTTP status code should be "409"

@@ -14,7 +14,7 @@ Feature: reset user password via CLI command
    But the command output should not contain "Failed to update user password: entry does not exist"
    And the administrator has started the server
    And user "Alice" should be able to create folder "newFolder" using password "newpass"
    But user "Alice" should not be able to create folder "anotherFolder" using password "%alt1%"
    But user "Alice" should not be able to log in with wrong password "%alt1%"


  Scenario: try to reset password of non-existing user

@@ -567,3 +567,52 @@ Feature: restore deleted files/folders
      | dav-path-version |
      | spaces           |
      | new              |

  @issue-1523
  Scenario Outline: restore deleted folder when folder with same name exists
    Given using <dav-path-version> DAV path
    And user "Alice" has created folder "new"
    And user "Alice" has uploaded file with content "content" to "new/test.txt"
    And user "Alice" has deleted folder "new"
    And user "Alice" has created folder "new"
    And user "Alice" has uploaded file with content "new content" to "new/new-file.txt"
    When user "Alice" restores the folder with original path "/new" to "/new (1)" using the trashbin API
    Then the HTTP status code should be "201"
    And as "Alice" the following folders should exist
      | path     |
      | /new     |
      | /new (1) |
    And as "Alice" the following files should exist
      | path              |
      | /new/new-file.txt |
      | /new (1)/test.txt |
    Examples:
      | dav-path-version |
      | spaces           |
      | new              |

  @issue-1523
  Scenario Outline: restore deleted folder with files when folder with same name exists
    Given using <dav-path-version> DAV path
    And user "Alice" has created folder "folder-a"
    And user "Alice" has uploaded file with content "content b" to "folder-a/b.txt"
    And user "Alice" has uploaded file with content "content c" to "folder-a/c.txt"
    And user "Alice" has deleted file "folder-a/b.txt"
    And user "Alice" has deleted folder "folder-a"
    And user "Alice" has created folder "folder-a"
    When user "Alice" restores the file with original path "folder-a/b.txt" using the trashbin API
    Then the HTTP status code should be "201"
    When user "Alice" restores the folder with original path "/folder-a" to "/folder-a (1)" using the trashbin API
    Then the HTTP status code should be "201"
    And as "Alice" the following folders should exist
      | path          |
      | /folder-a     |
      | /folder-a (1) |
    And as "Alice" the following files should exist
      | path                |
      | /folder-a/b.txt     |
      | /folder-a (1)/c.txt |
    Examples:
      | dav-path-version |
      | spaces           |
      | new              |

@@ -50,6 +50,7 @@ Feature: low level tests for upload of chunks
      | Upload-Metadata | filename ZmlsZS50eHQ= |
    When user "Alice" sends a chunk to the last created TUS Location with offset "0" and data "123" using the WebDAV API
    And user "Alice" sends a chunk to the last created TUS Location with offset "3" and data "4567890" using the WebDAV API
    And the user waits for "2" seconds for postprocessing to finish
    And user "Alice" sends a chunk to the last created TUS Location with offset "3" and data "0000000" using the WebDAV API
    Then the HTTP status code should be "404"
    And the content of file "/file.txt" for user "Alice" should be "1234567890"

3  vendor/github.com/blevesearch/bleve/v2/README.md  generated  vendored
@@ -4,7 +4,6 @@
[](https://coveralls.io/github/blevesearch/bleve?branch=master)
[](https://pkg.go.dev/github.com/blevesearch/bleve/v2)
[](https://app.gitter.im/#/room/#blevesearch_bleve:gitter.im)
[](https://codebeat.co/projects/github-com-blevesearch-bleve)
[](https://goreportcard.com/report/github.com/blevesearch/bleve/v2)
[](https://sourcegraph.com/github.com/blevesearch/bleve?badge)
[](https://opensource.org/licenses/Apache-2.0)
@@ -27,6 +26,8 @@ A modern indexing + search library in GO
* [synonym search](https://github.com/blevesearch/bleve/blob/master/docs/synonyms.md)
* [tf-idf](https://github.com/blevesearch/bleve/blob/master/docs/scoring.md#tf-idf) / [bm25](https://github.com/blevesearch/bleve/blob/master/docs/scoring.md#bm25) scoring models
* Hybrid search: exact + semantic
* Supports [RRF (Reciprocal Rank Fusion) and RSF (Relative Score Fusion)](docs/score_fusion.md)
* [Result pagination](https://github.com/blevesearch/bleve/blob/master/docs/pagination.md)
* Query time boosting
* Search result match highlighting with document fragments
* Aggregations/faceting support:

2  vendor/github.com/blevesearch/bleve/v2/builder.go  generated  vendored
@@ -68,7 +68,7 @@ func newBuilder(path string, mapping mapping.IndexMapping, config map[string]int
		return nil, err
	}
	config["internal"] = map[string][]byte{
		string(mappingInternalKey): mappingBytes,
		string(util.MappingInternalKey): mappingBytes,
	}

	// do not use real config, as these are options for the builder,

vendor/github.com/blevesearch/bleve/v2/fusion/fusion.go
generated
vendored
Normal file
26
vendor/github.com/blevesearch/bleve/v2/fusion/fusion.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright (c) 2025 Couchbase, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fusion
|
||||
|
||||
import (
|
||||
"github.com/blevesearch/bleve/v2/search"
|
||||
)
|
||||
|
||||
type FusionResult struct {
|
||||
Hits search.DocumentMatchCollection
|
||||
Total uint64
|
||||
MaxScore float64
|
||||
}
|
||||
131
vendor/github.com/blevesearch/bleve/v2/fusion/rrf.go
generated
vendored
Normal file
131
vendor/github.com/blevesearch/bleve/v2/fusion/rrf.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright (c) 2025 Couchbase, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fusion
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/blevesearch/bleve/v2/search"
|
||||
)
|
||||
|
||||
func formatRRFMessage(weight float64, rank int, rankConstant int) string {
|
||||
return fmt.Sprintf("rrf score (weight=%.3f, rank=%d, rank_constant=%d), normalized score of", weight, rank, rankConstant)
|
||||
}
|
||||
|
||||
// ReciprocalRankFusion performs a reciprocal rank fusion on the search results.
|
||||
func ReciprocalRankFusion(hits search.DocumentMatchCollection, weights []float64, rankConstant int, windowSize int, numKNNQueries int, explain bool) FusionResult {
|
||||
if len(hits) == 0 {
|
||||
return FusionResult{
|
||||
Hits: hits,
|
||||
Total: 0,
|
||||
MaxScore: 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
// Create a map of document ID to a slice of ranks.
|
||||
// The first element of the slice is the rank from the FTS search,
|
||||
// and the subsequent elements are the ranks from the KNN searches.
|
||||
docRanks := make(map[string][]int)
|
||||
|
||||
// Pre-assign rank lists to each candidate document
|
||||
for _, hit := range hits {
|
||||
docRanks[hit.ID] = make([]int, numKNNQueries+1)
|
||||
}
|
||||
|
||||
// Only a max of `window_size` elements need to be counted for. Stop
|
||||
// calculating rank once this threshold is hit.
|
||||
sort.Slice(hits, func(a, b int) bool {
|
||||
return scoreSortFunc()(hits[a], hits[b]) < 0
|
||||
})
|
||||
// Only consider top windowSize docs for rescoring
|
||||
for i := range min(windowSize, len(hits)) {
|
||||
if hits[i].Score != 0.0 {
|
||||
// Skip if Score is 0, since that means the document was not
|
||||
// found as part of FTS, and only in KNN.
|
||||
docRanks[hits[i].ID][0] = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate knnDocs and reuse it within the loop
|
||||
knnDocs := make([]*search.DocumentMatch, 0, len(hits))
|
||||
|
||||
// For each KNN query, rank the documents based on their KNN score.
|
||||
for i := range numKNNQueries {
|
||||
knnDocs = knnDocs[:0]
|
||||
|
||||
for _, hit := range hits {
|
||||
if _, ok := hit.ScoreBreakdown[i]; ok {
|
||||
knnDocs = append(knnDocs, hit)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort the documents based on their score for this KNN query.
|
||||
sort.Slice(knnDocs, func(a, b int) bool {
|
||||
return scoreBreakdownSortFunc(i)(knnDocs[a], knnDocs[b]) < 0
|
||||
})
|
||||
|
||||
// Update the ranks of the documents in the docRanks map.
|
||||
// Only consider top windowSize docs for rescoring.
|
||||
for j := range min(windowSize, len(knnDocs)) {
|
||||
docRanks[knnDocs[j].ID][i+1] = j + 1
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate the RRF score for each document.
|
||||
var maxScore float64
|
||||
for _, hit := range hits {
|
||||
var rrfScore float64
|
||||
var explChildren []*search.Explanation
|
||||
if explain {
|
||||
explChildren = make([]*search.Explanation, 0, numKNNQueries+1)
|
||||
}
|
||||
for i, rank := range docRanks[hit.ID] {
|
||||
if rank > 0 {
|
||||
partialRrfScore := weights[i] * 1.0 / float64(rankConstant+rank)
|
||||
if explain {
|
||||
expl := getFusionExplAt(
|
||||
hit,
|
||||
i,
|
||||
partialRrfScore,
|
||||
formatRRFMessage(weights[i], rank, rankConstant),
|
||||
)
|
||||
explChildren = append(explChildren, expl)
|
||||
}
|
||||
rrfScore += partialRrfScore
|
||||
}
|
||||
}
|
||||
hit.Score = rrfScore
|
||||
hit.ScoreBreakdown = nil
|
||||
if rrfScore > maxScore {
|
||||
maxScore = rrfScore
|
||||
}
|
||||
|
||||
if explain {
|
||||
finalizeFusionExpl(hit, explChildren)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(hits)
|
||||
if len(hits) > windowSize {
|
||||
hits = hits[:windowSize]
|
||||
}
|
||||
return FusionResult{
|
||||
Hits: hits,
|
||||
Total: uint64(len(hits)),
|
||||
MaxScore: maxScore,
|
||||
}
|
||||
}
|
||||
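ReciprocalRankFusion above scores each document as a weighted sum of 1/(rankConstant + rank) over every ranking the document appears in (FTS first, then each KNN query). A self-contained toy sketch of that arithmetic, with illustrative names and data only (no bleve types):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	const rankConstant = 60 // a common default in the RRF literature
	weights := []float64{1.0, 1.0}

	// rank of each doc per ranking (0 = absent); index 0 is the FTS
	// ranking, index 1 a KNN ranking, mirroring docRanks above.
	docRanks := map[string][]int{
		"docA": {1, 3},
		"docB": {2, 1},
		"docC": {0, 2}, // KNN-only hit, no FTS rank
	}

	// fused score = sum over rankings of weight / (rankConstant + rank)
	scores := map[string]float64{}
	for id, ranks := range docRanks {
		for i, rank := range ranks {
			if rank > 0 {
				scores[id] += weights[i] / float64(rankConstant+rank)
			}
		}
	}

	ids := []string{"docA", "docB", "docC"}
	sort.Slice(ids, func(a, b int) bool { return scores[ids[a]] > scores[ids[b]] })
	for _, id := range ids {
		fmt.Printf("%s: %.5f\n", id, scores[id]) // docB, docA, docC
	}
}
```

Note how docC still gets a nonzero fused score from its KNN rank alone, which is exactly why the real implementation skips only the FTS rank (index 0) for KNN-only hits.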
162  vendor/github.com/blevesearch/bleve/v2/fusion/rsf.go  generated  vendored  Normal file
@@ -0,0 +1,162 @@
// Copyright (c) 2025 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fusion

import (
	"fmt"
	"sort"

	"github.com/blevesearch/bleve/v2/search"
)

func formatRSFMessage(weight float64, normalizedScore float64, minScore float64, maxScore float64) string {
	return fmt.Sprintf("rsf score (weight=%.3f, normalized=%.6f, min=%.6f, max=%.6f), normalized score of",
		weight, normalizedScore, minScore, maxScore)
}

// RelativeScoreFusion normalizes scores based on min/max values for FTS and each KNN query, then applies weights.
func RelativeScoreFusion(hits search.DocumentMatchCollection, weights []float64, windowSize int, numKNNQueries int, explain bool) FusionResult {
	if len(hits) == 0 {
		return FusionResult{
			Hits:     hits,
			Total:    0,
			MaxScore: 0.0,
		}
	}

	rsfScores := make(map[string]float64)

	// contains the docs under consideration for scoring.
	// Reused for fts and knn hits
	scoringDocs := make([]*search.DocumentMatch, 0, len(hits))
	var explMap map[string][]*search.Explanation
	if explain {
		explMap = make(map[string][]*search.Explanation)
	}
	// remove non-fts hits
	for _, hit := range hits {
		if hit.Score != 0.0 {
			scoringDocs = append(scoringDocs, hit)
		}
	}
	// sort hits by fts score
	sort.Slice(scoringDocs, func(a, b int) bool {
		return scoreSortFunc()(scoringDocs[a], scoringDocs[b]) < 0
	})
	// Reslice to correct size
	if len(scoringDocs) > windowSize {
		scoringDocs = scoringDocs[:windowSize]
	}

	var min, max float64
	if len(scoringDocs) > 0 {
		min, max = scoringDocs[len(scoringDocs)-1].Score, scoringDocs[0].Score
	}

	for _, hit := range scoringDocs {
		var tempRsfScore float64
		if max > min {
			tempRsfScore = (hit.Score - min) / (max - min)
		} else {
			tempRsfScore = 1.0
		}

		if explain {
			// create and replace new explanation
			expl := getFusionExplAt(
				hit,
				0,
				tempRsfScore,
				formatRSFMessage(weights[0], tempRsfScore, min, max),
			)
			explMap[hit.ID] = append(explMap[hit.ID], expl)
		}

		rsfScores[hit.ID] = weights[0] * tempRsfScore
	}

	for i := range numKNNQueries {
		scoringDocs = scoringDocs[:0]
		for _, hit := range hits {
			if _, exists := hit.ScoreBreakdown[i]; exists {
				scoringDocs = append(scoringDocs, hit)
			}
		}

		sort.Slice(scoringDocs, func(a, b int) bool {
			return scoreBreakdownSortFunc(i)(scoringDocs[a], scoringDocs[b]) < 0
		})

		if len(scoringDocs) > windowSize {
			scoringDocs = scoringDocs[:windowSize]
		}

		if len(scoringDocs) > 0 {
			min, max = scoringDocs[len(scoringDocs)-1].ScoreBreakdown[i], scoringDocs[0].ScoreBreakdown[i]
		} else {
			min, max = 0.0, 0.0
		}

		for _, hit := range scoringDocs {
			var tempRsfScore float64
			if max > min {
				tempRsfScore = (hit.ScoreBreakdown[i] - min) / (max - min)
			} else {
				tempRsfScore = 1.0
			}

			if explain {
				expl := getFusionExplAt(
					hit,
					i+1,
					tempRsfScore,
					formatRSFMessage(weights[i+1], tempRsfScore, min, max),
				)
				explMap[hit.ID] = append(explMap[hit.ID], expl)
			}

			rsfScores[hit.ID] += weights[i+1] * tempRsfScore
		}
	}

	var maxScore float64
	for _, hit := range hits {
		if rsfScore, exists := rsfScores[hit.ID]; exists {
			hit.Score = rsfScore
			if rsfScore > maxScore {
				maxScore = rsfScore
			}
			if explain {
				finalizeFusionExpl(hit, explMap[hit.ID])
			}
		} else {
			hit.Score = 0.0
		}

		hit.ScoreBreakdown = nil
	}

	sort.Sort(hits)

	if len(hits) > windowSize {
		hits = hits[:windowSize]
	}

	return FusionResult{
		Hits:     hits,
		Total:    uint64(len(hits)),
		MaxScore: maxScore,
	}
}
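RelativeScoreFusion above min-max normalizes each ranking's scores into [0, 1] before taking the weighted sum, so rankings on different scales (e.g. BM25 vs. cosine similarity) can be combined. A standalone sketch of that normalization step with toy inputs (illustrative names only):

```go
package main

import "fmt"

// normalize maps scores to [0,1] via (s-min)/(max-min); if all scores are
// equal it returns 1.0 for each, matching the max == min branch above.
func normalize(scores map[string]float64) map[string]float64 {
	var min, max float64
	first := true
	for _, s := range scores {
		if first || s < min {
			min = s
		}
		if first || s > max {
			max = s
		}
		first = false
	}
	out := make(map[string]float64, len(scores))
	for id, s := range scores {
		if max > min {
			out[id] = (s - min) / (max - min)
		} else {
			out[id] = 1.0
		}
	}
	return out
}

func main() {
	// FTS scores are BM25-scale, KNN scores are cosine-scale; raw sums
	// would let the larger scale dominate, normalization fixes that.
	fts := normalize(map[string]float64{"docA": 12.5, "docB": 3.1, "docC": 7.0})
	knn := normalize(map[string]float64{"docA": 0.62, "docB": 0.91, "docC": 0.55})
	weights := []float64{1.0, 1.0}

	for _, id := range []string{"docA", "docB", "docC"} {
		fused := weights[0]*fts[id] + weights[1]*knn[id]
		fmt.Printf("%s: %.3f\n", id, fused)
	}
}
```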
96  vendor/github.com/blevesearch/bleve/v2/fusion/util.go  generated  vendored  Normal file
@@ -0,0 +1,96 @@
// Copyright (c) 2025 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fusion

import (
	"github.com/blevesearch/bleve/v2/search"
)

// scoreBreakdownSortFunc returns a comparison function for sorting DocumentMatch objects
// by their ScoreBreakdown at the specified index in descending order.
// In case of ties, documents with lower HitNumber (earlier hits) are preferred.
// If either document is missing the ScoreBreakdown for the specified index,
// it's treated as having a score of 0.0.
func scoreBreakdownSortFunc(idx int) func(i, j *search.DocumentMatch) int {
	return func(i, j *search.DocumentMatch) int {
		// Safely extract scores, defaulting to 0.0 if missing
		iScore := 0.0
		jScore := 0.0

		if i.ScoreBreakdown != nil {
			if score, ok := i.ScoreBreakdown[idx]; ok {
				iScore = score
			}
		}

		if j.ScoreBreakdown != nil {
			if score, ok := j.ScoreBreakdown[idx]; ok {
				jScore = score
			}
		}

		// Sort by score in descending order (higher scores first)
		if iScore > jScore {
			return -1
		} else if iScore < jScore {
			return 1
		}

		// Break ties by HitNumber in ascending order (lower HitNumber wins)
		if i.HitNumber < j.HitNumber {
			return -1
		} else if i.HitNumber > j.HitNumber {
			return 1
		}

		return 0 // Equal scores and HitNumbers
	}
}

func scoreSortFunc() func(i, j *search.DocumentMatch) int {
	return func(i, j *search.DocumentMatch) int {
		// Sort by score in descending order
		if i.Score > j.Score {
			return -1
		} else if i.Score < j.Score {
			return 1
		}

		// Break ties by HitNumber
		if i.HitNumber < j.HitNumber {
			return -1
		} else if i.HitNumber > j.HitNumber {
			return 1
		}

		return 0
	}
}

func getFusionExplAt(hit *search.DocumentMatch, i int, value float64, message string) *search.Explanation {
	return &search.Explanation{
		Value:    value,
		Message:  message,
		Children: []*search.Explanation{hit.Expl.Children[i]},
	}
}

func finalizeFusionExpl(hit *search.DocumentMatch, explChildren []*search.Explanation) {
	hit.Expl.Children = explChildren

	hit.Expl.Value = hit.Score
	hit.Expl.Message = "sum of"
}
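The comparators above encode one ordering contract: descending score, with ascending HitNumber as the tie-break. A minimal demonstration of that contract with a stand-in struct (not bleve's DocumentMatch):

```go
package main

import (
	"fmt"
	"sort"
)

type match struct {
	ID        string
	Score     float64
	HitNumber uint64
}

// cmp mirrors the -1/0/1 convention of the comparators above.
func cmp(i, j match) int {
	if i.Score > j.Score {
		return -1
	} else if i.Score < j.Score {
		return 1
	}
	if i.HitNumber < j.HitNumber {
		return -1
	} else if i.HitNumber > j.HitNumber {
		return 1
	}
	return 0
}

func main() {
	hits := []match{
		{"a", 0.4, 2},
		{"b", 0.9, 1},
		{"c", 0.9, 0}, // ties with "b" on score, wins on HitNumber
	}
	sort.Slice(hits, func(a, b int) bool { return cmp(hits[a], hits[b]) < 0 })
	fmt.Println(hits) // [{c 0.9 0} {b 0.9 1} {a 0.4 2}]
}
```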
2  vendor/github.com/blevesearch/bleve/v2/geo/README.md  generated  vendored
@@ -308,5 +308,5 @@ First, all of this geo code is a Go adaptation of the [Lucene 5.3.2 sandbox geo
- LineStrings and MultiLineStrings may only contain Points and MultiPoints.
- Polygons or MultiPolygons intersecting Polygons and MultiPolygons may return arbitrary results when the overlap is only an edge or a vertex.
- Circles containing polygon will return a false positive result if all of the vertices of the polygon are within the circle, but the orientation of those points are clock-wise.
- The edges of an Envelope follows the latitude and logitude lines instead of the shortest path on a globe.
- The edges of an Envelope follows the latitude and longitude lines instead of the shortest path on a globe.
- Envelope intersecting queries with LineStrings, MultiLineStrings, Polygons and MultiPolygons implicitly converts the Envelope into a Polygon which changes the curvature of the edges causing inaccurate results for few edge cases.

2  vendor/github.com/blevesearch/bleve/v2/geo/geo.go  generated  vendored
@@ -114,7 +114,7 @@ func DegreesToRadians(d float64) float64 {
	return d * degreesToRadian
}

// RadiansToDegrees converts an angle in radians to degress
// RadiansToDegrees converts an angle in radians to degrees
func RadiansToDegrees(r float64) float64 {
	return r * radiansToDegrees
}

2  vendor/github.com/blevesearch/bleve/v2/geo/geo_dist.go  generated  vendored
@@ -83,7 +83,7 @@ func ParseDistanceUnit(u string) (float64, error) {
}

// Haversin computes the distance between two points.
// This implemenation uses the sloppy math implemenations which trade off
// This implementation uses the sloppy math implementations which trade off
// accuracy for performance. The distance returned is in kilometers.
func Haversin(lon1, lat1, lon2, lat2 float64) float64 {
	x1 := lat1 * degreesToRadian

4  vendor/github.com/blevesearch/bleve/v2/index.go  generated  vendored
@@ -149,7 +149,7 @@ func (b *Batch) String() string {
}

// Reset returns a Batch to the empty state so that it can
// be re-used in the future.
// be reused in the future.
func (b *Batch) Reset() {
	b.internal.Reset()
	b.lastDocSize = 0
@@ -325,6 +325,8 @@ func Open(path string) (Index, error) {
// The mapping used when it was created will be used for all Index/Search operations.
// The provided runtimeConfig can override settings
// persisted when the kvstore was created.
// If runtimeConfig has updated mapping, then an index update is attempted
// Throws an error without any changes to the index if an unupdatable mapping is provided
func OpenUsing(path string, runtimeConfig map[string]interface{}) (Index, error) {
	return openIndexUsing(path, runtimeConfig)
}

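OpenUsing, per the comment above, reopens an existing index while letting runtimeConfig override persisted settings, and may now attempt a mapping update. A minimal usage sketch; the "read_only" config key here is an illustrative assumption, not a documented option:

```go
package main

import (
	"log"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	// Reopen an existing index with a runtime config override.
	idx, err := bleve.OpenUsing("example.bleve", map[string]interface{}{
		"read_only": true, // assumed/illustrative runtime override
	})
	if err != nil {
		log.Fatal(err)
	}
	defer idx.Close()

	count, err := idx.DocCount()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("documents: %d", count)
}
```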
2  vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go  generated  vendored
@@ -293,7 +293,7 @@ func (s *Scorch) introducePersist(persist *persistIntroduction) {
	newIndexSnapshot.segment[i] = newSegmentSnapshot
	delete(persist.persisted, segmentSnapshot.id)

	// update items persisted incase of a new segment snapshot
	// update items persisted in case of a new segment snapshot
	atomic.AddUint64(&s.stats.TotPersistedItems, newSegmentSnapshot.Count())
	atomic.AddUint64(&s.stats.TotPersistedSegments, 1)
	fileSegments++

2  vendor/github.com/blevesearch/bleve/v2/index/scorch/mergeplan/merge_plan.go  generated  vendored
@@ -295,7 +295,7 @@ func plan(segmentsIn []Segment, o *MergePlanOptions) (*MergePlan, error) {
	if len(bestRoster) == 0 {
		return rv, nil
	}
	// create tasks with valid merges - i.e. there should be atleast 2 non-empty segments
	// create tasks with valid merges - i.e. there should be at least 2 non-empty segments
	if len(bestRoster) > 1 {
		rv.Tasks = append(rv.Tasks, &MergeTask{Segments: bestRoster})
	}

8  vendor/github.com/blevesearch/bleve/v2/index/scorch/optimize_knn.go  generated  vendored
@@ -79,6 +79,12 @@ func (o *OptimizeVR) Finish() error {
		wg.Done()
	}()
	for field, vrs := range o.vrs {
		// Early exit if the field is supposed to be completely deleted or
		// if it's index data has been deleted
		if info, ok := o.snapshot.updatedFields[field]; ok && (info.Deleted || info.Index) {
			continue
		}

		vecIndex, err := segment.InterpretVectorIndex(field,
			o.requiresFiltering, origSeg.deleted)
		if err != nil {
@@ -185,7 +191,7 @@ func (s *IndexSnapshotVectorReader) VectorOptimize(ctx context.Context,
	err := cbF(sumVectorIndexSize)
	if err != nil {
		// it's important to invoke the end callback at this point since
		// if the earlier searchers of this optimze struct were successful
		// if the earlier searchers of this optimize struct were successful
		// the cost corresponding to it would be incremented and if the
		// current searcher fails the check then we end up erroring out
		// the overall optimized searcher creation, the cost needs to be

116
vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go
generated
vendored
116
vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go
generated
vendored
@@ -386,7 +386,7 @@ type flushable struct {
|
||||
totDocs uint64
|
||||
}
|
||||
|
||||
// number workers which parallely perform an in-memory merge of the segments
|
||||
// number workers which parallelly perform an in-memory merge of the segments
|
||||
// followed by a flush operation.
|
||||
var DefaultNumPersisterWorkers = 1
|
||||
|
||||
@@ -395,7 +395,7 @@ var DefaultNumPersisterWorkers = 1
|
||||
var DefaultMaxSizeInMemoryMergePerWorker = 0
|
||||
|
||||
func legacyFlushBehaviour(maxSizeInMemoryMergePerWorker, numPersisterWorkers int) bool {
|
||||
// DefaultMaxSizeInMemoryMergePerWorker = 0 is a special value to preserve the leagcy
|
||||
// DefaultMaxSizeInMemoryMergePerWorker = 0 is a special value to preserve the legacy
|
||||
// one-shot in-memory merge + flush behaviour.
|
||||
return maxSizeInMemoryMergePerWorker == 0 && numPersisterWorkers == 1
|
||||
}
|
||||
@@ -608,7 +608,7 @@ func persistToDirectory(seg segment.UnpersistedSegment, d index.Directory,
|
||||
func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
|
||||
segPlugin SegmentPlugin, exclude map[uint64]struct{}, d index.Directory) (
|
||||
[]string, map[uint64]string, error) {
|
||||
snapshotsBucket, err := tx.CreateBucketIfNotExists(boltSnapshotsBucket)
|
||||
snapshotsBucket, err := tx.CreateBucketIfNotExists(util.BoltSnapshotsBucket)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -619,17 +619,17 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
|
||||
}
|
||||
|
||||
// persist meta values
|
||||
metaBucket, err := snapshotBucket.CreateBucketIfNotExists(boltMetaDataKey)
|
||||
metaBucket, err := snapshotBucket.CreateBucketIfNotExists(util.BoltMetaDataKey)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = metaBucket.Put(boltMetaDataSegmentTypeKey, []byte(segPlugin.Type()))
|
||||
err = metaBucket.Put(util.BoltMetaDataSegmentTypeKey, []byte(segPlugin.Type()))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
buf := make([]byte, binary.MaxVarintLen32)
|
||||
binary.BigEndian.PutUint32(buf, segPlugin.Version())
|
||||
err = metaBucket.Put(boltMetaDataSegmentVersionKey, buf)
|
||||
err = metaBucket.Put(util.BoltMetaDataSegmentVersionKey, buf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -643,13 +643,13 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = metaBucket.Put(boltMetaDataTimeStamp, timeStampBinary)
|
||||
err = metaBucket.Put(util.BoltMetaDataTimeStamp, timeStampBinary)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// persist internal values
|
||||
internalBucket, err := snapshotBucket.CreateBucketIfNotExists(boltInternalKey)
|
||||
internalBucket, err := snapshotBucket.CreateBucketIfNotExists(util.BoltInternalKey)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -665,7 +665,7 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
|
||||
val := make([]byte, 8)
|
||||
bytesWritten := atomic.LoadUint64(&snapshot.parent.stats.TotBytesWrittenAtIndexTime)
|
||||
binary.LittleEndian.PutUint64(val, bytesWritten)
|
||||
err = internalBucket.Put(TotBytesWrittenKey, val)
|
||||
err = internalBucket.Put(util.TotBytesWrittenKey, val)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -689,7 +689,7 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
|
||||
return nil, nil, fmt.Errorf("segment: %s copy err: %v", segPath, err)
|
||||
}
|
||||
filename := filepath.Base(segPath)
|
||||
err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename))
|
||||
err = snapshotSegmentBucket.Put(util.BoltPathKey, []byte(filename))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -705,7 +705,7 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
|
||||
return nil, nil, fmt.Errorf("segment: %s persist err: %v", path, err)
|
||||
}
|
||||
newSegmentPaths[segmentSnapshot.id] = path
|
||||
err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename))
|
||||
err = snapshotSegmentBucket.Put(util.BoltPathKey, []byte(filename))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -721,7 +721,7 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error persisting roaring bytes: %v", err)
|
||||
}
|
||||
err = snapshotSegmentBucket.Put(boltDeletedKey, roaringBuf.Bytes())
|
||||
err = snapshotSegmentBucket.Put(util.BoltDeletedKey, roaringBuf.Bytes())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -733,7 +733,19 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string,
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = snapshotSegmentBucket.Put(boltStatsKey, b)
|
||||
err = snapshotSegmentBucket.Put(util.BoltStatsKey, b)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// store updated field info
|
||||
if segmentSnapshot.updatedFields != nil {
|
||||
b, err := json.Marshal(segmentSnapshot.updatedFields)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = snapshotSegmentBucket.Put(util.BoltUpdatedFieldsKey, b)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -832,22 +844,9 @@ func zapFileName(epoch uint64) string {
|
||||
|
||||
// bolt snapshot code
|
||||
|
||||
var (
|
||||
boltSnapshotsBucket = []byte{'s'}
|
||||
boltPathKey = []byte{'p'}
|
||||
boltDeletedKey = []byte{'d'}
|
||||
boltInternalKey = []byte{'i'}
|
||||
boltMetaDataKey = []byte{'m'}
|
||||
boltMetaDataSegmentTypeKey = []byte("type")
|
||||
boltMetaDataSegmentVersionKey = []byte("version")
|
||||
boltMetaDataTimeStamp = []byte("timeStamp")
|
||||
boltStatsKey = []byte("stats")
|
||||
TotBytesWrittenKey = []byte("TotBytesWritten")
|
||||
)
|
||||
|
||||
func (s *Scorch) loadFromBolt() error {
|
||||
err := s.rootBolt.View(func(tx *bolt.Tx) error {
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -912,7 +911,7 @@ func (s *Scorch) loadFromBolt() error {
|
||||
// NOTE: this is currently ONLY intended to be used by the command-line tool
|
||||
func (s *Scorch) LoadSnapshot(epoch uint64) (rv *IndexSnapshot, err error) {
|
||||
err = s.rootBolt.View(func(tx *bolt.Tx) error {
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -940,14 +939,14 @@ func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) {
|
||||
// first we look for the meta-data bucket, this will tell us
|
||||
// which segment type/version was used for this snapshot
|
||||
// all operations for this scorch will use this type/version
|
||||
metaBucket := snapshot.Bucket(boltMetaDataKey)
|
||||
metaBucket := snapshot.Bucket(util.BoltMetaDataKey)
|
||||
if metaBucket == nil {
|
||||
_ = rv.DecRef()
|
||||
return nil, fmt.Errorf("meta-data bucket missing")
|
||||
}
|
||||
segmentType := string(metaBucket.Get(boltMetaDataSegmentTypeKey))
|
||||
segmentType := string(metaBucket.Get(util.BoltMetaDataSegmentTypeKey))
|
||||
segmentVersion := binary.BigEndian.Uint32(
|
||||
metaBucket.Get(boltMetaDataSegmentVersionKey))
|
||||
metaBucket.Get(util.BoltMetaDataSegmentVersionKey))
|
||||
err := s.loadSegmentPlugin(segmentType, segmentVersion)
|
||||
if err != nil {
|
||||
_ = rv.DecRef()
|
||||
@@ -957,7 +956,7 @@ func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) {
|
||||
var running uint64
|
||||
c := snapshot.Cursor()
|
||||
for k, _ := c.First(); k != nil; k, _ = c.Next() {
|
||||
if k[0] == boltInternalKey[0] {
|
||||
if k[0] == util.BoltInternalKey[0] {
|
||||
internalBucket := snapshot.Bucket(k)
|
||||
if internalBucket == nil {
|
||||
_ = rv.DecRef()
|
||||
@@ -972,11 +971,11 @@ func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) {
|
||||
_ = rv.DecRef()
|
||||
return nil, err
|
||||
}
|
||||
} else if k[0] != boltMetaDataKey[0] {
|
||||
} else if k[0] != util.BoltMetaDataKey[0] {
|
||||
segmentBucket := snapshot.Bucket(k)
|
||||
if segmentBucket == nil {
|
||||
_ = rv.DecRef()
|
||||
return nil, fmt.Errorf("segment key, but bucket missing % x", k)
|
||||
return nil, fmt.Errorf("segment key, but bucket missing %x", k)
|
||||
}
|
||||
segmentSnapshot, err := s.loadSegment(segmentBucket)
|
||||
if err != nil {
|
||||
@@ -990,6 +989,10 @@ func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) {
|
||||
}
|
||||
rv.segment = append(rv.segment, segmentSnapshot)
|
||||
rv.offsets = append(rv.offsets, running)
|
||||
// Merge all segment level updated field info for use during queries
|
||||
if segmentSnapshot.updatedFields != nil {
|
||||
rv.MergeUpdateFieldsInfo(segmentSnapshot.updatedFields)
|
||||
}
|
||||
running += segmentSnapshot.segment.Count()
|
||||
}
|
||||
}
|
||||
@@ -997,46 +1000,59 @@ func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) {
|
||||
}
|
||||
|
||||
func (s *Scorch) loadSegment(segmentBucket *bolt.Bucket) (*SegmentSnapshot, error) {
|
||||
pathBytes := segmentBucket.Get(boltPathKey)
|
||||
pathBytes := segmentBucket.Get(util.BoltPathKey)
|
||||
if pathBytes == nil {
|
||||
return nil, fmt.Errorf("segment path missing")
|
||||
}
|
||||
segmentPath := s.path + string(os.PathSeparator) + string(pathBytes)
|
||||
segment, err := s.segPlugin.Open(segmentPath)
|
||||
seg, err := s.segPlugin.Open(segmentPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening bolt segment: %v", err)
|
||||
}
|
||||
|
||||
rv := &SegmentSnapshot{
|
||||
segment: segment,
|
||||
segment: seg,
|
||||
cachedDocs: &cachedDocs{cache: nil},
|
||||
cachedMeta: &cachedMeta{meta: nil},
|
||||
}
|
||||
deletedBytes := segmentBucket.Get(boltDeletedKey)
|
||||
deletedBytes := segmentBucket.Get(util.BoltDeletedKey)
|
||||
if deletedBytes != nil {
|
||||
deletedBitmap := roaring.NewBitmap()
|
||||
r := bytes.NewReader(deletedBytes)
|
||||
_, err := deletedBitmap.ReadFrom(r)
|
||||
if err != nil {
|
||||
_ = segment.Close()
|
||||
_ = seg.Close()
|
||||
return nil, fmt.Errorf("error reading deleted bytes: %v", err)
|
||||
}
|
||||
if !deletedBitmap.IsEmpty() {
|
||||
rv.deleted = deletedBitmap
|
||||
}
|
||||
}
|
||||
statBytes := segmentBucket.Get(boltStatsKey)
|
||||
statBytes := segmentBucket.Get(util.BoltStatsKey)
|
||||
if statBytes != nil {
|
||||
var statsMap map[string]map[string]uint64
|
||||
|
||||
err := json.Unmarshal(statBytes, &statsMap)
|
||||
stats := &fieldStats{statMap: statsMap}
|
||||
if err != nil {
|
||||
_ = segment.Close()
|
||||
_ = seg.Close()
|
||||
return nil, fmt.Errorf("error reading stat bytes: %v", err)
|
||||
}
|
||||
rv.stats = stats
|
||||
}
|
||||
updatedFieldBytes := segmentBucket.Get(util.BoltUpdatedFieldsKey)
|
||||
if updatedFieldBytes != nil {
|
||||
var updatedFields map[string]*index.UpdateFieldInfo
|
||||
|
||||
err := json.Unmarshal(updatedFieldBytes, &updatedFields)
|
||||
if err != nil {
|
||||
_ = seg.Close()
|
||||
return nil, fmt.Errorf("error reading updated field bytes: %v", err)
|
||||
}
|
||||
rv.updatedFields = updatedFields
|
||||
// Set the value within the segment base for use during merge
|
||||
rv.UpdateFieldsInfo(rv.updatedFields)
|
||||
}
|
||||
|
||||
return rv, nil
|
||||
}
|
||||
@@ -1215,7 +1231,7 @@ func (s *Scorch) removeOldBoltSnapshots() (numRemoved int, err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return 0, nil
|
||||
}
|
||||
@@ -1293,7 +1309,7 @@ func (s *Scorch) removeOldZapFiles() error {
|
||||
// duration. This results in all of them being purged from the boltDB
|
||||
// and the next iteration of the removeOldData() would end up protecting
|
||||
// latest contiguous snapshot which is a poor pattern in the rollback checkpoints.
|
||||
// Hence we try to retain atmost retentionFactor portion worth of old snapshots
|
||||
// Hence we try to retain at most retentionFactor portion worth of old snapshots
|
||||
// in such a scenario using the following function
|
||||
func getBoundaryCheckPoint(retentionFactor float64,
|
||||
checkPoints []*snapshotMetaData, timeStamp time.Time,
|
||||
@@ -1325,7 +1341,7 @@ func (s *Scorch) rootBoltSnapshotMetaData() ([]*snapshotMetaData, error) {
|
||||
expirationDuration := time.Duration(s.numSnapshotsToKeep-1) * s.rollbackSamplingInterval
|
||||
|
||||
err := s.rootBolt.View(func(tx *bolt.Tx) error {
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -1349,11 +1365,11 @@ func (s *Scorch) rootBoltSnapshotMetaData() ([]*snapshotMetaData, error) {
|
||||
if snapshot == nil {
|
||||
continue
|
||||
}
|
||||
metaBucket := snapshot.Bucket(boltMetaDataKey)
|
||||
metaBucket := snapshot.Bucket(util.BoltMetaDataKey)
|
||||
if metaBucket == nil {
|
||||
continue
|
||||
}
|
||||
timeStampBytes := metaBucket.Get(boltMetaDataTimeStamp)
|
||||
timeStampBytes := metaBucket.Get(util.BoltMetaDataTimeStamp)
|
||||
var timeStamp time.Time
|
||||
err = timeStamp.UnmarshalText(timeStampBytes)
|
||||
if err != nil {
|
||||
@@ -1390,7 +1406,7 @@ func (s *Scorch) rootBoltSnapshotMetaData() ([]*snapshotMetaData, error) {
|
||||
func (s *Scorch) RootBoltSnapshotEpochs() ([]uint64, error) {
|
||||
var rv []uint64
|
||||
err := s.rootBolt.View(func(tx *bolt.Tx) error {
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -1411,7 +1427,7 @@ func (s *Scorch) RootBoltSnapshotEpochs() ([]uint64, error) {
|
||||
func (s *Scorch) loadZapFileNames() (map[string]struct{}, error) {
|
||||
rv := map[string]struct{}{}
|
||||
err := s.rootBolt.View(func(tx *bolt.Tx) error {
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -1423,14 +1439,14 @@ func (s *Scorch) loadZapFileNames() (map[string]struct{}, error) {
|
||||
}
|
||||
segc := snapshot.Cursor()
|
||||
for segk, _ := segc.First(); segk != nil; segk, _ = segc.Next() {
|
||||
if segk[0] == boltInternalKey[0] {
|
||||
if segk[0] == util.BoltInternalKey[0] {
|
||||
continue
|
||||
}
|
||||
segmentBucket := snapshot.Bucket(segk)
|
||||
if segmentBucket == nil {
|
||||
continue
|
||||
}
|
||||
pathBytes := segmentBucket.Get(boltPathKey)
|
||||
pathBytes := segmentBucket.Get(util.BoltPathKey)
|
||||
if pathBytes == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
9
vendor/github.com/blevesearch/bleve/v2/index/scorch/rollback.go
generated
vendored
9
vendor/github.com/blevesearch/bleve/v2/index/scorch/rollback.go
generated
vendored
@@ -19,6 +19,7 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/blevesearch/bleve/v2/util"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
@@ -61,7 +62,7 @@ func RollbackPoints(path string) ([]*RollbackPoint, error) {
|
||||
_ = rootBolt.Close()
|
||||
}()
|
||||
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -87,7 +88,7 @@ func RollbackPoints(path string) ([]*RollbackPoint, error) {
|
||||
meta := map[string][]byte{}
|
||||
c2 := snapshot.Cursor()
|
||||
for j, _ := c2.First(); j != nil; j, _ = c2.Next() {
|
||||
if j[0] == boltInternalKey[0] {
|
||||
if j[0] == util.BoltInternalKey[0] {
|
||||
internalBucket := snapshot.Bucket(j)
|
||||
if internalBucket == nil {
|
||||
err = fmt.Errorf("internal bucket missing")
|
||||
@@ -151,7 +152,7 @@ func Rollback(path string, to *RollbackPoint) error {
|
||||
var found bool
|
||||
var eligibleEpochs []uint64
|
||||
err = rootBolt.View(func(tx *bolt.Tx) error {
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -193,7 +194,7 @@ func Rollback(path string, to *RollbackPoint) error {
|
||||
}
|
||||
}()
|
||||
|
||||
snapshots := tx.Bucket(boltSnapshotsBucket)
|
||||
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
|
||||
if snapshots == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
103 vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go generated vendored
@@ -25,6 +25,7 @@ import (

"github.com/RoaringBitmap/roaring/v2"
"github.com/blevesearch/bleve/v2/registry"
"github.com/blevesearch/bleve/v2/util"
index "github.com/blevesearch/bleve_index_api"
segment "github.com/blevesearch/scorch_segment_api/v2"
bolt "go.etcd.io/bbolt"
@@ -217,9 +218,11 @@ func (s *Scorch) fireAsyncError(err error) {
}

func (s *Scorch) Open() error {
err := s.openBolt()
if err != nil {
return err
if s.rootBolt == nil {
err := s.openBolt()
if err != nil {
return err
}
}

s.asyncTasks.Add(1)
@@ -371,6 +374,7 @@ func (s *Scorch) Close() (err error) {
}
}
s.root = nil
s.rootBolt = nil
s.rootLock.Unlock()
}

@@ -940,3 +944,96 @@ func (s *Scorch) CopyReader() index.CopyReader {
func (s *Scorch) FireIndexEvent() {
s.fireEvent(EventKindIndexStart, 0)
}

// Updates bolt db with the given field info. Existing field info already in bolt
// will be merged before persisting. The index mapping is also overwritten both
// in bolt as well as the index snapshot
func (s *Scorch) UpdateFields(fieldInfo map[string]*index.UpdateFieldInfo, mappingBytes []byte) error {
err := s.updateBolt(fieldInfo, mappingBytes)
if err != nil {
return err
}
// Pass the update field info to all snapshots and segment bases
s.root.UpdateFieldsInfo(fieldInfo)
return nil
}

func (s *Scorch) OpenMeta() error {
if s.rootBolt == nil {
err := s.openBolt()
if err != nil {
return err
}
}

return nil
}

// Merge and update deleted field info and rewrite index mapping
func (s *Scorch) updateBolt(fieldInfo map[string]*index.UpdateFieldInfo, mappingBytes []byte) error {
return s.rootBolt.Update(func(tx *bolt.Tx) error {
snapshots := tx.Bucket(util.BoltSnapshotsBucket)
if snapshots == nil {
return nil
}

c := snapshots.Cursor()
for k, _ := c.Last(); k != nil; k, _ = c.Prev() {
_, _, err := decodeUvarintAscending(k)
if err != nil {
fmt.Printf("unable to parse segment epoch %x, continuing", k)
continue
}
snapshot := snapshots.Bucket(k)
cc := snapshot.Cursor()
for kk, _ := cc.First(); kk != nil; kk, _ = cc.Next() {
if kk[0] == util.BoltInternalKey[0] {
internalBucket := snapshot.Bucket(kk)
if internalBucket == nil {
return fmt.Errorf("segment key, but bucket missing %x", kk)
}
err = internalBucket.Put(util.MappingInternalKey, mappingBytes)
if err != nil {
return err
}
} else if kk[0] != util.BoltMetaDataKey[0] {
segmentBucket := snapshot.Bucket(kk)
if segmentBucket == nil {
return fmt.Errorf("segment key, but bucket missing %x", kk)
}
var updatedFields map[string]*index.UpdateFieldInfo
updatedFieldBytes := segmentBucket.Get(util.BoltUpdatedFieldsKey)
if updatedFieldBytes != nil {
err := json.Unmarshal(updatedFieldBytes, &updatedFields)
if err != nil {
return fmt.Errorf("error reading updated field bytes: %v", err)
}
for field, info := range fieldInfo {
if val, ok := updatedFields[field]; ok {
updatedFields[field] = &index.UpdateFieldInfo{
Deleted: info.Deleted || val.Deleted,
Store: info.Store || val.Store,
DocValues: info.DocValues || val.DocValues,
Index: info.Index || val.Index,
}
} else {
updatedFields[field] = info
}
}
} else {
updatedFields = fieldInfo
}
b, err := json.Marshal(updatedFields)
if err != nil {
return err
}
err = segmentBucket.Put(util.BoltUpdatedFieldsKey, b)
if err != nil {
return err
}
}
}
}
return nil
})
}

81 vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go generated vendored
@@ -84,6 +84,13 @@ type IndexSnapshot struct {

m3 sync.RWMutex // bm25 metrics specific - not to interfere with TFR creation
fieldCardinality map[string]int

// Stores information about zapx fields that have been
// fully deleted (indicated by UpdateFieldInfo.Deleted) or
// partially deleted index, store or docvalues (indicated by
// UpdateFieldInfo.Index or .Store or .DocValues).
// Used to short circuit queries trying to read stale data
updatedFields map[string]*index.UpdateFieldInfo
}

func (i *IndexSnapshot) Segments() []*SegmentSnapshot {
@@ -509,6 +516,13 @@ func (is *IndexSnapshot) Document(id string) (rv index.Document, err error) {
// Keeping that TODO for now until we have a cleaner way.
rvd.StoredFieldsSize += uint64(len(val))

// Skip fields that have been completely deleted or had their
// store data deleted
if info, ok := is.updatedFields[name]; ok &&
(info.Deleted || info.Store) {
return true
}

// copy value, array positions to preserve them beyond the scope of this callback
value := append([]byte(nil), val...)
arrayPos := append([]uint64(nil), pos...)
@@ -634,10 +648,22 @@ func (is *IndexSnapshot) TermFieldReader(ctx context.Context, term []byte, field
segBytesRead := s.segment.BytesRead()
rv.incrementBytesRead(segBytesRead)
}
dict, err := s.segment.Dictionary(field)

var dict segment.TermDictionary
var err error

// Skip fields that have been completely deleted or had their
// index data deleted
if info, ok := is.updatedFields[field]; ok &&
(info.Index || info.Deleted) {
dict, err = s.segment.Dictionary("")
} else {
dict, err = s.segment.Dictionary(field)
}
if err != nil {
return nil, err
}

if dictStats, ok := dict.(segment.DiskStatsReporter); ok {
bytesRead := dictStats.BytesRead()
rv.incrementBytesRead(bytesRead)
@@ -783,6 +809,23 @@ func (is *IndexSnapshot) documentVisitFieldTermsOnSegment(
}
}

// Filter out fields that have been completely deleted or had their
// docvalues data deleted from both visitable fields and required fields
filterUpdatedFields := func(fields []string) []string {
filteredFields := make([]string, 0)
for _, field := range fields {
if info, ok := is.updatedFields[field]; ok &&
(info.DocValues || info.Deleted) {
continue
}
filteredFields = append(filteredFields, field)
}
return filteredFields
}

fieldsFiltered := filterUpdatedFields(fields)
vFieldsFiltered := filterUpdatedFields(vFields)

var errCh chan error

// cFields represents the fields that we'll need from the
@@ -790,7 +833,7 @@ func (is *IndexSnapshot) documentVisitFieldTermsOnSegment(
// if the caller happens to know we're on the same segmentIndex
// from a previous invocation
if cFields == nil {
cFields = subtractStrings(fields, vFields)
cFields = subtractStrings(fieldsFiltered, vFieldsFiltered)

if !ss.cachedDocs.hasFields(cFields) {
errCh = make(chan error, 1)
@@ -805,8 +848,8 @@ func (is *IndexSnapshot) documentVisitFieldTermsOnSegment(
}
}

if ssvOk && ssv != nil && len(vFields) > 0 {
dvs, err = ssv.VisitDocValues(localDocNum, fields, visitor, dvs)
if ssvOk && ssv != nil && len(vFieldsFiltered) > 0 {
dvs, err = ssv.VisitDocValues(localDocNum, fieldsFiltered, visitor, dvs)
if err != nil {
return nil, nil, err
}
@@ -1161,3 +1204,33 @@ func (is *IndexSnapshot) ThesaurusKeysRegexp(name string,
func (is *IndexSnapshot) UpdateSynonymSearchCount(delta uint64) {
atomic.AddUint64(&is.parent.stats.TotSynonymSearches, delta)
}

// Update current snapshot updated field data as well as pass it on to all segments and segment bases
func (is *IndexSnapshot) UpdateFieldsInfo(updatedFields map[string]*index.UpdateFieldInfo) {
is.m.Lock()
defer is.m.Unlock()

is.MergeUpdateFieldsInfo(updatedFields)

for _, segmentSnapshot := range is.segment {
segmentSnapshot.UpdateFieldsInfo(is.updatedFields)
}
}

// Merge given updated field information with existing updated field information
func (is *IndexSnapshot) MergeUpdateFieldsInfo(updatedFields map[string]*index.UpdateFieldInfo) {
if is.updatedFields == nil {
is.updatedFields = updatedFields
} else {
for fieldName, info := range updatedFields {
if val, ok := is.updatedFields[fieldName]; ok {
val.Deleted = val.Deleted || info.Deleted
val.Index = val.Index || info.Index
val.DocValues = val.DocValues || info.DocValues
val.Store = val.Store || info.Store
} else {
is.updatedFields[fieldName] = info
}
}
}
}

2 vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go generated vendored
@@ -163,7 +163,7 @@ func (i *IndexSnapshotTermFieldReader) Advance(ID index.IndexInternalID, preAllo
// unadorned composite optimization
// we need to reset all the iterators
// back to the beginning, which effectively
// achives the same thing as the above
// achieves the same thing as the above
for _, iter := range i.iterators {
if optimizedIterator, ok := iter.(ResetablePostingsIterator); ok {
optimizedIterator.ResetIterator()

4 vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_vr.go generated vendored
@@ -83,6 +83,10 @@ func (i *IndexSnapshotVectorReader) Next(preAlloced *index.VectorDoc) (
}

for i.segmentOffset < len(i.iterators) {
if i.iterators[i.segmentOffset] == nil {
i.segmentOffset++
continue
}
next, err := i.iterators[i.segmentOffset].Next()
if err != nil {
return nil, err

35 vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go generated vendored
@@ -35,12 +35,13 @@ type SegmentSnapshot struct {
// segment was mmaped recently, in which case
// we consider the loading cost of the metadata
// as part of IO stats.
mmaped uint32
id uint64
segment segment.Segment
deleted *roaring.Bitmap
creator string
stats *fieldStats
mmaped uint32
id uint64
segment segment.Segment
deleted *roaring.Bitmap
creator string
stats *fieldStats
updatedFields map[string]*index.UpdateFieldInfo

cachedMeta *cachedMeta

@@ -146,6 +147,28 @@ func (s *SegmentSnapshot) Size() (rv int) {
return
}

// Merge given updated field information with existing and pass it on to the segment base
func (s *SegmentSnapshot) UpdateFieldsInfo(updatedFields map[string]*index.UpdateFieldInfo) {
if s.updatedFields == nil {
s.updatedFields = updatedFields
} else {
for fieldName, info := range updatedFields {
if val, ok := s.updatedFields[fieldName]; ok {
val.Deleted = val.Deleted || info.Deleted
val.Index = val.Index || info.Index
val.DocValues = val.DocValues || info.DocValues
val.Store = val.Store || info.Store
} else {
s.updatedFields[fieldName] = info
}
}
}

if segment, ok := s.segment.(segment.UpdatableSegment); ok {
segment.SetUpdatedFields(s.updatedFields)
}
}

type cachedFieldDocs struct {
m sync.Mutex
readyCh chan struct{} // closed when the cachedFieldDocs.docs is ready to be used.
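
The OR-merge of UpdateFieldInfo flags now appears three times in this diff: in Scorch.updateBolt, IndexSnapshot.MergeUpdateFieldsInfo, and SegmentSnapshot.UpdateFieldsInfo above. A hypothetical helper, not part of the diff, sketching the shared semantics under the same index package alias used in these files (flags only ever turn on, never off):

    // mergeFieldInfo is a hypothetical consolidation (not in this diff) of the
    // merge loop repeated in updateBolt, IndexSnapshot.MergeUpdateFieldsInfo and
    // SegmentSnapshot.UpdateFieldsInfo: each deletion flag is OR-ed, so once a
    // field's index, store or docvalues data is marked deleted, it stays deleted.
    func mergeFieldInfo(dst, src map[string]*index.UpdateFieldInfo) map[string]*index.UpdateFieldInfo {
        if dst == nil {
            return src
        }
        for fieldName, info := range src {
            if val, ok := dst[fieldName]; ok {
                val.Deleted = val.Deleted || info.Deleted
                val.Index = val.Index || info.Index
                val.DocValues = val.DocValues || info.DocValues
                val.Store = val.Store || info.Store
            } else {
                dst[fieldName] = info
            }
        }
        return dst
    }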
13 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/protoc-README.md generated vendored Normal file
@@ -0,0 +1,13 @@
## Instructions for generating new go stubs using upsidedown.proto

1. Download the latest protoc-gen-go
```
go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
```

2. To generate `upsidedown.pb.go` using upsidedown.proto:
```
protoc --go_out=. --go_opt=Mindex/upsidedown/upsidedown.proto=index/upsidedown/ index/upsidedown/upsidedown.proto
```

3. Manually add back Size and MarshalTo methods for BackIndexRowValue, BackIndexTermsEntry, BackIndexStoreEntry to support upside_down.

2 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/reader.go generated vendored
@@ -371,6 +371,6 @@ func (r *UpsideDownCouchDocIDReader) nextOnly() bool {
start = r.onlyPos
r.onlyPos++
}
// inidicate if we got to the end of the list
// indicate if we got to the end of the list
return r.onlyPos < len(r.only)
}

4 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row.go generated vendored
@@ -23,7 +23,7 @@ import (
"reflect"

"github.com/blevesearch/bleve/v2/size"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/proto"
)

var (
@@ -924,7 +924,7 @@ type backIndexFieldTermVisitor func(field uint32, term []byte)
//
// This code originates from:
// func (m *BackIndexRowValue) Unmarshal(data []byte) error
// the sections which create garbage or parse unintersting sections
// the sections which create garbage or parse uninteresting sections
// have been commented out. This was done by design to allow for easier
// merging in the future if that original function is regenerated
func visitBackIndexRow(data []byte, callback backIndexFieldTermVisitor) error {

2 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go generated vendored
@@ -30,7 +30,7 @@ import (
index "github.com/blevesearch/bleve_index_api"
store "github.com/blevesearch/upsidedown_store_api"

"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/proto"
)

const Name = "upside_down"
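
Both row.go and upsidedown.go swap the deprecated github.com/golang/protobuf shim for the google.golang.org/protobuf module. A minimal hypothetical sketch, not part of the diff, of a round trip that behaves the same under the new module, since the regenerated types satisfy proto.Message:

    package example

    import (
        "google.golang.org/protobuf/proto"
    )

    // roundTrip marshals and unmarshals any proto.Message, e.g. the regenerated
    // BackIndexRowValue; proto.Marshal/Unmarshal keep the same shape as under the
    // old shim, so call sites need only the import change.
    func roundTrip(msg proto.Message) (proto.Message, error) {
        data, err := proto.Marshal(msg)
        if err != nil {
            return nil, err
        }
        out := msg.ProtoReflect().New().Interface() // fresh instance of the same message type
        if err := proto.Unmarshal(data, out); err != nil {
            return nil, err
        }
        return out, nil
    }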
865 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.pb.go generated vendored
@@ -1,382 +1,319 @@
// Code generated by protoc-gen-gogo.
// source: upsidedown.proto
// DO NOT EDIT!
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: index/upsidedown/upsidedown.proto

/*
Package upsidedown is a generated protocol buffer package.

It is generated from these files:

upsidedown.proto

It has these top-level messages:

BackIndexTermsEntry
BackIndexStoreEntry
BackIndexRowValue
*/
package upsidedown

import proto "github.com/golang/protobuf/proto"
import math "math"
import (
fmt "fmt"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)

import io "io"
import fmt "fmt"
import github_com_golang_protobuf_proto "github.com/golang/protobuf/proto"
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = math.Inf
var (
ErrInvalidLengthUpsidedown = fmt.Errorf("proto: negative length found during unmarshaling")
)

type BackIndexTermsEntry struct {
Field *uint32 `protobuf:"varint,1,req,name=field" json:"field,omitempty"`
Terms []string `protobuf:"bytes,2,rep,name=terms" json:"terms,omitempty"`
XXX_unrecognized []byte `json:"-"`
state protoimpl.MessageState `protogen:"open.v1"`
Field *uint32 `protobuf:"varint,1,req,name=field" json:"field,omitempty"`
Terms []string `protobuf:"bytes,2,rep,name=terms" json:"terms,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (m *BackIndexTermsEntry) Reset() { *m = BackIndexTermsEntry{} }
func (m *BackIndexTermsEntry) String() string { return proto.CompactTextString(m) }
func (*BackIndexTermsEntry) ProtoMessage() {}
func (x *BackIndexTermsEntry) Reset() {
*x = BackIndexTermsEntry{}
mi := &file_index_upsidedown_upsidedown_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (m *BackIndexTermsEntry) GetField() uint32 {
if m != nil && m.Field != nil {
return *m.Field
func (x *BackIndexTermsEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*BackIndexTermsEntry) ProtoMessage() {}

func (x *BackIndexTermsEntry) ProtoReflect() protoreflect.Message {
mi := &file_index_upsidedown_upsidedown_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use BackIndexTermsEntry.ProtoReflect.Descriptor instead.
func (*BackIndexTermsEntry) Descriptor() ([]byte, []int) {
return file_index_upsidedown_upsidedown_proto_rawDescGZIP(), []int{0}
}

func (x *BackIndexTermsEntry) GetField() uint32 {
if x != nil && x.Field != nil {
return *x.Field
}
return 0
}

func (m *BackIndexTermsEntry) GetTerms() []string {
if m != nil {
return m.Terms
func (x *BackIndexTermsEntry) GetTerms() []string {
if x != nil {
return x.Terms
}
return nil
}

func (x *BackIndexTermsEntry) MarshalTo(data []byte) (n int, err error) {
var i int
_ = i
var l int
_ = l
if x.Field == nil {
return 0, fmt.Errorf("missing required `Field`")
} else {
data[i] = 0x8
i++
i = encodeVarintUpsidedown(data, i, uint64(*x.Field))
}
if len(x.Terms) > 0 {
for _, s := range x.Terms {
data[i] = 0x12
i++
l = len(s)
for l >= 1<<7 {
data[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
data[i] = uint8(l)
i++
i += copy(data[i:], s)
}
}
return i, nil
}

func (x *BackIndexTermsEntry) Size() (n int) {
var l int
_ = l
if x.Field != nil {
n += 1 + sovUpsidedown(uint64(*x.Field))
}
if len(x.Terms) > 0 {
for _, s := range x.Terms {
l = len(s)
n += 1 + l + sovUpsidedown(uint64(l))
}
}
return n
}

type BackIndexStoreEntry struct {
Field *uint32 `protobuf:"varint,1,req,name=field" json:"field,omitempty"`
ArrayPositions []uint64 `protobuf:"varint,2,rep,name=arrayPositions" json:"arrayPositions,omitempty"`
XXX_unrecognized []byte `json:"-"`
state protoimpl.MessageState `protogen:"open.v1"`
Field *uint32 `protobuf:"varint,1,req,name=field" json:"field,omitempty"`
ArrayPositions []uint64 `protobuf:"varint,2,rep,name=arrayPositions" json:"arrayPositions,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (m *BackIndexStoreEntry) Reset() { *m = BackIndexStoreEntry{} }
func (m *BackIndexStoreEntry) String() string { return proto.CompactTextString(m) }
func (*BackIndexStoreEntry) ProtoMessage() {}
func (x *BackIndexStoreEntry) Reset() {
*x = BackIndexStoreEntry{}
mi := &file_index_upsidedown_upsidedown_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (m *BackIndexStoreEntry) GetField() uint32 {
if m != nil && m.Field != nil {
return *m.Field
func (x *BackIndexStoreEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*BackIndexStoreEntry) ProtoMessage() {}

func (x *BackIndexStoreEntry) ProtoReflect() protoreflect.Message {
mi := &file_index_upsidedown_upsidedown_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use BackIndexStoreEntry.ProtoReflect.Descriptor instead.
func (*BackIndexStoreEntry) Descriptor() ([]byte, []int) {
return file_index_upsidedown_upsidedown_proto_rawDescGZIP(), []int{1}
}

func (x *BackIndexStoreEntry) GetField() uint32 {
if x != nil && x.Field != nil {
return *x.Field
}
return 0
}

func (m *BackIndexStoreEntry) GetArrayPositions() []uint64 {
if m != nil {
return m.ArrayPositions
func (x *BackIndexStoreEntry) GetArrayPositions() []uint64 {
if x != nil {
return x.ArrayPositions
}
return nil
}

func (x *BackIndexStoreEntry) MarshalTo(data []byte) (n int, err error) {
var i int
_ = i
var l int
_ = l
if x.Field == nil {
return 0, fmt.Errorf("missing required `Field`")
} else {
data[i] = 0x8
i++
i = encodeVarintUpsidedown(data, i, uint64(*x.Field))
}
if len(x.ArrayPositions) > 0 {
for _, num := range x.ArrayPositions {
data[i] = 0x10
i++
i = encodeVarintUpsidedown(data, i, uint64(num))
}
}
return i, nil
}

func (x *BackIndexStoreEntry) Size() (n int) {
var l int
_ = l
if x.Field != nil {
n += 1 + sovUpsidedown(uint64(*x.Field))
}
if len(x.ArrayPositions) > 0 {
for _, e := range x.ArrayPositions {
n += 1 + sovUpsidedown(uint64(e))
}
}
return n
}

type BackIndexRowValue struct {
TermsEntries []*BackIndexTermsEntry `protobuf:"bytes,1,rep,name=termsEntries" json:"termsEntries,omitempty"`
StoredEntries []*BackIndexStoreEntry `protobuf:"bytes,2,rep,name=storedEntries" json:"storedEntries,omitempty"`
XXX_unrecognized []byte `json:"-"`
state protoimpl.MessageState `protogen:"open.v1"`
TermsEntries []*BackIndexTermsEntry `protobuf:"bytes,1,rep,name=termsEntries" json:"termsEntries,omitempty"`
StoredEntries []*BackIndexStoreEntry `protobuf:"bytes,2,rep,name=storedEntries" json:"storedEntries,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (m *BackIndexRowValue) Reset() { *m = BackIndexRowValue{} }
func (m *BackIndexRowValue) String() string { return proto.CompactTextString(m) }
func (*BackIndexRowValue) ProtoMessage() {}

func (m *BackIndexRowValue) GetTermsEntries() []*BackIndexTermsEntry {
if m != nil {
return m.TermsEntries
}
return nil
func (x *BackIndexRowValue) Reset() {
*x = BackIndexRowValue{}
mi := &file_index_upsidedown_upsidedown_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (m *BackIndexRowValue) GetStoredEntries() []*BackIndexStoreEntry {
if m != nil {
return m.StoredEntries
}
return nil
func (x *BackIndexRowValue) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (m *BackIndexTermsEntry) Unmarshal(data []byte) error {
var hasFields [1]uint64
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
func (*BackIndexRowValue) ProtoMessage() {}

func (x *BackIndexRowValue) ProtoReflect() protoreflect.Message {
mi := &file_index_upsidedown_upsidedown_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType)
}
var v uint32
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Field = &v
hasFields[0] |= uint64(0x00000001)
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Terms", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
postIndex := iNdEx + int(stringLen)
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Terms = append(m.Terms, string(data[iNdEx:postIndex]))
iNdEx = postIndex
default:
var sizeOfWire int
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipUpsidedown(data[iNdEx:])
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use BackIndexRowValue.ProtoReflect.Descriptor instead.
func (*BackIndexRowValue) Descriptor() ([]byte, []int) {
return file_index_upsidedown_upsidedown_proto_rawDescGZIP(), []int{2}
}

func (x *BackIndexRowValue) GetTermsEntries() []*BackIndexTermsEntry {
if x != nil {
return x.TermsEntries
}
return nil
}

func (x *BackIndexRowValue) GetStoredEntries() []*BackIndexStoreEntry {
if x != nil {
return x.StoredEntries
}
return nil
}

func (x *BackIndexRowValue) MarshalTo(data []byte) (n int, err error) {
var i int
_ = i
var l int
_ = l
if len(x.TermsEntries) > 0 {
for _, msg := range x.TermsEntries {
data[i] = 0xa
i++
i = encodeVarintUpsidedown(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return err
return 0, err
}
if skippy < 0 {
return ErrInvalidLengthUpsidedown
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
i += n
}
}
if hasFields[0]&uint64(0x00000001) == 0 {
return new(github_com_golang_protobuf_proto.RequiredNotSetError)
}

return nil
}
func (m *BackIndexStoreEntry) Unmarshal(data []byte) error {
var hasFields [1]uint64
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType)
}
var v uint32
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Field = &v
hasFields[0] |= uint64(0x00000001)
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ArrayPositions", wireType)
}
var v uint64
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.ArrayPositions = append(m.ArrayPositions, v)
default:
var sizeOfWire int
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipUpsidedown(data[iNdEx:])
if len(x.StoredEntries) > 0 {
for _, msg := range x.StoredEntries {
data[i] = 0x12
i++
i = encodeVarintUpsidedown(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return err
return 0, err
}
if skippy < 0 {
return ErrInvalidLengthUpsidedown
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
i += n
}
}
if hasFields[0]&uint64(0x00000001) == 0 {
return new(github_com_golang_protobuf_proto.RequiredNotSetError)
}

return nil
return i, nil
}
func (m *BackIndexRowValue) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TermsEntries", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
postIndex := iNdEx + msglen
if msglen < 0 {
return ErrInvalidLengthUpsidedown
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TermsEntries = append(m.TermsEntries, &BackIndexTermsEntry{})
if err := m.TermsEntries[len(m.TermsEntries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StoredEntries", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
postIndex := iNdEx + msglen
if msglen < 0 {
return ErrInvalidLengthUpsidedown
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.StoredEntries = append(m.StoredEntries, &BackIndexStoreEntry{})
if err := m.StoredEntries[len(m.StoredEntries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
var sizeOfWire int
for {
sizeOfWire++
wire >>= 7
if wire == 0 {
break
}
}
iNdEx -= sizeOfWire
skippy, err := skipUpsidedown(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthUpsidedown
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy

func (x *BackIndexRowValue) Size() (n int) {
var l int
_ = l
if len(x.TermsEntries) > 0 {
for _, e := range x.TermsEntries {
l = e.Size()
n += 1 + l + sovUpsidedown(uint64(l))
}
}

return nil
if len(x.StoredEntries) > 0 {
for _, e := range x.StoredEntries {
l = e.Size()
n += 1 + l + sovUpsidedown(uint64(l))
}
}
return n
}

func skipUpsidedown(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
@@ -465,66 +402,6 @@ func skipUpsidedown(data []byte) (n int, err error) {
panic("unreachable")
}

var (
ErrInvalidLengthUpsidedown = fmt.Errorf("proto: negative length found during unmarshaling")
)

func (m *BackIndexTermsEntry) Size() (n int) {
var l int
_ = l
if m.Field != nil {
n += 1 + sovUpsidedown(uint64(*m.Field))
}
if len(m.Terms) > 0 {
for _, s := range m.Terms {
l = len(s)
n += 1 + l + sovUpsidedown(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}

func (m *BackIndexStoreEntry) Size() (n int) {
var l int
_ = l
if m.Field != nil {
n += 1 + sovUpsidedown(uint64(*m.Field))
}
if len(m.ArrayPositions) > 0 {
for _, e := range m.ArrayPositions {
n += 1 + sovUpsidedown(uint64(e))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}

func (m *BackIndexRowValue) Size() (n int) {
var l int
_ = l
if len(m.TermsEntries) > 0 {
for _, e := range m.TermsEntries {
l = e.Size()
n += 1 + l + sovUpsidedown(uint64(l))
}
}
if len(m.StoredEntries) > 0 {
for _, e := range m.StoredEntries {
l = e.Size()
n += 1 + l + sovUpsidedown(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}

func sovUpsidedown(x uint64) (n int) {
for {
n++
@@ -535,150 +412,7 @@ func sovUpsidedown(x uint64) (n int) {
}
return n
}
func sozUpsidedown(x uint64) (n int) {
return sovUpsidedown(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *BackIndexTermsEntry) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}

func (m *BackIndexTermsEntry) MarshalTo(data []byte) (n int, err error) {
var i int
_ = i
var l int
_ = l
if m.Field == nil {
return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError)
} else {
data[i] = 0x8
i++
i = encodeVarintUpsidedown(data, i, uint64(*m.Field))
}
if len(m.Terms) > 0 {
for _, s := range m.Terms {
data[i] = 0x12
i++
l = len(s)
for l >= 1<<7 {
data[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
data[i] = uint8(l)
i++
i += copy(data[i:], s)
}
}
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
return i, nil
}

func (m *BackIndexStoreEntry) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}

func (m *BackIndexStoreEntry) MarshalTo(data []byte) (n int, err error) {
var i int
_ = i
var l int
_ = l
if m.Field == nil {
return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError)
} else {
data[i] = 0x8
i++
i = encodeVarintUpsidedown(data, i, uint64(*m.Field))
}
if len(m.ArrayPositions) > 0 {
for _, num := range m.ArrayPositions {
data[i] = 0x10
i++
i = encodeVarintUpsidedown(data, i, uint64(num))
}
}
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
return i, nil
}

func (m *BackIndexRowValue) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}

func (m *BackIndexRowValue) MarshalTo(data []byte) (n int, err error) {
var i int
_ = i
var l int
_ = l
if len(m.TermsEntries) > 0 {
for _, msg := range m.TermsEntries {
data[i] = 0xa
i++
i = encodeVarintUpsidedown(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.StoredEntries) > 0 {
for _, msg := range m.StoredEntries {
data[i] = 0x12
i++
i = encodeVarintUpsidedown(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
return i, nil
}

func encodeFixed64Upsidedown(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Upsidedown(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintUpsidedown(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
@@ -688,3 +422,70 @@ func encodeVarintUpsidedown(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
return offset + 1
}

var File_index_upsidedown_upsidedown_proto protoreflect.FileDescriptor

const file_index_upsidedown_upsidedown_proto_rawDesc = "" +
"\n" +
"!index/upsidedown/upsidedown.proto\"A\n" +
"\x13BackIndexTermsEntry\x12\x14\n" +
"\x05field\x18\x01 \x02(\rR\x05field\x12\x14\n" +
"\x05terms\x18\x02 \x03(\tR\x05terms\"S\n" +
"\x13BackIndexStoreEntry\x12\x14\n" +
"\x05field\x18\x01 \x02(\rR\x05field\x12&\n" +
"\x0earrayPositions\x18\x02 \x03(\x04R\x0earrayPositions\"\x89\x01\n" +
"\x11BackIndexRowValue\x128\n" +
"\ftermsEntries\x18\x01 \x03(\v2\x14.BackIndexTermsEntryR\ftermsEntries\x12:\n" +
"\rstoredEntries\x18\x02 \x03(\v2\x14.BackIndexStoreEntryR\rstoredEntries"

var (
file_index_upsidedown_upsidedown_proto_rawDescOnce sync.Once
file_index_upsidedown_upsidedown_proto_rawDescData []byte
)

func file_index_upsidedown_upsidedown_proto_rawDescGZIP() []byte {
file_index_upsidedown_upsidedown_proto_rawDescOnce.Do(func() {
file_index_upsidedown_upsidedown_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_index_upsidedown_upsidedown_proto_rawDesc), len(file_index_upsidedown_upsidedown_proto_rawDesc)))
})
return file_index_upsidedown_upsidedown_proto_rawDescData
}

var file_index_upsidedown_upsidedown_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_index_upsidedown_upsidedown_proto_goTypes = []any{
(*BackIndexTermsEntry)(nil), // 0: BackIndexTermsEntry
(*BackIndexStoreEntry)(nil), // 1: BackIndexStoreEntry
(*BackIndexRowValue)(nil), // 2: BackIndexRowValue
}
var file_index_upsidedown_upsidedown_proto_depIdxs = []int32{
0, // 0: BackIndexRowValue.termsEntries:type_name -> BackIndexTermsEntry
1, // 1: BackIndexRowValue.storedEntries:type_name -> BackIndexStoreEntry
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}

func init() { file_index_upsidedown_upsidedown_proto_init() }
func file_index_upsidedown_upsidedown_proto_init() {
if File_index_upsidedown_upsidedown_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_index_upsidedown_upsidedown_proto_rawDesc), len(file_index_upsidedown_upsidedown_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_index_upsidedown_upsidedown_proto_goTypes,
DependencyIndexes: file_index_upsidedown_upsidedown_proto_depIdxs,
MessageInfos: file_index_upsidedown_upsidedown_proto_msgTypes,
}.Build()
File_index_upsidedown_upsidedown_proto = out.File
file_index_upsidedown_upsidedown_proto_goTypes = nil
file_index_upsidedown_upsidedown_proto_depIdxs = nil
}

87 vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go generated vendored
@@ -32,7 +32,7 @@ type indexAliasImpl struct {
indexes []Index
mutex sync.RWMutex
open bool
// if all the indexes in tha alias have the same mapping
// if all the indexes in that alias have the same mapping
// then the user can set the mapping here to avoid
// checking the mapping of each index in the alias
mapping mapping.IndexMapping
@@ -186,6 +186,7 @@ func (i *indexAliasImpl) SearchInContext(ctx context.Context, req *SearchRequest
if len(i.indexes) < 1 {
return nil, ErrorAliasEmpty
}

if _, ok := ctx.Value(search.PreSearchKey).(bool); ok {
// since preSearchKey is set, it means that the request
// is being executed as part of a preSearch, which
@@ -227,6 +228,21 @@ func (i *indexAliasImpl) SearchInContext(ctx context.Context, req *SearchRequest
return i.indexes[0].SearchInContext(ctx, req)
}

// rescorer will be set if score fusion is supposed to happen
// at this alias (root alias), else will be nil
var rescorer *rescorer
if _, ok := ctx.Value(search.ScoreFusionKey).(bool); !ok {
// new context will be used in internal functions to collect data
// as suitable for fusion. Rescorer is used for rescoring
// using fusion algorithms.
if IsScoreFusionRequested(req) {
ctx = context.WithValue(ctx, search.ScoreFusionKey, true)
rescorer = newRescorer(req)
rescorer.prepareSearchRequest()
defer rescorer.restoreSearchRequest()
}
}

// at this stage we know we have multiple indexes
// check if preSearchData needs to be gathered from all indexes
// before executing the query
@@ -236,6 +252,14 @@ func (i *indexAliasImpl) SearchInContext(ctx context.Context, req *SearchRequest
// - the request requires preSearch
var preSearchDuration time.Duration
var sr *SearchResult

// fusionKnnHits stores the KnnHits at the root alias.
// This is used with score fusion in case there is no need to
// send the knn hits to the leaf indexes in search phase.
// Refer to constructPreSearchDataAndFusionKnnHits for more info.
// This variable is left nil if we have to send the knn hits to leaf
// indexes again, else contains the knn hits if not required.
var fusionKnnHits search.DocumentMatchCollection
flags, err := preSearchRequired(ctx, req, i.mapping)
if err != nil {
return nil, err
@@ -261,10 +285,10 @@ func (i *indexAliasImpl) SearchInContext(ctx context.Context, req *SearchRequest
// if the request is satisfied by the preSearch result, then we can
// directly return the preSearch result as the final result
if requestSatisfiedByPreSearch(req, flags) {
sr = finalizeSearchResult(req, preSearchResult)
sr = finalizeSearchResult(ctx, req, preSearchResult, rescorer)
// no need to run the 2nd phase MultiSearch(..)
} else {
preSearchData, err = constructPreSearchData(req, flags, preSearchResult, i.indexes)
preSearchData, fusionKnnHits, err = constructPreSearchDataAndFusionKnnHits(req, flags, preSearchResult, rescorer, i.indexes)
if err != nil {
return nil, err
}
@@ -274,7 +298,8 @@ func (i *indexAliasImpl) SearchInContext(ctx context.Context, req *SearchRequest

// check if search result was generated as part of preSearch itself
if sr == nil {
sr, err = MultiSearch(ctx, req, preSearchData, i.indexes...)
multiSearchParams := &multiSearchParams{preSearchData, rescorer, fusionKnnHits}
sr, err = MultiSearch(ctx, req, multiSearchParams, i.indexes...)
if err != nil {
return nil, err
}
@@ -653,7 +678,7 @@ func preSearch(ctx context.Context, req *SearchRequest, flags *preSearchFlags, i
// if the request is satisfied by just the preSearch result,
// finalize the result and return it directly without
// performing multi search
func finalizeSearchResult(req *SearchRequest, preSearchResult *SearchResult) *SearchResult {
func finalizeSearchResult(ctx context.Context, req *SearchRequest, preSearchResult *SearchResult, rescorer *rescorer) *SearchResult {
if preSearchResult == nil {
return nil
}
@@ -682,7 +707,16 @@ func finalizeSearchResult(req *SearchRequest, preSearchResult *SearchResult) *Se
if req.SearchAfter != nil {
preSearchResult.Hits = collector.FilterHitsBySearchAfter(preSearchResult.Hits, req.Sort, req.SearchAfter)
}

if rescorer != nil {
// rescore takes ftsHits and knnHits as first and second argument respectively
// since this is pure knn, set ftsHits to nil. preSearchResult.Hits contains knn results
preSearchResult.Hits, preSearchResult.Total, preSearchResult.MaxScore = rescorer.rescore(nil, preSearchResult.Hits)
rescorer.restoreSearchRequest()
}

preSearchResult.Hits = hitsInCurrentPage(req, preSearchResult.Hits)

if reverseQueryExecution {
// reverse the sort back to the original
req.Sort.Reverse()
@@ -759,6 +793,31 @@ func constructPreSearchData(req *SearchRequest, flags *preSearchFlags,
return mergedOut, nil
}

// Constructs the presearch data if required during the search phase.
// Also if we need to store knn hits at alias.
// If we need to store knn hits at alias: returns all the knn hits
// If we should send it to leaf indexes: includes in presearch data
func constructPreSearchDataAndFusionKnnHits(req *SearchRequest, flags *preSearchFlags,
preSearchResult *SearchResult, rescorer *rescorer, indexes []Index,
) (map[string]map[string]interface{}, search.DocumentMatchCollection, error) {
var fusionknnhits search.DocumentMatchCollection

// Checks if we need to send the KNN hits to the indexes in the
// search phase. If there is score fusion enabled, we do not
// send the KNN hits to the indexes.
if rescorer != nil && flags.knn {
fusionknnhits = preSearchResult.Hits
preSearchResult.Hits = nil
}

preSearchData, err := constructPreSearchData(req, flags, preSearchResult, indexes)
if err != nil {
return nil, nil, err
}

return preSearchData, fusionknnhits, nil
}

func preSearchDataSearch(ctx context.Context, req *SearchRequest, flags *preSearchFlags, indexes ...Index) (*SearchResult, error) {
asyncResults := make(chan *asyncSearchResult, len(indexes))
// run search on each index in separate go routine
@@ -912,9 +971,16 @@ func hitsInCurrentPage(req *SearchRequest, hits []*search.DocumentMatch) []*sear
return hits
}

// Extra parameters for MultiSearch
type multiSearchParams struct {
preSearchData map[string]map[string]interface{}
rescorer *rescorer
fusionKnnHits search.DocumentMatchCollection
}

// MultiSearch executes a SearchRequest across multiple Index objects,
// then merges the results. The indexes must honor any ctx deadline.
func MultiSearch(ctx context.Context, req *SearchRequest, preSearchData map[string]map[string]interface{}, indexes ...Index) (*SearchResult, error) {
func MultiSearch(ctx context.Context, req *SearchRequest, params *multiSearchParams, indexes ...Index) (*SearchResult, error) {
searchStart := time.Now()
asyncResults := make(chan *asyncSearchResult, len(indexes))

@@ -939,8 +1005,8 @@ func MultiSearch(ctx context.Context, req *SearchRequest, preSearchData map[stri
waitGroup.Add(len(indexes))
for _, in := range indexes {
var payload map[string]interface{}
if preSearchData != nil {
payload = preSearchData[in.Name()]
if params.preSearchData != nil {
payload = params.preSearchData[in.Name()]
}
go searchChildIndex(in, createChildSearchRequest(req, payload))
}
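
MultiSearch is not usually called directly; it is reached through an index alias. A short usage sketch against the public bleve v2 API (the indexes and query here are illustrative, not from the diff):

    package example

    import (
        "context"

        bleve "github.com/blevesearch/bleve/v2"
    )

    // searchBoth runs one request across two indexes via an alias; with more
    // than one index in the alias, SearchInContext fans out through MultiSearch.
    func searchBoth(ctx context.Context, indexA, indexB bleve.Index) (*bleve.SearchResult, error) {
        alias := bleve.NewIndexAlias(indexA, indexB)
        req := bleve.NewSearchRequest(bleve.NewMatchQuery("hello"))
        return alias.SearchInContext(ctx, req)
    }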
@@ -980,6 +1046,11 @@ func MultiSearch(ctx context.Context, req *SearchRequest, preSearchData map[stri
}
}

if params.rescorer != nil {
sr.Hits, sr.Total, sr.MaxScore = params.rescorer.rescore(sr.Hits, params.fusionKnnHits)
params.rescorer.restoreSearchRequest()
}

sr.Hits = hitsInCurrentPage(req, sr.Hits)

// fix up facets

148 vendor/github.com/blevesearch/bleve/v2/index_impl.go generated vendored
@@ -133,7 +133,7 @@ func newIndexUsing(path string, mapping mapping.IndexMapping, indexType string,
if err != nil {
return nil, err
}
err = rv.i.SetInternal(mappingInternalKey, mappingBytes)
err = rv.i.SetInternal(util.MappingInternalKey, mappingBytes)
if err != nil {
return nil, err
}
@@ -163,6 +163,9 @@ func openIndexUsing(path string, runtimeConfig map[string]interface{}) (rv *inde
rv.meta.IndexType = upsidedown.Name
}

var um *mapping.IndexMappingImpl
var umBytes []byte

storeConfig := rv.meta.Config
if storeConfig == nil {
storeConfig = map[string]interface{}{}
@@ -173,6 +176,21 @@ func openIndexUsing(path string, runtimeConfig map[string]interface{}) (rv *inde
storeConfig["error_if_exists"] = false
for rck, rcv := range runtimeConfig {
storeConfig[rck] = rcv
if rck == "updated_mapping" {
if val, ok := rcv.(string); ok {
if len(val) == 0 {
return nil, fmt.Errorf("updated_mapping is empty")
}
umBytes = []byte(val)

err = util.UnmarshalJSON(umBytes, &um)
if err != nil {
return nil, fmt.Errorf("error parsing updated_mapping into JSON: %v\nmapping contents:\n%v", err, rck)
}
} else {
return nil, fmt.Errorf("updated_mapping not of type string")
}
}
}

// open the index
@@ -185,15 +203,32 @@ func openIndexUsing(path string, runtimeConfig map[string]interface{}) (rv *inde
if err != nil {
return nil, err
}
err = rv.i.Open()
if err != nil {
return nil, err
}
defer func(rv *indexImpl) {
if !rv.open {
rv.i.Close()

var ui index.UpdateIndex
if um != nil {
var ok bool
ui, ok = rv.i.(index.UpdateIndex)
if !ok {
return nil, fmt.Errorf("updated mapping present for unupdatable index")
}
}(rv)

// Load the meta data from bolt so that we can read the current index
// mapping to compare with
err = ui.OpenMeta()
if err != nil {
return nil, err
}
} else {
err = rv.i.Open()
if err != nil {
return nil, err
}
defer func(rv *indexImpl) {
if !rv.open {
rv.i.Close()
}
}(rv)
}

// now load the mapping
indexReader, err := rv.i.Reader()
@@ -206,7 +241,7 @@ func openIndexUsing(path string, runtimeConfig map[string]interface{}) (rv *inde
}
}()

mappingBytes, err := indexReader.GetInternal(mappingInternalKey)
mappingBytes, err := indexReader.GetInternal(util.MappingInternalKey)
if err != nil {
return nil, err
}
@@ -217,19 +252,48 @@ func openIndexUsing(path string, runtimeConfig map[string]interface{}) (rv *inde
return nil, fmt.Errorf("error parsing mapping JSON: %v\nmapping contents:\n%s", err, string(mappingBytes))
}

// validate the mapping
err = im.Validate()
if err != nil {
// no longer return usable index on error because there
// is a chance the index is not open at this stage
return nil, err
}

// Validate and update the index with the new mapping
if um != nil && ui != nil {
err = um.Validate()
if err != nil {
return nil, err
}

fieldInfo, err := DeletedFields(im, um)
if err != nil {
return nil, err
}

err = ui.UpdateFields(fieldInfo, umBytes)
if err != nil {
return nil, err
}
im = um

err = rv.i.Open()
if err != nil {
return nil, err
}
defer func(rv *indexImpl) {
if !rv.open {
rv.i.Close()
}
}(rv)
}

// mark the index as open
rv.mutex.Lock()
defer rv.mutex.Unlock()
rv.open = true

// validate the mapping
err = im.Validate()
if err != nil {
// note even if the mapping is invalid
// we still return an open usable index
return rv, err
}

rv.m = im
indexStats.Register(rv)
return rv, err
|
||||
@@ -562,6 +626,21 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
|
||||
}
|
||||
}()
|
||||
|
||||
// rescorer will be set if score fusion is supposed to happen
|
||||
// at this alias (root alias), else will be nil
|
||||
var rescorer *rescorer
|
||||
if _, ok := ctx.Value(search.ScoreFusionKey).(bool); !ok {
|
||||
// new context will be used in internal functions to collect data
|
||||
// as suitable for hybrid search. Rescorer is used for rescoring
|
||||
// using fusion algorithms.
|
||||
if IsScoreFusionRequested(req) {
|
||||
ctx = context.WithValue(ctx, search.ScoreFusionKey, true)
|
||||
rescorer = newRescorer(req)
|
||||
rescorer.prepareSearchRequest()
|
||||
defer rescorer.restoreSearchRequest()
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := ctx.Value(search.PreSearchKey).(bool); ok {
|
||||
preSearchResult, err := i.preSearch(ctx, req, indexReader)
|
||||
if err != nil {
|
||||
@@ -632,10 +711,21 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
|
||||
}
|
||||
}
|
||||
}
|
||||
if !skipKNNCollector && requestHasKNN(req) {
|
||||
knnHits, err = i.runKnnCollector(ctx, req, indexReader, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
_, contextScoreFusionKeyExists := ctx.Value(search.ScoreFusionKey).(bool)
|
||||
|
||||
if !contextScoreFusionKeyExists {
|
||||
// if no score fusion, default behaviour
|
||||
if !skipKNNCollector && requestHasKNN(req) {
|
||||
knnHits, err = i.runKnnCollector(ctx, req, indexReader, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// if score fusion, run collect if rescorer is defined
|
||||
if rescorer != nil && requestHasKNN(req) {
|
||||
knnHits, err = i.runKnnCollector(ctx, req, indexReader, false)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -650,7 +740,12 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
|
||||
}
|
||||
}
|
||||
|
||||
setKnnHitsInCollector(knnHits, req, coll)
|
||||
// if score fusion, no faceting for knn hits is done
|
||||
// hence we can skip setting the knn hits in the collector
|
||||
if !contextScoreFusionKeyExists {
|
||||
setKnnHitsInCollector(knnHits, req, coll)
|
||||
}
|
||||
|
||||
|
||||
if fts != nil {
|
||||
if is, ok := indexReader.(*scorch.IndexSnapshot); ok {
|
||||
@@ -859,6 +954,13 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr
|
||||
Facets: coll.FacetResults(),
|
||||
}
|
||||
|
||||
// rescore if fusion flag is set
|
||||
if rescorer != nil {
|
||||
rv.Hits, rv.Total, rv.MaxScore = rescorer.rescore(rv.Hits, knnHits)
|
||||
rescorer.restoreSearchRequest()
|
||||
rv.Hits = hitsInCurrentPage(req, rv.Hits)
|
||||
}
|
||||
|
||||
if req.Explain {
|
||||
rv.Request = req
|
||||
}
|
||||
|
||||
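The update path above is driven entirely through the runtime config passed at open time. A minimal sketch of how a caller might exercise it, assuming an existing index at `indexPath`, an edited mapping `upd`, and the usual `bleve/v2` and `encoding/json` imports; the `"updated_mapping"` key matches the handling above, everything else is illustrative:

```go
// Hedged sketch, not part of the diff: reopen an index with an edited
// mapping so the new openIndexUsing logic validates and applies it.
updBytes, err := json.Marshal(upd) // upd is a *mapping.IndexMappingImpl
if err != nil {
	log.Fatal(err)
}
idx, err := bleve.OpenUsing(indexPath, map[string]interface{}{
	"updated_mapping": string(updBytes), // must be a non-empty string
})
if err != nil {
	log.Fatal(err) // e.g. mapping validation or field-update errors
}
defer idx.Close()
```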
vendor/github.com/blevesearch/bleve/v2/index_update.go (generated, vendored, new file, 595 lines)
@@ -0,0 +1,595 @@
// Copyright (c) 2025 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bleve

import (
	"fmt"
	"reflect"

	"github.com/blevesearch/bleve/v2/mapping"
	index "github.com/blevesearch/bleve_index_api"
)

// Store all the fields that interact with the data
// from a document path
type pathInfo struct {
	fieldMapInfo []*fieldMapInfo
	dynamic      bool
	path         string
	analyser     string
	parentPath   string
}

// Store the field information with respect to the
// document paths
type fieldMapInfo struct {
	fieldMapping   *mapping.FieldMapping
	analyzer       string
	datetimeParser string
	rootName       string
	parent         *pathInfo
}

// Compare two index mappings to identify all of the updatable changes
func DeletedFields(ori, upd *mapping.IndexMappingImpl) (map[string]*index.UpdateFieldInfo, error) {
	// Compare all of the top level fields in an index mapping
	err := compareMappings(ori, upd)
	if err != nil {
		return nil, err
	}

	// Check for new mappings present in the type mappings
	// of the updated compared to the original
	for name, updDMapping := range upd.TypeMapping {
		err = checkUpdatedMapping(ori.TypeMapping[name], updDMapping)
		if err != nil {
			return nil, err
		}
	}

	// Check for new mappings present in the default mappings
	// of the updated compared to the original
	err = checkUpdatedMapping(ori.DefaultMapping, upd.DefaultMapping)
	if err != nil {
		return nil, err
	}

	oriPaths := make(map[string]*pathInfo)
	updPaths := make(map[string]*pathInfo)

	// Go through each mapping present in the original
	// and consolidate according to the document paths
	for name, oriDMapping := range ori.TypeMapping {
		addPathInfo(oriPaths, "", oriDMapping, ori, nil, name)
	}
	addPathInfo(oriPaths, "", ori.DefaultMapping, ori, nil, "")

	// Go through each mapping present in the updated
	// and consolidate according to the document paths
	for name, updDMapping := range upd.TypeMapping {
		addPathInfo(updPaths, "", updDMapping, upd, nil, name)
	}
	addPathInfo(updPaths, "", upd.DefaultMapping, upd, nil, "")

	// Compare all components of custom analysis currently in use
	err = compareCustomComponents(oriPaths, updPaths, ori, upd)
	if err != nil {
		return nil, err
	}

	// Compare both the mappings based on the document paths
	// and create a list of index, docvalues, store differences
	// for every single field possible
	fieldInfo := make(map[string]*index.UpdateFieldInfo)
	for path, info := range oriPaths {
		err = addFieldInfo(fieldInfo, info, updPaths[path])
		if err != nil {
			return nil, err
		}
	}

	// Remove entries from the list with no changes between the
	// original and the updated mapping
	for name, info := range fieldInfo {
		if !info.Deleted && !info.Index && !info.DocValues && !info.Store {
			delete(fieldInfo, name)
		}
		// A field cannot be completely deleted with any dynamic value turned on
		if info.Deleted {
			if upd.IndexDynamic {
				return nil, fmt.Errorf("Mapping cannot be removed when index dynamic is true")
			}
			if upd.StoreDynamic {
				return nil, fmt.Errorf("Mapping cannot be removed when store dynamic is true")
			}
			if upd.DocValuesDynamic {
				return nil, fmt.Errorf("Mapping cannot be removed when docvalues dynamic is true")
			}
		}
	}
	return fieldInfo, nil
}
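A hedged sketch of the kind of change DeletedFields reports, assuming a text field `title` under the default mapping and all dynamic flags disabled on both mappings (field and variable names are illustrative):

```go
ori := bleve.NewIndexMapping()
ori.IndexDynamic, ori.StoreDynamic, ori.DocValuesDynamic = false, false, false
tOri := bleve.NewTextFieldMapping()
tOri.IncludeInAll = false
ori.DefaultMapping.AddFieldMappingsAt("title", tOri)

upd := bleve.NewIndexMapping()
upd.IndexDynamic, upd.StoreDynamic, upd.DocValuesDynamic = false, false, false
tUpd := bleve.NewTextFieldMapping()
tUpd.IncludeInAll = false
tUpd.Store = false // the only difference from the original
upd.DefaultMapping.AddFieldMappingsAt("title", tUpd)

info, err := bleve.DeletedFields(ori, upd)
// expected: err == nil and info["title"].Store == true, i.e. only the
// stored values for "title" need to be dropped
```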
// Ensures none of the top level index mapping fields have changed
func compareMappings(ori, upd *mapping.IndexMappingImpl) error {
	if ori.TypeField != upd.TypeField &&
		(len(ori.TypeMapping) != 0 || len(upd.TypeMapping) != 0) {
		return fmt.Errorf("type field cannot be changed when type mappings are present")
	}

	if ori.DefaultType != upd.DefaultType {
		return fmt.Errorf("default type cannot be changed")
	}

	if ori.IndexDynamic != upd.IndexDynamic {
		return fmt.Errorf("index dynamic cannot be changed")
	}

	if ori.StoreDynamic != upd.StoreDynamic {
		return fmt.Errorf("store dynamic cannot be changed")
	}

	if ori.DocValuesDynamic != upd.DocValuesDynamic {
		return fmt.Errorf("docvalues dynamic cannot be changed")
	}

	if ori.DefaultAnalyzer != upd.DefaultAnalyzer && upd.IndexDynamic {
		return fmt.Errorf("default analyser cannot be changed if index dynamic is true")
	}

	if ori.DefaultDateTimeParser != upd.DefaultDateTimeParser && upd.IndexDynamic {
		return fmt.Errorf("default datetime parser cannot be changed if index dynamic is true")
	}

	// Scoring model changes between "", "tf-idf" and "bm25" require no index changes to be made
	if ori.ScoringModel != upd.ScoringModel {
		if ori.ScoringModel != "" && ori.ScoringModel != index.TFIDFScoring && ori.ScoringModel != index.BM25Scoring ||
			upd.ScoringModel != "" && upd.ScoringModel != index.TFIDFScoring && upd.ScoringModel != index.BM25Scoring {
			return fmt.Errorf("scoring model can only be changed between \"\", %q and %q", index.TFIDFScoring, index.BM25Scoring)
		}
	}

	return nil
}

// Ensures updated document mapping does not contain new
// field mappings or document mappings
func checkUpdatedMapping(ori, upd *mapping.DocumentMapping) error {
	// Check to verify both original and updated are not nil
	// and are enabled before proceeding
	if ori == nil {
		if upd == nil || !upd.Enabled {
			return nil
		}
		return fmt.Errorf("updated index mapping contains new properties")
	}

	if upd == nil || !upd.Enabled {
		return nil
	}

	var err error
	// Recursively go through the child mappings
	for name, updDMapping := range upd.Properties {
		err = checkUpdatedMapping(ori.Properties[name], updDMapping)
		if err != nil {
			return err
		}
	}

	// Simple checks to ensure no new field mappings present
	// in updated
	for _, updFMapping := range upd.Fields {
		var oriFMapping *mapping.FieldMapping
		for _, fMapping := range ori.Fields {
			if updFMapping.Name == fMapping.Name {
				oriFMapping = fMapping
			}
		}
		if oriFMapping == nil {
			return fmt.Errorf("updated index mapping contains new fields")
		}
	}

	return nil
}
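For instance, any change to a top-level dynamic flag is rejected before field-level comparison even starts. A quick sketch using the same DeletedFields entry point:

```go
ori := bleve.NewIndexMapping()
upd := bleve.NewIndexMapping()
upd.StoreDynamic = false // differs from ori's default

_, err := bleve.DeletedFields(ori, upd)
// err: "store dynamic cannot be changed"
```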
// Adds all of the field mappings while maintaining a tree of the document structure
// to ensure traversal and verification is possible in case of multiple mappings defined
// for a single field or multiple document fields' data getting written to a single zapx field
func addPathInfo(paths map[string]*pathInfo, name string, mp *mapping.DocumentMapping,
	im *mapping.IndexMappingImpl, parent *pathInfo, rootName string) {
	// Early exit if mapping has been disabled
	// Comparisons later on will be done with a nil object
	if !mp.Enabled {
		return
	}

	// Consolidate path information like index dynamic across multiple
	// mappings if path is the same
	var pInfo *pathInfo
	if val, ok := paths[name]; ok {
		pInfo = val
	} else {
		pInfo = &pathInfo{
			fieldMapInfo: make([]*fieldMapInfo, 0),
		}
		pInfo.dynamic = mp.Dynamic && im.IndexDynamic
		pInfo.analyser = im.AnalyzerNameForPath(name)
	}

	pInfo.dynamic = (pInfo.dynamic || mp.Dynamic) && im.IndexDynamic
	pInfo.path = name
	if parent != nil {
		pInfo.parentPath = parent.path
	}

	// Recursively add path information for all child mappings
	for cName, cMapping := range mp.Properties {
		var pathName string
		if name == "" {
			pathName = cName
		} else {
			pathName = name + "." + cName
		}
		addPathInfo(paths, pathName, cMapping, im, pInfo, rootName)
	}

	// Add field mapping information keeping the document structure intact
	for _, fMap := range mp.Fields {
		fieldMapInfo := &fieldMapInfo{
			fieldMapping: fMap,
			rootName:     rootName,
			parent:       pInfo,
		}
		pInfo.fieldMapInfo = append(pInfo.fieldMapInfo, fieldMapInfo)
	}

	paths[name] = pInfo
}

// Compares all of the custom analysis components in use
func compareCustomComponents(oriPaths, updPaths map[string]*pathInfo, ori, upd *mapping.IndexMappingImpl) error {
	// Compare all analysers currently in use
	err := compareAnalysers(oriPaths, updPaths, ori, upd)
	if err != nil {
		return err
	}

	// Compare all datetime parsers currently in use
	err = compareDateTimeParsers(oriPaths, updPaths, ori, upd)
	if err != nil {
		return err
	}

	// Compare all synonym sources
	err = compareSynonymSources(ori, upd)
	if err != nil {
		return err
	}

	// Compare all char filters, tokenizers, token filters and token maps
	err = compareAnalyserSubcomponents(ori, upd)
	if err != nil {
		return err
	}

	return nil
}

// Compares all analysers currently in use
// Standard analysers not in custom analysis are not compared
// Analysers in custom analysis but not in use are not compared
func compareAnalysers(oriPaths, updPaths map[string]*pathInfo, ori, upd *mapping.IndexMappingImpl) error {
	oriAnalyzers := make(map[string]interface{})
	updAnalyzers := make(map[string]interface{})

	extractAnalyzers := func(paths map[string]*pathInfo, customAnalyzers map[string]map[string]interface{},
		analyzers map[string]interface{}, indexMapping *mapping.IndexMappingImpl) {
		for path, info := range paths {
			for _, fInfo := range info.fieldMapInfo {
				if fInfo.fieldMapping.Type == "text" {
					analyzerName := indexMapping.AnalyzerNameForPath(path)
					fInfo.analyzer = analyzerName
					if val, ok := customAnalyzers[analyzerName]; ok {
						analyzers[analyzerName] = val
					}
				}
			}
		}
	}

	extractAnalyzers(oriPaths, ori.CustomAnalysis.Analyzers, oriAnalyzers, ori)
	extractAnalyzers(updPaths, upd.CustomAnalysis.Analyzers, updAnalyzers, upd)

	for name, anUpd := range updAnalyzers {
		if anOri, ok := oriAnalyzers[name]; ok {
			if !reflect.DeepEqual(anUpd, anOri) {
				return fmt.Errorf("analyser %s changed while being used by fields", name)
			}
		} else {
			return fmt.Errorf("analyser %s newly added to an existing field", name)
		}
	}

	return nil
}

// Compares all date time parsers currently in use
// Date time parsers in custom analysis but not in use are not compared
func compareDateTimeParsers(oriPaths, updPaths map[string]*pathInfo, ori, upd *mapping.IndexMappingImpl) error {
	oriDateTimeParsers := make(map[string]interface{})
	updDateTimeParsers := make(map[string]interface{})

	extractDateTimeParsers := func(paths map[string]*pathInfo, customParsers map[string]map[string]interface{},
		parsers map[string]interface{}, indexMapping *mapping.IndexMappingImpl) {
		for _, info := range paths {
			for _, fInfo := range info.fieldMapInfo {
				if fInfo.fieldMapping.Type == "datetime" {
					parserName := fInfo.fieldMapping.DateFormat
					if parserName == "" {
						parserName = indexMapping.DefaultDateTimeParser
					}
					fInfo.datetimeParser = parserName
					if val, ok := customParsers[parserName]; ok {
						parsers[parserName] = val
					}
				}
			}
		}
	}

	extractDateTimeParsers(oriPaths, ori.CustomAnalysis.DateTimeParsers, oriDateTimeParsers, ori)
	extractDateTimeParsers(updPaths, upd.CustomAnalysis.DateTimeParsers, updDateTimeParsers, upd)

	for name, dtUpd := range updDateTimeParsers {
		if dtOri, ok := oriDateTimeParsers[name]; ok {
			if !reflect.DeepEqual(dtUpd, dtOri) {
				return fmt.Errorf("datetime parser %s changed while being used by fields", name)
			}
		} else {
			return fmt.Errorf("datetime parser %s added to an existing field", name)
		}
	}

	return nil
}

// Compares all synonym sources
// Synonym sources currently not in use are also compared
func compareSynonymSources(ori, upd *mapping.IndexMappingImpl) error {
	if !reflect.DeepEqual(ori.CustomAnalysis.SynonymSources, upd.CustomAnalysis.SynonymSources) {
		return fmt.Errorf("synonym sources cannot be changed")
	}

	return nil
}

// Compares all char filters, tokenizers, token filters and token maps
// Components not currently in use are also compared
func compareAnalyserSubcomponents(ori, upd *mapping.IndexMappingImpl) error {
	if !reflect.DeepEqual(ori.CustomAnalysis.CharFilters, upd.CustomAnalysis.CharFilters) {
		return fmt.Errorf("char filters cannot be changed")
	}

	if !reflect.DeepEqual(ori.CustomAnalysis.TokenFilters, upd.CustomAnalysis.TokenFilters) {
		return fmt.Errorf("token filters cannot be changed")
	}

	if !reflect.DeepEqual(ori.CustomAnalysis.TokenMaps, upd.CustomAnalysis.TokenMaps) {
		return fmt.Errorf("token maps cannot be changed")
	}

	if !reflect.DeepEqual(ori.CustomAnalysis.Tokenizers, upd.CustomAnalysis.Tokenizers) {
		return fmt.Errorf("tokenizers cannot be changed")
	}

	return nil
}
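As a hedged illustration of compareAnalysers: redefining a custom analyzer that an existing text field still uses aborts the update. The analyzer name is illustrative, and `"custom"`, `"single"` and `"to_lower"` are assumed registered component names:

```go
_ = ori.AddCustomAnalyzer("keyword_lc", map[string]interface{}{
	"type":          "custom",
	"tokenizer":     "single",
	"token_filters": []string{"to_lower"},
})
_ = upd.AddCustomAnalyzer("keyword_lc", map[string]interface{}{
	"type":      "custom",
	"tokenizer": "single", // token_filters dropped: definition now differs
})
// a text field mapped with Analyzer "keyword_lc" in both mappings then
// fails: "analyser keyword_lc changed while being used by fields"
```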
// Compare all of the fields at a particular document path and add its field information
func addFieldInfo(fInfo map[string]*index.UpdateFieldInfo, ori, upd *pathInfo) error {
	var info *index.UpdateFieldInfo
	var err error

	// Assume deleted or disabled mapping if upd is nil. Checks for ori being nil
	// or upd having mappings not in ori have already been done before this stage
	if upd == nil {
		for _, oriFMapInfo := range ori.fieldMapInfo {
			info, err = compareFieldMapping(oriFMapInfo.fieldMapping, nil)
			if err != nil {
				return err
			}
			err = validateFieldInfo(info, fInfo, ori, oriFMapInfo)
			if err != nil {
				return err
			}
		}
	} else {
		if upd.dynamic && ori.analyser != upd.analyser {
			return fmt.Errorf("analyser has been changed for a dynamic mapping")
		}
		for _, oriFMapInfo := range ori.fieldMapInfo {
			var updFMap *mapping.FieldMapping
			var updAnalyser string
			var updDatetimeParser string

			// For multiple fields at a single document path, compare
			// only with the matching ones
			for _, updFMapInfo := range upd.fieldMapInfo {
				if oriFMapInfo.rootName == updFMapInfo.rootName &&
					oriFMapInfo.fieldMapping.Name == updFMapInfo.fieldMapping.Name {
					updFMap = updFMapInfo.fieldMapping
					if updFMap.Type == "text" {
						updAnalyser = updFMapInfo.analyzer
					} else if updFMap.Type == "datetime" {
						updDatetimeParser = updFMapInfo.datetimeParser
					}
				}
			}
			// Compare analyser, datetime parser and synonym source before comparing
			// the field mapping as it might not have this information
			if updAnalyser != "" && oriFMapInfo.analyzer != updAnalyser {
				return fmt.Errorf("analyser has been changed for a text field")
			}
			if updDatetimeParser != "" && oriFMapInfo.datetimeParser != updDatetimeParser {
				return fmt.Errorf("datetime parser has been changed for a date time field")
			}
			info, err = compareFieldMapping(oriFMapInfo.fieldMapping, updFMap)
			if err != nil {
				return err
			}

			// Validate to ensure change is possible
			// Needed if multiple mappings are aliased to the same field
			err = validateFieldInfo(info, fInfo, ori, oriFMapInfo)
			if err != nil {
				return err
			}
		}
	}
	if err != nil {
		return err
	}

	return nil
}

// Compares two field mappings against each other, checking for changes in index, store, doc values
// and complete deletion of the mapping, while noting that the changes made are doable based on
// other values like includeInAll and dynamic.
// The first return argument gives an empty fieldInfo if no changes are detected;
// the second return argument is an error explaining exactly why a change is not possible.
func compareFieldMapping(original, updated *mapping.FieldMapping) (*index.UpdateFieldInfo, error) {
	rv := &index.UpdateFieldInfo{}

	if updated == nil {
		if original != nil && !original.IncludeInAll {
			rv.Deleted = true
			return rv, nil
		} else if original == nil {
			return nil, fmt.Errorf("both field mappings cannot be nil")
		}
		return nil, fmt.Errorf("deleted field present in '_all' field")
	} else if original == nil {
		return nil, fmt.Errorf("matching field not found in original index mapping")
	}

	if original.Type != updated.Type {
		return nil, fmt.Errorf("field type cannot be updated")
	}
	if original.Type == "text" {
		if original.Analyzer != updated.Analyzer {
			return nil, fmt.Errorf("analyzer cannot be updated for text fields")
		}
	}
	if original.Type == "datetime" {
		if original.DateFormat != updated.DateFormat {
			return nil, fmt.Errorf("dateFormat cannot be updated for datetime fields")
		}
	}
	if original.Type == "vector" || original.Type == "vector_base64" {
		if original.Dims != updated.Dims {
			return nil, fmt.Errorf("dimensions cannot be updated for vector and vector_base64 fields")
		}
		if original.Similarity != updated.Similarity {
			return nil, fmt.Errorf("similarity cannot be updated for vector and vector_base64 fields")
		}
		if original.VectorIndexOptimizedFor != updated.VectorIndexOptimizedFor {
			return nil, fmt.Errorf("vectorIndexOptimizedFor cannot be updated for vector and vector_base64 fields")
		}
	}
	if original.IncludeInAll != updated.IncludeInAll {
		return nil, fmt.Errorf("includeInAll cannot be changed")
	}
	if original.IncludeTermVectors != updated.IncludeTermVectors {
		return nil, fmt.Errorf("includeTermVectors cannot be changed")
	}
	if original.SkipFreqNorm != updated.SkipFreqNorm {
		return nil, fmt.Errorf("skipFreqNorm cannot be changed")
	}

	// Updating is not possible if store changes from true
	// to false when the field is included in _all
	if original.Store != updated.Store {
		if updated.Store {
			return nil, fmt.Errorf("store cannot be changed from false to true")
		} else if updated.IncludeInAll {
			return nil, fmt.Errorf("store cannot be changed if field present in `_all' field")
		} else {
			rv.Store = true
		}
	}

	// Updating is not possible if index changes from true
	// to false when the field is included in _all
	if original.Index != updated.Index {
		if updated.Index {
			return nil, fmt.Errorf("index cannot be changed from false to true")
		} else if updated.IncludeInAll {
			return nil, fmt.Errorf("index cannot be changed if field present in `_all' field")
		} else {
			rv.Index = true
			rv.DocValues = true
		}
	}

	// Updating is not possible if docvalues changes from true
	// to false when the field is included in _all
	if original.DocValues != updated.DocValues {
		if updated.DocValues {
			return nil, fmt.Errorf("docvalues cannot be changed from false to true")
		} else if updated.IncludeInAll {
			return nil, fmt.Errorf("docvalues cannot be changed if field present in `_all' field")
		} else {
			rv.DocValues = true
		}
	}

	return rv, nil
}

// After identifying changes, validate against the existing changes in case of duplicate fields.
// In such a situation, any conflicting changes found will abort the update process
func validateFieldInfo(newInfo *index.UpdateFieldInfo, fInfo map[string]*index.UpdateFieldInfo,
	ori *pathInfo, oriFMapInfo *fieldMapInfo) error {
	var name string
	if oriFMapInfo.parent.parentPath == "" {
		if oriFMapInfo.fieldMapping.Name == "" {
			name = oriFMapInfo.parent.path
		} else {
			name = oriFMapInfo.fieldMapping.Name
		}
	} else {
		if oriFMapInfo.fieldMapping.Name == "" {
			name = oriFMapInfo.parent.parentPath + "." + oriFMapInfo.parent.path
		} else {
			name = oriFMapInfo.parent.parentPath + "." + oriFMapInfo.fieldMapping.Name
		}
	}
	if (newInfo.Deleted || newInfo.Index || newInfo.DocValues || newInfo.Store) && ori.dynamic {
		return fmt.Errorf("updated field is under a dynamic property")
	}
	if oldInfo, ok := fInfo[name]; ok {
		if !reflect.DeepEqual(oldInfo, newInfo) {
			return fmt.Errorf("updated field impossible to verify because multiple mappings point to the same field name")
		}
	} else {
		fInfo[name] = newInfo
	}
	return nil
}
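To summarize compareFieldMapping in one hedged sketch: flags may only be narrowed (true to false), and only on fields excluded from `_all`:

```go
fmOri := bleve.NewTextFieldMapping() // Index, Store, DocValues all true
fmOri.IncludeInAll = false

fmUpd := bleve.NewTextFieldMapping()
fmUpd.IncludeInAll = false
fmUpd.Index = false // legal narrowing; also implies docvalues cleanup

// compared via DeletedFields, this pair yields an UpdateFieldInfo with
// Index and DocValues set; flipping a flag false-to-true instead errors.
```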
vendor/github.com/blevesearch/bleve/v2/numeric/bin.go (generated, vendored)
@@ -13,7 +13,7 @@ var interleaveMagic = []uint64{
 var interleaveShift = []uint{1, 2, 4, 8, 16}

 // Interleave the first 32 bits of each uint64
-// apdated from org.apache.lucene.util.BitUtil
+// adapted from org.apache.lucene.util.BitUtil
 // which was adapted from:
 // http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN
 func Interleave(v1, v2 uint64) uint64 {
vendor/github.com/blevesearch/bleve/v2/registry/store.go (generated, vendored)
@@ -30,7 +30,7 @@ func RegisterKVStore(name string, constructor KVStoreConstructor) error {
 }

 // KVStoreConstructor is used to build a KVStore of a specific type when
-// specificied by the index configuration. In addition to meeting the
+// specified by the index configuration. In addition to meeting the
 // store.KVStore interface, KVStores must also support this constructor.
 // Note that currently the values of config must
 // be able to be marshaled and unmarshaled using the encoding/json library (used
vendor/github.com/blevesearch/bleve/v2/rescorer.go (generated, vendored, new file, 162 lines)
@@ -0,0 +1,162 @@
// Copyright (c) 2025 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bleve

import (
	"github.com/blevesearch/bleve/v2/fusion"
	"github.com/blevesearch/bleve/v2/search"
	"github.com/blevesearch/bleve/v2/search/query"
)

const (
	DefaultScoreRankConstant = 60
)

// Rescorer is applied after all the query and knn results are obtained.
// The main use of Rescorer is in hybrid search; all the individual scores
// for query and knn are combined using Rescorer. Makes use of algorithms
// defined in `fusion`
type rescorer struct {
	req *SearchRequest

	// Stores the original From, Size and Boost parameters from the request
	origFrom   int
	origSize   int
	origBoosts []float64

	// Flag variable to make sure that restoreSearchRequest is only run once
	// when it is deferred
	restored bool
}

// Stores information about the hybrid search into the rescorer.
// Also mutates the SearchRequest by:
//   - Setting boosts to 1: top level boosts only used for rescoring
//   - Setting From and Size to 0 and ScoreWindowSize
func (r *rescorer) prepareSearchRequest() error {
	if r.req.Params == nil {
		r.req.Params = NewDefaultParams(r.req.From, r.req.Size)
	}

	r.origFrom = r.req.From
	r.origSize = r.req.Size

	r.req.From = 0
	r.req.Size = r.req.Params.ScoreWindowSize

	// req.Query's top level boost comes first, followed by the KNN queries
	numQueries := numKNNQueries(r.req) + 1
	r.origBoosts = make([]float64, numQueries)

	// only modify the query if it is boostable; if not, ignore
	if bQuery, ok := r.req.Query.(query.BoostableQuery); ok {
		r.origBoosts[0] = bQuery.Boost()
		bQuery.SetBoost(1.0)
	} else {
		r.origBoosts[0] = 1.0
	}

	// for all the knn queries, replace boost values
	r.prepareKnnRequest()

	return nil
}

func (r *rescorer) restoreSearchRequest() {
	// Skip if already restored
	if r.restored {
		return
	}
	r.restored = true

	r.req.From = r.origFrom
	r.req.Size = r.origSize

	if bQuery, ok := r.req.Query.(query.BoostableQuery); ok {
		bQuery.SetBoost(r.origBoosts[0])
	}

	// for all the knn queries, restore boost values
	r.restoreKnnRequest()
}

func (r *rescorer) rescore(ftsHits, knnHits search.DocumentMatchCollection) (search.DocumentMatchCollection, uint64, float64) {
	mergedHits := r.mergeDocs(ftsHits, knnHits)

	var fusionResult *fusion.FusionResult

	switch r.req.Score {
	case ScoreRRF:
		res := fusion.ReciprocalRankFusion(
			mergedHits,
			r.origBoosts,
			r.req.Params.ScoreRankConstant,
			r.req.Params.ScoreWindowSize,
			numKNNQueries(r.req),
			r.req.Explain,
		)
		fusionResult = &res
	case ScoreRSF:
		res := fusion.RelativeScoreFusion(
			mergedHits,
			r.origBoosts,
			r.req.Params.ScoreWindowSize,
			numKNNQueries(r.req),
			r.req.Explain,
		)
		fusionResult = &res
	}

	return fusionResult.Hits, fusionResult.Total, fusionResult.MaxScore
}

// Merge all the FTS and KNN docs along with explanations
func (r *rescorer) mergeDocs(ftsHits, knnHits search.DocumentMatchCollection) search.DocumentMatchCollection {
	if len(knnHits) == 0 {
		return ftsHits
	}

	knnHitMap := make(map[string]*search.DocumentMatch, len(knnHits))

	for _, hit := range knnHits {
		knnHitMap[hit.ID] = hit
	}

	for _, hit := range ftsHits {
		if knnHit, ok := knnHitMap[hit.ID]; ok {
			hit.ScoreBreakdown = knnHit.ScoreBreakdown
			if r.req.Explain {
				hit.Expl = &search.Explanation{Value: 0.0, Message: "", Children: append([]*search.Explanation{hit.Expl}, knnHit.Expl.Children...)}
			}
			delete(knnHitMap, hit.ID)
		}
	}

	for _, hit := range knnHitMap {
		hit.Score = 0
		ftsHits = append(ftsHits, hit)
		if r.req.Explain {
			hit.Expl = &search.Explanation{Value: 0.0, Message: "", Children: append([]*search.Explanation{nil}, hit.Expl.Children...)}
		}
	}

	return ftsHits
}

func newRescorer(req *SearchRequest) *rescorer {
	return &rescorer{
		req: req,
	}
}
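A hedged sketch of a hybrid search that exercises this rescorer, assuming a vector-enabled build of bleve and an index with an `embedding` vector field (names and the query vector are illustrative):

```go
req := bleve.NewSearchRequest(bleve.NewMatchQuery("espresso brewing"))
req.AddKNN("embedding", queryVector, 10, 1.0) // vector-enabled builds only
req.Score = bleve.ScoreRRF                    // triggers IsScoreFusionRequested
res, err := idx.Search(req)
// res.Hits are RRF-fused FTS + kNN results, trimmed back to the original
// From/Size window by hitsInCurrentPage
```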
vendor/github.com/blevesearch/bleve/v2/search.go (generated, vendored)
@@ -18,6 +18,7 @@ import (
 	"fmt"
 	"reflect"
 	"sort"
+	"strconv"
 	"time"

 	"github.com/blevesearch/bleve/v2/analysis"
@@ -47,6 +48,15 @@ var cache = registry.NewCache()

 const defaultDateTimeParser = optional.Name

+const (
+	ScoreDefault = ""
+	ScoreNone    = "none"
+	ScoreRRF     = "rrf"
+	ScoreRSF     = "rsf"
+)
+
+var AllowedFusionSort = search.SortOrder{&search.SortScore{Desc: true}}
+
 type dateTimeRange struct {
 	Name  string    `json:"name,omitempty"`
 	Start time.Time `json:"start,omitempty"`
@@ -311,13 +321,71 @@ func (r *SearchRequest) Validate() error {
 		}
 	}

-	err := validateKNN(r)
+	err := r.validatePagination()
+	if err != nil {
+		return err
+	}
+
+	if IsScoreFusionRequested(r) {
+		if r.SearchAfter != nil || r.SearchBefore != nil {
+			return fmt.Errorf("cannot use search after or search before with score fusion")
+		}
+
+		if r.Sort != nil {
+			if !reflect.DeepEqual(r.Sort, AllowedFusionSort) {
+				return fmt.Errorf("sort must be empty or descending order of score for score fusion")
+			}
+		}
+	}
+
+	err = validateKNN(r)
 	if err != nil {
 		return err
 	}
 	return r.Facets.Validate()
 }

+// Validates SearchAfter/SearchBefore
+func (r *SearchRequest) validatePagination() error {
+	var pagination []string
+	var afterOrBefore string
+
+	if r.SearchAfter != nil {
+		pagination = r.SearchAfter
+		afterOrBefore = "search after"
+	} else if r.SearchBefore != nil {
+		pagination = r.SearchBefore
+		afterOrBefore = "search before"
+	} else {
+		return nil
+	}
+
+	for i := range pagination {
+		switch ss := r.Sort[i].(type) {
+		case *search.SortGeoDistance:
+			_, err := strconv.ParseFloat(pagination[i], 64)
+			if err != nil {
+				return fmt.Errorf("invalid %s value for sort field '%s': '%s'. %s", afterOrBefore, ss.Field, pagination[i], err)
+			}
+		case *search.SortField:
+			switch ss.Type {
+			case search.SortFieldAsNumber:
+				_, err := strconv.ParseFloat(pagination[i], 64)
+				if err != nil {
+					return fmt.Errorf("invalid %s value for sort field '%s': '%s'. %s", afterOrBefore, ss.Field, pagination[i], err)
+				}
+			case search.SortFieldAsDate:
+				_, err := time.Parse(time.RFC3339Nano, pagination[i])
+				if err != nil {
+					return fmt.Errorf("invalid %s value for sort field '%s': '%s'. %s", afterOrBefore, ss.Field, pagination[i], err)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
 // AddFacet adds a FacetRequest to this SearchRequest
 func (r *SearchRequest) AddFacet(facetName string, f *FacetRequest) {
 	if r.Facets == nil {
@@ -353,6 +421,11 @@ func (r *SearchRequest) SetSearchBefore(before []string) {
 	r.SearchBefore = before
 }

+// AddParams adds a RequestParams field to the search request
+func (r *SearchRequest) AddParams(params RequestParams) {
+	r.Params = &params
+}
+
 // NewSearchRequest creates a new SearchRequest
 // for the Query, using default values for all
 // other search parameters.
@@ -377,7 +450,7 @@ func NewSearchRequestOptions(q query.Query, size, from int, explain bool) *Searc
 // IndexErrMap tracks errors with the name of the index where it occurred
 type IndexErrMap map[string]error

-// MarshalJSON seralizes the error into a string for JSON consumption
+// MarshalJSON serializes the error into a string for JSON consumption
 func (iem IndexErrMap) MarshalJSON() ([]byte, error) {
 	tmp := make(map[string]string, len(iem))
 	for k, v := range iem {
@@ -398,7 +471,7 @@ func (iem IndexErrMap) UnmarshalJSON(data []byte) error {
 	return nil
 }

-// SearchStatus is a secion in the SearchResult reporting how many
+// SearchStatus is a section in the SearchResult reporting how many
 // underlying indexes were queried, how many were successful/failed
 // and a map of any errors that were encountered
 type SearchStatus struct {
@@ -433,7 +506,7 @@ func (ss *SearchStatus) Merge(other *SearchStatus) {
 // scores, score explanation, location info and so on.
 // Total - The total number of documents that matched the query.
 // Cost - indicates how expensive was the query with respect to bytes read
-// from the mmaped index files.
+// from the mapped index files.
 // MaxScore - The maximum score seen across all document hits seen for this query.
 // Took - The time taken to execute the search.
 // Facets - The facet results for the search.
@@ -607,3 +680,79 @@ func isMatchAllQuery(q query.Query) bool {
 	_, ok := q.(*query.MatchAllQuery)
 	return ok
 }
+
+// Checks if the request is hybrid search. Currently supports: RRF, RSF.
+func IsScoreFusionRequested(req *SearchRequest) bool {
+	switch req.Score {
+	case ScoreRRF, ScoreRSF:
+		return true
+	default:
+		return false
+	}
+}
+
+// Additional parameters in the search request. Currently only being
+// used for score fusion parameters.
+type RequestParams struct {
+	ScoreRankConstant int `json:"score_rank_constant,omitempty"`
+	ScoreWindowSize   int `json:"score_window_size,omitempty"`
+}
+
+func NewDefaultParams(from, size int) *RequestParams {
+	return &RequestParams{
+		ScoreRankConstant: DefaultScoreRankConstant,
+		ScoreWindowSize:   from + size,
+	}
+}
+
+func (p *RequestParams) UnmarshalJSON(input []byte) error {
+	var temp struct {
+		ScoreRankConstant *int `json:"score_rank_constant,omitempty"`
+		ScoreWindowSize   *int `json:"score_window_size,omitempty"`
+	}
+
+	if err := util.UnmarshalJSON(input, &temp); err != nil {
+		return err
+	}
+
+	if temp.ScoreRankConstant != nil {
+		p.ScoreRankConstant = *temp.ScoreRankConstant
+	}
+
+	if temp.ScoreWindowSize != nil {
+		p.ScoreWindowSize = *temp.ScoreWindowSize
+	}
+
+	return nil
+}
+
+func (p *RequestParams) Validate(size int) error {
+	if p.ScoreWindowSize < 1 {
+		return fmt.Errorf("score window size must be greater than 0")
+	} else if p.ScoreWindowSize < size {
+		return fmt.Errorf("score window size must be greater than or equal to Size (%d)", size)
+	}

+	return nil
+}
+
+func ParseParams(r *SearchRequest, input []byte) (*RequestParams, error) {
+	params := NewDefaultParams(r.From, r.Size)
+	if len(input) == 0 {
+		return params, nil
+	}
+
+	err := util.UnmarshalJSON(input, params)
+	if err != nil {
+		return nil, err
+	}
+
+	// validate params
+	err = params.Validate(r.Size)
+	if err != nil {
+		return nil, err
+	}
+
+	return params, nil
+}
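The request-level JSON these helpers accept looks roughly like the following. This is a hedged sketch: both `params` keys are optional and default through NewDefaultParams, and the params bytes are presumably routed through ParseParams by the (truncated) SearchRequest.UnmarshalJSON hunk further below.

```go
raw := []byte(`{
	"query": {"match_all": {}},
	"score": "rrf",
	"params": {"score_rank_constant": 60, "score_window_size": 100}
}`)
var req bleve.SearchRequest
if err := json.Unmarshal(raw, &req); err != nil {
	log.Fatal(err)
}
```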
vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go (generated, vendored)
@@ -20,6 +20,7 @@ import (
 	"strconv"
 	"time"

+	"github.com/blevesearch/bleve/v2/numeric"
 	"github.com/blevesearch/bleve/v2/search"
 	"github.com/blevesearch/bleve/v2/size"
 	index "github.com/blevesearch/bleve_index_api"
@@ -117,9 +118,15 @@ func newTopNCollector(size int, skip int, sort search.SortOrder) *TopNCollector
 	return hc
 }

+// Creates a dummy document to compare with for pagination.
 func createSearchAfterDocument(sort search.SortOrder, after []string) *search.DocumentMatch {
+	encodedAfter := make([]string, len(after))
+	for i, ss := range sort {
+		encodedAfter[i] = encodeSearchAfter(ss, after[i])
+	}
+
 	rv := &search.DocumentMatch{
-		Sort: after,
+		Sort: encodedAfter,
 	}
 	for pos, ss := range sort {
 		if ss.RequiresDocID() {
@@ -134,6 +141,46 @@ func createSearchAfterDocument(sort search.SortOrder, after []string) *search.Do
 	return rv
 }

+// encodeSearchAfter applies prefix-coding to SearchAfter
+// if required to enable pagination on numeric, datetime,
+// and geo fields
+func encodeSearchAfter(ss search.SearchSort, after string) string {
+	encodeFloat := func() string {
+		f64, _ := strconv.ParseFloat(after, 64) // error checking in SearchRequest.Validate
+		i64 := numeric.Float64ToInt64(f64)
+		return string(numeric.MustNewPrefixCodedInt64(i64, 0))
+	}
+
+	encodeDate := func() string {
+		t, _ := time.Parse(time.RFC3339Nano, after) // error checking in SearchRequest.Validate
+		i64 := t.UnixNano()
+		return string(numeric.MustNewPrefixCodedInt64(i64, 0))
+	}
+
+	switch ss := ss.(type) {
+	case *search.SortGeoDistance:
+		return encodeFloat()
+	case *search.SortField:
+		switch ss.Type {
+		case search.SortFieldAsNumber:
+			return encodeFloat()
+		case search.SortFieldAsDate:
+			return encodeDate()
+		default:
+			// For SortFieldAsString and SortFieldAuto
+			// NOTE: SortFieldAuto is used if you set Sort with a string
+			// or if the type of the field is not set in the object
+			// in the Sort slice. We cannot perform type inference in
+			// this case, so we return the original string, even if
+			// it's actually numeric or a date.
+			return after
+		}
+	default:
+		// For SortDocID and SortScore
+		return after
+	}
+}
+
 // Filter document matches based on the SearchAfter field in the SearchRequest.
 func FilterHitsBySearchAfter(hits []*search.DocumentMatch, sort search.SortOrder, after []string) []*search.DocumentMatch {
 	if len(hits) == 0 {
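A hedged sketch of pagination that benefits from this encoding: sorting by an explicitly typed date field and paging past the last hit. Field names and values are illustrative, and the date string must satisfy validatePagination's RFC3339Nano check; note the code comment above, an untyped string sort falls into SortFieldAuto and is not encoded.

```go
req := bleve.NewSearchRequest(bleve.NewMatchAllQuery())
req.Sort = search.SortOrder{
	&search.SortField{Field: "created_at", Type: search.SortFieldAsDate},
	&search.SortDocID{},
}
req.SetSearchAfter([]string{"2025-10-27T00:00:00Z", lastSeenDocID})
// createSearchAfterDocument prefix-codes the date before comparing, so
// hits are filtered against the encoded sort key rather than raw text
```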
vendor/github.com/blevesearch/bleve/v2/search/pool.go (generated, vendored)
@@ -31,7 +31,7 @@ func init() {
 // a message, or panic, etc.
 type DocumentMatchPoolTooSmall func(p *DocumentMatchPool) *DocumentMatch

-// DocumentMatchPool manages use/re-use of DocumentMatch instances
+// DocumentMatchPool manages use/reuse of DocumentMatch instances
 // it pre-allocates space from a single large block with the expected
 // number of instances. It is not thread-safe as currently all
 // aspects of search take place in a single goroutine.
vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go (generated, vendored)
@@ -15,6 +15,7 @@
 package query

 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
@@ -30,6 +31,7 @@ type BooleanQuery struct {
 	Must            Query  `json:"must,omitempty"`
 	Should          Query  `json:"should,omitempty"`
 	MustNot         Query  `json:"must_not,omitempty"`
+	Filter          Query  `json:"filter,omitempty"`
 	BoostVal        *Boost `json:"boost,omitempty"`
 	queryStringMode bool
 }
@@ -115,6 +117,13 @@ func (q *BooleanQuery) AddMustNot(m ...Query) {
 	}
 }

+func (q *BooleanQuery) AddFilter(m Query) {
+	if m == nil {
+		return
+	}
+	q.Filter = m
+}
+
 func (q *BooleanQuery) SetBoost(b float64) {
 	boost := Boost(b)
 	q.BoostVal = &boost
@@ -162,11 +171,61 @@ func (q *BooleanQuery) Searcher(ctx context.Context, i index.IndexReader, m mapp
 		}
 	}

-	// if all 3 are nil, return MatchNone
-	if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher == nil {
+	var filterFunc searcher.FilterFunc
+	if q.Filter != nil {
+		// create a new searcher options with disabled scoring, since filter should not affect scoring
+		// and we don't want to pay the cost of scoring if we don't need it, also disable term vectors
+		// and explain, since we don't need them for filters
+		filterOptions := search.SearcherOptions{
+			Explain:            false,
+			IncludeTermVectors: false,
+			Score:              "none",
+		}
+		filterSearcher, err := q.Filter.Searcher(ctx, i, m, filterOptions)
+		if err != nil {
+			return nil, err
+		}
+		filterFunc = func(sctx *search.SearchContext, d *search.DocumentMatch) bool {
+			// Attempt to advance the filter searcher to the document identified by
+			// the base searcher's (unfiltered boolean) current result (d.IndexInternalID).
+			//
+			// If the filter searcher successfully finds a document with the same
+			// internal ID, it means the document satisfies the filter and should be kept.
+			//
+			// If the filter searcher returns an error, does not find a matching document,
+			// or finds a document with a different internal ID, the document should be discarded.
+			dm, err := filterSearcher.Advance(sctx, d.IndexInternalID)
+			return err == nil && dm != nil && bytes.Equal(dm.IndexInternalID, d.IndexInternalID)
+		}
+	}
+
+	// if all 4 are nil, return MatchNone
+	if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher == nil && filterFunc == nil {
 		return searcher.NewMatchNoneSearcher(i)
 	}

+	// optimization, if only must searcher, just return it instead
+	if mustSearcher != nil && shouldSearcher == nil && mustNotSearcher == nil && filterFunc == nil {
+		return mustSearcher, nil
+	}
+
+	// optimization, if only should searcher, just return it instead
+	if mustSearcher == nil && shouldSearcher != nil && mustNotSearcher == nil && filterFunc == nil {
+		return shouldSearcher, nil
+	}
+
+	// optimization, if only filter searcher, wrap around a MatchAllSearcher
+	if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher == nil && filterFunc != nil {
+		mustSearcher, err = searcher.NewMatchAllSearcher(ctx, i, 1.0, options)
+		if err != nil {
+			return nil, err
+		}
+		return searcher.NewFilteringSearcher(ctx,
+			mustSearcher,
+			filterFunc,
+		), nil
+	}
+
 	// if only mustNotSearcher, start with MatchAll
 	if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher != nil {
 		mustSearcher, err = searcher.NewMatchAllSearcher(ctx, i, 1.0, options)
@@ -175,12 +234,15 @@ func (q *BooleanQuery) Searcher(ctx context.Context, i index.IndexReader, m mapp
 		}
 	}

-	// optimization, if only should searcher, just return it instead
-	if mustSearcher == nil && shouldSearcher != nil && mustNotSearcher == nil {
-		return shouldSearcher, nil
+	bs, err := searcher.NewBooleanSearcher(ctx, i, mustSearcher, shouldSearcher, mustNotSearcher, options)
+	if err != nil {
+		return nil, err
 	}

-	return searcher.NewBooleanSearcher(ctx, i, mustSearcher, shouldSearcher, mustNotSearcher, options)
+	if filterFunc != nil {
+		return searcher.NewFilteringSearcher(ctx, bs, filterFunc), nil
+	}
+	return bs, nil
 }

 func (q *BooleanQuery) Validate() error {
@@ -202,8 +264,14 @@ func (q *BooleanQuery) Validate() error {
 			return err
 		}
 	}
-	if q.Must == nil && q.Should == nil && q.MustNot == nil {
-		return fmt.Errorf("boolean query must contain at least one must or should or not must clause")
+	if qf, ok := q.Filter.(ValidatableQuery); ok {
+		err := qf.Validate()
+		if err != nil {
+			return err
+		}
+	}
+	if q.Must == nil && q.Should == nil && q.MustNot == nil && q.Filter == nil {
+		return fmt.Errorf("boolean query must contain at least one must or should or not must or filter clause")
 	}
 	return nil
 }
@@ -213,6 +281,7 @@ func (q *BooleanQuery) UnmarshalJSON(data []byte) error {
 		Must    json.RawMessage `json:"must,omitempty"`
 		Should  json.RawMessage `json:"should,omitempty"`
 		MustNot json.RawMessage `json:"must_not,omitempty"`
+		Filter  json.RawMessage `json:"filter,omitempty"`
 		Boost   *Boost          `json:"boost,omitempty"`
 	}{}
 	err := util.UnmarshalJSON(data, &tmp)
@@ -253,6 +322,13 @@ func (q *BooleanQuery) UnmarshalJSON(data []byte) error {
 		}
 	}

+	if tmp.Filter != nil {
+		q.Filter, err = ParseQuery(tmp.Filter)
+		if err != nil {
+			return err
+		}
+	}
+
 	q.BoostVal = tmp.Boost

 	return nil
vendor/github.com/blevesearch/bleve/v2/search/query/query.go (generated, vendored)
@@ -196,7 +196,8 @@ func ParseQuery(input []byte) (Query, error) {
 	_, hasMust := tmp["must"]
 	_, hasShould := tmp["should"]
 	_, hasMustNot := tmp["must_not"]
-	if hasMust || hasShould || hasMustNot {
+	_, hasFilter := tmp["filter"]
+	if hasMust || hasShould || hasMustNot || hasFilter {
 		var rv BooleanQuery
 		err := util.UnmarshalJSON(input, &rv)
 		if err != nil {
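Together, the boolean.go and query.go changes make the filter clause usable from both the Go API and JSON. A hedged sketch of each (field and term are illustrative):

```go
// Go API: the filter clause narrows results like a must clause, but is
// searched with scoring disabled, so it does not shift ranking.
q := bleve.NewBooleanQuery()
q.AddMust(bleve.NewMatchQuery("espresso"))
status := bleve.NewTermQuery("published")
status.SetField("status")
q.AddFilter(status)

// JSON: an object carrying only a filter clause now parses as a
// BooleanQuery thanks to the hasFilter check above.
q2, err := query.ParseQuery([]byte(`{"filter": {"term": "published", "field": "status"}}`))
```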
vendor/github.com/blevesearch/bleve/v2/search/search.go (generated, vendored)
@@ -17,6 +17,7 @@ package search
 import (
 	"fmt"
 	"reflect"
+	"slices"
 	"sort"

 	"github.com/blevesearch/bleve/v2/size"
@@ -41,15 +42,7 @@ func init() {
 type ArrayPositions []uint64

 func (ap ArrayPositions) Equals(other ArrayPositions) bool {
-	if len(ap) != len(other) {
-		return false
-	}
-	for i := range ap {
-		if ap[i] != other[i] {
-			return false
-		}
-	}
-	return true
+	return slices.Equal(ap, other)
 }

 func (ap ArrayPositions) Compare(other ArrayPositions) int {
vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction.go (generated, vendored)
@@ -24,7 +24,7 @@ import (

 // DisjunctionMaxClauseCount is a compile time setting that applications can
 // adjust to non-zero value to cause the DisjunctionSearcher to return an
-// error instead of exeucting searches when the size exceeds this value.
+// error instead of executing searches when the size exceeds this value.
 var DisjunctionMaxClauseCount = 0

 // DisjunctionHeapTakeover is a compile time setting that applications can
vendor/github.com/blevesearch/bleve/v2/search/searcher/search_filter.go (generated, vendored)
@@ -33,7 +33,7 @@ func init() {
 // FilterFunc defines a function which can filter documents
 // returning true means keep the document
 // returning false means do not keep the document
-type FilterFunc func(d *search.DocumentMatch) bool
+type FilterFunc func(sctx *search.SearchContext, d *search.DocumentMatch) bool

 // FilteringSearcher wraps any other searcher, but checks any Next/Advance
 // call against the supplied FilterFunc
@@ -57,7 +57,7 @@ func (f *FilteringSearcher) Size() int {
 func (f *FilteringSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) {
 	next, err := f.child.Next(ctx)
 	for next != nil && err == nil {
-		if f.accept(next) {
+		if f.accept(ctx, next) {
 			return next, nil
 		}
 		next, err = f.child.Next(ctx)
@@ -73,7 +73,7 @@ func (f *FilteringSearcher) Advance(ctx *search.SearchContext, ID index.IndexInt
 	if adv == nil {
 		return nil, nil
 	}
-	if f.accept(adv) {
+	if f.accept(ctx, adv) {
 		return adv, nil
 	}
 	return f.Next(ctx)
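The extra *search.SearchContext parameter lets filters share per-search state with the searcher invoking them, which is what the boolean filter clause above relies on. A minimal hedged sketch of the new shape, where `ctx` and `base` (any search.Searcher) are assumed to exist:

```go
var keepAll searcher.FilterFunc = func(sctx *search.SearchContext, d *search.DocumentMatch) bool {
	return d != nil // illustrative; real filters inspect d (and may use sctx)
}
fs := searcher.NewFilteringSearcher(ctx, base, keepAll)
```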
vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoboundingbox.go (generated, vendored)
@@ -208,7 +208,7 @@ func buildIsIndexedFunc(ctx context.Context, indexReader index.IndexReader, fiel
 func buildRectFilter(ctx context.Context, dvReader index.DocValueReader, field string,
 	minLon, minLat, maxLon, maxLat float64,
 ) FilterFunc {
-	return func(d *search.DocumentMatch) bool {
+	return func(sctx *search.SearchContext, d *search.DocumentMatch) bool {
 		// check geo matches against all numeric type terms indexed
 		var lons, lats []float64
 		var found bool

vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopointdistance.go (generated, vendored)
@@ -115,7 +115,7 @@ func boxSearcher(ctx context.Context, indexReader index.IndexReader,

 func buildDistFilter(ctx context.Context, dvReader index.DocValueReader, field string,
 	centerLon, centerLat, maxDist float64) FilterFunc {
-	return func(d *search.DocumentMatch) bool {
+	return func(sctx *search.SearchContext, d *search.DocumentMatch) bool {
 		// check geo matches against all numeric type terms indexed
 		var lons, lats []float64
 		var found bool

vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopolygon.go (generated, vendored)
@@ -85,7 +85,7 @@ func almostEqual(a, b float64) bool {
 // here: https://wrf.ecse.rpi.edu/nikola/pubdetails/pnpoly.html
 func buildPolygonFilter(ctx context.Context, dvReader index.DocValueReader, field string,
 	coordinates []geo.Point) FilterFunc {
-	return func(d *search.DocumentMatch) bool {
+	return func(sctx *search.SearchContext, d *search.DocumentMatch) bool {
 		// check geo matches against all numeric type terms indexed
 		var lons, lats []float64
 		var found bool

vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoshape.go (generated, vendored)
@@ -77,7 +77,7 @@ func buildRelationFilterOnShapes(ctx context.Context, dvReader index.DocValueRea
 		bufPool = bufPoolCallback()
 	}

-	return func(d *search.DocumentMatch) bool {
+	return func(sctx *search.SearchContext, d *search.DocumentMatch) bool {
 		var found bool

 		err := dvReader.VisitDocValues(d.IndexInternalID,
vendor/github.com/blevesearch/bleve/v2/search/searcher/search_phrase.go (generated, vendored)
@@ -296,7 +296,7 @@ func (s *PhraseSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch,
 }

 // checkCurrMustMatch is solely concerned with determining if the DocumentMatch
-// pointed to by s.currMust (which satisifies the pre-condition searcher)
+// pointed to by s.currMust (which satisfies the pre-condition searcher)
 // also satisfies the phrase constraints. if so, it returns a DocumentMatch
 // for this document, otherwise nil
 func (s *PhraseSearcher) checkCurrMustMatch(ctx *search.SearchContext) *search.DocumentMatch {
@@ -458,7 +458,7 @@ func findPhrasePaths(prevPos uint64, ap search.ArrayPositions, phraseTerms [][]s
 	if len(car) == 0 || (len(car) == 1 && car[0] == "") {
 		nextPos := prevPos + 1
 		if prevPos == 0 {
-			// if prevPos was 0, don't set it to 1 (as thats not a real abs pos)
+			// if prevPos was 0, don't set it to 1 (as that's not a real abs pos)
 			nextPos = 0 // don't advance nextPos if prevPos was 0
 		}
 		return findPhrasePaths(nextPos, ap, cdr, tlm, p, remainingSlop, rv)

vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go (2 changed lines; generated, vendored)
@@ -96,7 +96,7 @@ func NewRegexpStringSearcher(ctx context.Context, indexReader index.IndexReader,
 // NewRegexpSearcher creates a searcher which will match documents that
 // contain terms which match the pattern regexp. The match must be EXACT
 // matching the entire term. The provided regexp SHOULD NOT start with ^
-// or end with $ as this can intefere with the implementation. Separately,
+// or end with $ as this can interfere with the implementation. Separately,
 // matches will be checked to ensure they match the entire term.
 func NewRegexpSearcher(ctx context.Context, indexReader index.IndexReader, pattern Regexp,
 	field string, boost float64, options search.SearcherOptions) (
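
The `^`/`$` caveat in the comment above matters when reusing patterns written for Go's `regexp` package. A minimal stdlib-only sketch (the helper name is ours) of stripping the anchors before handing a pattern to the searcher:

```go
package main

import (
	"fmt"
	"strings"
)

// stripAnchors removes a leading ^ and a trailing $; the searcher itself
// already verifies that a match covers the entire term.
func stripAnchors(pattern string) string {
	pattern = strings.TrimPrefix(pattern, "^")
	pattern = strings.TrimSuffix(pattern, "$")
	return pattern
}

func main() {
	fmt.Println(stripAnchors("^wiki.*$")) // wiki.*
}
```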

vendor/github.com/blevesearch/bleve/v2/search/sort.go (2 changed lines; generated, vendored)
@@ -408,7 +408,7 @@ func (s *SortField) DecodeValue(value string) string {
 		if err != nil {
 			return value
 		}
-		return time.Unix(0, i64).UTC().String()
+		return time.Unix(0, i64).UTC().Format(time.RFC3339Nano)
 	default:
 		return value
 	}
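
The `DecodeValue` change above swaps Go's default `time.Time` string form for strict RFC 3339. A small stdlib-only sketch of the difference in output shape:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var nanos int64 = 1730000000123456789
	t := time.Unix(0, nanos).UTC()
	fmt.Println(t.String())                 // space-separated, trailing " +0000 UTC"
	fmt.Println(t.Format(time.RFC3339Nano)) // RFC 3339: "T" separator, trailing "Z"
}
```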

vendor/github.com/blevesearch/bleve/v2/search/util.go (4 changed lines; generated, vendored)
@@ -152,6 +152,10 @@ const (
 	// BM25StatsKey is used to store and transport the BM25 Data
 	// to the actual search phase which would use it to perform the search.
 	BM25StatsKey ContextKey = "_bm25_stats_key"
+
+	// ScoreFusionKey is used to communicate whether KNN hits need to be preserved for
+	// hybrid search algorithms (like RRF)
+	ScoreFusionKey ContextKey = "_fusion_rescoring_key"
)
 
 func RecordSearchCost(ctx context.Context,
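
`ContextKey` values such as `ScoreFusionKey` travel through `context.WithValue`. A minimal sketch of that pattern, re-declaring the key type locally; the boolean payload type here is our assumption, not something the diff confirms:

```go
package main

import (
	"context"
	"fmt"
)

type ContextKey string

const ScoreFusionKey ContextKey = "_fusion_rescoring_key"

func main() {
	// Stash a flag for a later phase; readers type-assert it back out.
	ctx := context.WithValue(context.Background(), ScoreFusionKey, true)
	if v, ok := ctx.Value(ScoreFusionKey).(bool); ok && v {
		fmt.Println("score fusion requested")
	}
}
```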

vendor/github.com/blevesearch/bleve/v2/search_knn.go (53 changed lines; generated, vendored)
@@ -67,6 +67,8 @@ type SearchRequest struct {
 
 	PreSearchData map[string]interface{} `json:"pre_search_data,omitempty"`
 
+	Params *RequestParams `json:"params,omitempty"`
+
 	sortFunc func(sort.Interface)
 }
 
@@ -148,6 +150,7 @@ func (r *SearchRequest) UnmarshalJSON(input []byte) error {
 		KNN           []*tempKNNReq   `json:"knn"`
 		KNNOperator   knnOperator     `json:"knn_operator"`
 		PreSearchData json.RawMessage `json:"pre_search_data"`
+		Params        json.RawMessage `json:"params"`
 	}
 
 	err := json.Unmarshal(input, &temp)
@@ -189,6 +192,22 @@ func (r *SearchRequest) UnmarshalJSON(input []byte) error {
 		r.From = 0
 	}
 
+	if IsScoreFusionRequested(r) {
+		if temp.Params == nil {
+			// If params is not present and it is requires rescoring, assign
+			// default values
+			r.Params = NewDefaultParams(r.From, r.Size)
+		} else {
+			// if it is a request that requires rescoring, parse the rescoring
+			// parameters.
+			params, err := ParseParams(r, temp.Params)
+			if err != nil {
+				return err
+			}
+			r.Params = params
+		}
+	}
+
 	r.KNN = make([]*KNNRequest, len(temp.KNN))
 	for i, knnReq := range temp.KNN {
 		r.KNN[i] = &KNNRequest{}
@@ -243,6 +262,7 @@ func copySearchRequest(req *SearchRequest, preSearchData map[string]interface{})
 		KNN:           req.KNN,
 		KNNOperator:   req.KNNOperator,
 		PreSearchData: preSearchData,
+		Params:        req.Params,
 	}
 	return &rv
 
@@ -327,6 +347,7 @@ func validateKNN(req *SearchRequest) error {
 	default:
 		return fmt.Errorf("knn_operator must be either 'and' / 'or'")
 	}
+
 	return nil
 }
 
@@ -458,6 +479,12 @@ func finalizeKNNResults(req *SearchRequest, knnHits []*search.DocumentMatch) []*
 		}
 		knnHits = knnHits[:idx]
 	}
 
+	// if score fusion required, return early because
+	// score breakdown is retained
+	if IsScoreFusionRequested(req) {
+		return knnHits
+	}
 	// fix the score using score breakdown now
 	// if the score is none, then we need to set the score to 0.0
 	// if req.Explain is true, then we need to use the expl breakdown to
@@ -537,6 +564,10 @@ func requestHasKNN(req *SearchRequest) bool {
 	return len(req.KNN) > 0
 }
 
+func numKNNQueries(req *SearchRequest) int {
+	return len(req.KNN)
+}
+
 // returns true if the search request contains a KNN request that can be
 // satisfied by just performing a preSearch, completely bypassing the
 // actual search.
@@ -608,3 +639,25 @@ func newKnnPreSearchResultProcessor(req *SearchRequest) *knnPreSearchResultProce
 		},
 	}
 }
+
+// Replace knn boost values for fusion rescoring queries
+func (r *rescorer) prepareKnnRequest() {
+	for i := range r.req.KNN {
+		b := r.req.KNN[i].Boost
+		if b != nil {
+			r.origBoosts[i+1] = b.Value()
+			newB := query.Boost(1.0)
+			r.req.KNN[i].Boost = &newB
+		} else {
+			r.origBoosts[i+1] = 1.0
+		}
+	}
+}
+
+// Restore knn boost values for fusion rescoring queries
+func (r *rescorer) restoreKnnRequest() {
+	for i := range r.req.KNN {
+		b := query.Boost(r.origBoosts[i+1])
+		r.req.KNN[i].Boost = &b
+	}
+}

vendor/github.com/blevesearch/bleve/v2/search_no_knn.go (30 changed lines; generated, vendored)
@@ -77,6 +77,8 @@ type SearchRequest struct {
 
 	PreSearchData map[string]interface{} `json:"pre_search_data,omitempty"`
 
+	Params *RequestParams `json:"params,omitempty"`
+
 	sortFunc func(sort.Interface)
 }
 
@@ -97,6 +99,7 @@ func (r *SearchRequest) UnmarshalJSON(input []byte) error {
 		SearchAfter   []string        `json:"search_after"`
 		SearchBefore  []string        `json:"search_before"`
 		PreSearchData json.RawMessage `json:"pre_search_data"`
+		Params        json.RawMessage `json:"params"`
 	}
 
 	err := json.Unmarshal(input, &temp)
@@ -137,6 +140,23 @@ func (r *SearchRequest) UnmarshalJSON(input []byte) error {
 	if r.From < 0 {
 		r.From = 0
 	}
 
+	if IsScoreFusionRequested(r) {
+		if temp.Params == nil {
+			// If params is not present and it is requires rescoring, assign
+			// default values
+			r.Params = NewDefaultParams(r.From, r.Size)
+		} else {
+			// if it is a request that requires rescoring, parse the rescoring
+			// parameters.
+			params, err := ParseParams(r, temp.Params)
+			if err != nil {
+				return err
+			}
+			r.Params = params
+		}
+	}
+
 	if temp.PreSearchData != nil {
 		r.PreSearchData, err = query.ParsePreSearchData(temp.PreSearchData)
 		if err != nil {
@@ -184,6 +204,10 @@ func requestHasKNN(req *SearchRequest) bool {
 	return false
 }
 
+func numKNNQueries(req *SearchRequest) int {
+	return 0
+}
+
 func addKnnToDummyRequest(dummyReq *SearchRequest, realReq *SearchRequest) {
 }
 
@@ -207,3 +231,9 @@ func finalizeKNNResults(req *SearchRequest, knnHits []*search.DocumentMatch) []*
 func newKnnPreSearchResultProcessor(req *SearchRequest) *knnPreSearchResultProcessor {
 	return &knnPreSearchResultProcessor{} // equivalent to nil
 }
+
+func (r *rescorer) prepareKnnRequest() {
+}
+
+func (r *rescorer) restoreKnnRequest() {
+}

vendor/github.com/blevesearch/bleve/v2/util/keys.go (32 changed lines; generated, vendored; new file)
@@ -0,0 +1,32 @@
+// Copyright (c) 2025 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+var (
+	// Bolt keys
+	BoltSnapshotsBucket           = []byte{'s'}
+	BoltPathKey                   = []byte{'p'}
+	BoltDeletedKey                = []byte{'d'}
+	BoltInternalKey               = []byte{'i'}
+	BoltMetaDataKey               = []byte{'m'}
+	BoltMetaDataSegmentTypeKey    = []byte("type")
+	BoltMetaDataSegmentVersionKey = []byte("version")
+	BoltMetaDataTimeStamp         = []byte("timeStamp")
+	BoltStatsKey                  = []byte("stats")
+	BoltUpdatedFieldsKey          = []byte("fields")
+	TotBytesWrittenKey            = []byte("TotBytesWritten")
+
+	MappingInternalKey = []byte("_mapping")
+)

vendor/github.com/blevesearch/bleve_index_api/index.go (13 changed lines; generated, vendored)
@@ -65,6 +65,19 @@ type EventIndex interface {
 	FireIndexEvent()
 }
 
+type UpdateFieldInfo struct {
+	Deleted   bool
+	Store     bool
+	Index     bool
+	DocValues bool
+}
+
+type UpdateIndex interface {
+	Index
+	UpdateFields(fieldInfo map[string]*UpdateFieldInfo, updatedMapping []byte) error
+	OpenMeta() error
+}
+
 type IndexReader interface {
 	TermFieldReader(ctx context.Context, term []byte, field string, includeFreq, includeNorm, includeTermVectors bool) (TermFieldReader, error)
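
Since `UpdateIndex` embeds `Index`, callers would discover the capability with a type assertion. A hedged sketch of that pattern; the helper name and the OpenMeta-before-UpdateFields call order are our assumptions:

```go
package main

import (
	"fmt"

	index "github.com/blevesearch/bleve_index_api"
)

// updateFields applies in-place field updates when the underlying index
// supports them; the arguments are placeholders for illustration.
func updateFields(idx index.Index, info map[string]*index.UpdateFieldInfo, mapping []byte) error {
	ui, ok := idx.(index.UpdateIndex)
	if !ok {
		return fmt.Errorf("index does not support field updates")
	}
	if err := ui.OpenMeta(); err != nil {
		return err
	}
	return ui.UpdateFields(info, mapping)
}
```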

vendor/github.com/blevesearch/bleve_index_api/indexing_options.go (2 changed lines; generated, vendored)
@@ -26,7 +26,7 @@ const (
 
 const (
 	BM25Scoring  = "bm25"
-	TFIDFScoring = "tfidf"
+	TFIDFScoring = "tf-idf"
 )
 
 // Scoring model indicates the algorithm used to rank documents fetched

vendor/github.com/blevesearch/scorch_segment_api/v2/segment.go (6 changed lines; generated, vendored)
@@ -61,6 +61,12 @@ type PersistedSegment interface {
 	Path() string
 }
 
+type UpdatableSegment interface {
+	Segment
+	GetUpdatedFields() map[string]*index.UpdateFieldInfo
+	SetUpdatedFields(fieldInfo map[string]*index.UpdateFieldInfo)
+}
+
 type TermDictionary interface {
 	PostingsList(term []byte, except *roaring.Bitmap, prealloc PostingsList) (PostingsList, error)

vendor/github.com/blevesearch/zapx/v16/build.go (2 changed lines; generated, vendored)
@@ -21,6 +21,7 @@ import (
 	"math"
 	"os"
 
+	index "github.com/blevesearch/bleve_index_api"
 	"github.com/blevesearch/vellum"
 )
 
@@ -169,6 +170,7 @@ func InitSegmentBase(mem []byte, memCRC uint32, chunkMode uint32, numDocs uint64
 		sectionsIndexOffset: sectionsIndexOffset,
 		fieldDvReaders:      make([]map[uint16]*docValueReader, len(segmentSections)),
 		docValueOffset:      0, // docValueOffsets identified automatically by the section
+		updatedFields:       make(map[string]*index.UpdateFieldInfo),
 		fieldFSTs:           make(map[uint16]*vellum.FST),
 		vecIndexCache:       newVectorIndexCache(),
 		synIndexCache:       newSynonymIndexCache(),

vendor/github.com/blevesearch/zapx/v16/merge.go (68 changed lines; generated, vendored)
@@ -24,6 +24,7 @@ import (
 	"sort"
 
 	"github.com/RoaringBitmap/roaring/v2"
+	index "github.com/blevesearch/bleve_index_api"
 	seg "github.com/blevesearch/scorch_segment_api/v2"
 	"github.com/golang/snappy"
 )
@@ -109,6 +110,19 @@ func mergeSegmentBases(segmentBases []*SegmentBase, drops []*roaring.Bitmap, pat
 	return newDocNums, uint64(cr.Count()), nil
 }
 
+// Remove fields that have been completely deleted from fieldsInv
+func filterFields(fieldsInv []string, fieldInfo map[string]*index.UpdateFieldInfo) []string {
+	idx := 0
+	for _, field := range fieldsInv {
+		if val, ok := fieldInfo[field]; ok && val.Deleted {
+			continue
+		}
+		fieldsInv[idx] = field
+		idx++
+	}
+	return fieldsInv[:idx]
+}
+
 func mergeToWriter(segments []*SegmentBase, drops []*roaring.Bitmap,
 	chunkMode uint32, cr *CountHashWriter, closeCh chan struct{}) (
 	newDocNums [][]uint64, numDocs, storedIndexOffset uint64,
@@ -117,6 +131,8 @@ func mergeToWriter(segments []*SegmentBase, drops []*roaring.Bitmap,
 
 	var fieldsSame bool
 	fieldsSame, fieldsInv = mergeFields(segments)
+	updatedFields := mergeUpdatedFields(segments)
+	fieldsInv = filterFields(fieldsInv, updatedFields)
 	fieldsMap = mapFields(fieldsInv)
 
 	numDocs = computeNewDocCount(segments, drops)
@@ -130,15 +146,16 @@ func mergeToWriter(segments []*SegmentBase, drops []*roaring.Bitmap,
 	// offsets in the fields section index of the file (the final merged file).
 	mergeOpaque := map[int]resetable{}
 	args := map[string]interface{}{
-		"chunkMode":  chunkMode,
-		"fieldsSame": fieldsSame,
-		"fieldsMap":  fieldsMap,
-		"numDocs":    numDocs,
+		"chunkMode":     chunkMode,
+		"fieldsSame":    fieldsSame,
+		"fieldsMap":     fieldsMap,
+		"numDocs":       numDocs,
+		"updatedFields": updatedFields,
 	}
 
 	if numDocs > 0 {
 		storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops,
-			fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh)
+			fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh, updatedFields)
 		if err != nil {
 			return nil, 0, 0, nil, nil, 0, err
 		}
@@ -358,7 +375,7 @@ type varintEncoder func(uint64) (int, error)
 
 func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
 	fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64,
-	w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) {
+	w *CountHashWriter, closeCh chan struct{}, updatedFields map[string]*index.UpdateFieldInfo) (uint64, [][]uint64, error) {
 	var rv [][]uint64 // The remapped or newDocNums for each segment.
 
 	var newDocNum uint64
@@ -397,7 +414,8 @@ func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
 		// optimize when the field mapping is the same across all
 		// segments and there are no deletions, via byte-copying
 		// of stored docs bytes directly to the writer
-		if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) {
+		// cannot copy directly if fields might have been deleted
+		if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) && len(updatedFields) == 0 {
 			err := segment.copyStoredDocs(newDocNum, docNumOffsets, w)
 			if err != nil {
 				return 0, nil, err
@@ -440,6 +458,10 @@ func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
 				// no entry for field in fieldsMap
 				return false
 			}
+			// early exit if the stored portion of the field is deleted
+			if val, ok := updatedFields[fieldsInv[fieldID]]; ok && val.Store {
+				return true
+			}
 			vals[fieldID] = append(vals[fieldID], value)
 			typs[fieldID] = append(typs[fieldID], typ)
@@ -471,6 +493,10 @@ func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
 
 		// now walk the non-"_id" fields in order
 		for fieldID := 1; fieldID < len(fieldsInv); fieldID++ {
+			// early exit if the stored portion of the field is deleted
+			if val, ok := updatedFields[fieldsInv[fieldID]]; ok && val.Store {
+				continue
+			}
 			storedFieldValues := vals[fieldID]
 
 			stf := typs[fieldID]
@@ -606,6 +632,34 @@ func mergeFields(segments []*SegmentBase) (bool, []string) {
 	return fieldsSame, rv
 }
 
+// Combine updateFieldInfo from all segments
+func mergeUpdatedFields(segments []*SegmentBase) map[string]*index.UpdateFieldInfo {
+	var fieldInfo map[string]*index.UpdateFieldInfo
+
+	for _, segment := range segments {
+		for field, info := range segment.updatedFields {
+			if fieldInfo == nil {
+				fieldInfo = make(map[string]*index.UpdateFieldInfo)
+			}
+			if _, ok := fieldInfo[field]; !ok {
+				fieldInfo[field] = &index.UpdateFieldInfo{
+					Deleted:   info.Deleted,
+					Index:     info.Index,
+					Store:     info.Store,
+					DocValues: info.DocValues,
+				}
+			} else {
+				fieldInfo[field].Deleted = fieldInfo[field].Deleted || info.Deleted
+				fieldInfo[field].Index = fieldInfo[field].Index || info.Index
+				fieldInfo[field].Store = fieldInfo[field].Store || info.Store
+				fieldInfo[field].DocValues = fieldInfo[field].Store || info.DocValues
+			}
+		}
+
+	}
+	return fieldInfo
+}
+
 func isClosed(closeCh chan struct{}) bool {
 	select {
 	case <-closeCh:
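
`mergeUpdatedFields` ORs the per-segment flags together, so a field counts as deleted (or as having its index, stored, or docvalues data stripped) if any segment says so. A standalone sketch of that combination rule, re-declaring the struct locally; note it spells out the intended per-flag OR semantics:

```go
package main

import "fmt"

// UpdateFieldInfo mirrors the bleve_index_api struct for this sketch.
type UpdateFieldInfo struct {
	Deleted, Store, Index, DocValues bool
}

// combine merges two segments' views of the same field: a capability is
// considered removed if either segment marks it removed.
func combine(a, b UpdateFieldInfo) UpdateFieldInfo {
	return UpdateFieldInfo{
		Deleted:   a.Deleted || b.Deleted,
		Index:     a.Index || b.Index,
		Store:     a.Store || b.Store,
		DocValues: a.DocValues || b.DocValues,
	}
}

func main() {
	segA := UpdateFieldInfo{Index: true}
	segB := UpdateFieldInfo{DocValues: true}
	fmt.Printf("%+v\n", combine(segA, segB)) // {Deleted:false Store:false Index:true DocValues:true}
}
```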

vendor/github.com/blevesearch/zapx/v16/section_faiss_vector_index.go (17 changed lines; generated, vendored)
@@ -105,6 +105,10 @@ func (v *faissVectorIndexSection) Merge(opaque map[int]resetable, segments []*Se
 		if _, ok := sb.fieldsMap[fieldName]; !ok {
 			continue
 		}
+		// early exit if index data is supposed to be deleted
+		if info, ok := vo.updatedFields[fieldName]; ok && info.Index {
+			continue
+		}
 
 		// check if the section address is a valid one for "fieldName" in the
 		// segment sb. the local fieldID (fetched by the fieldsMap of the sb)
@@ -686,9 +690,10 @@ func (v *faissVectorIndexSection) getvectorIndexOpaque(opaque map[int]resetable)
 
 func (v *faissVectorIndexSection) InitOpaque(args map[string]interface{}) resetable {
 	rv := &vectorIndexOpaque{
-		fieldAddrs:  make(map[uint16]int),
-		vecIDMap:    make(map[int64]*vecInfo),
-		vecFieldMap: make(map[uint16]*indexContent),
+		fieldAddrs:    make(map[uint16]int),
+		vecIDMap:      make(map[int64]*vecInfo),
+		vecFieldMap:   make(map[uint16]*indexContent),
+		updatedFields: make(map[string]*index.UpdateFieldInfo),
 	}
 	for k, v := range args {
 		rv.Set(k, v)
@@ -727,6 +732,8 @@ type vectorIndexOpaque struct {
 	// index to be build.
 	vecFieldMap map[uint16]*indexContent
 
+	updatedFields map[string]*index.UpdateFieldInfo
+
 	tmp0 []byte
 }
 
@@ -773,4 +780,8 @@ func (v *vectorIndexOpaque) Reset() (err error) {
 }
 
 func (v *vectorIndexOpaque) Set(key string, val interface{}) {
+	switch key {
+	case "updatedFields":
+		v.updatedFields = val.(map[string]*index.UpdateFieldInfo)
+	}
 }

vendor/github.com/blevesearch/zapx/v16/section_inverted_text_index.go (24 changed lines; generated, vendored)
@@ -82,7 +82,8 @@ func (i *invertedTextIndexSection) AddrForField(opaque map[int]resetable, fieldI
 func mergeAndPersistInvertedSection(segments []*SegmentBase, dropsIn []*roaring.Bitmap,
 	fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool,
 	newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32,
-	w *CountHashWriter, closeCh chan struct{}) (map[int]int, uint64, error) {
+	updatedFields map[string]*index.UpdateFieldInfo, w *CountHashWriter,
+	closeCh chan struct{}) (map[int]int, uint64, error) {
 	var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64)
 	var bufLoc []uint64
 
@@ -125,6 +126,10 @@ func mergeAndPersistInvertedSection(segments []*SegmentBase, dropsIn []*roaring.
 		if isClosed(closeCh) {
 			return nil, 0, seg.ErrClosed
 		}
+		// early exit if index data is supposed to be deleted
+		if info, ok := updatedFields[fieldName]; ok && info.Index {
+			continue
+		}
 
 		dict, err2 := segment.dictionary(fieldName)
 		if err2 != nil {
@@ -244,7 +249,8 @@ func mergeAndPersistInvertedSection(segments []*SegmentBase, dropsIn []*roaring.
 
 			postItr = postings.iterator(true, true, true, postItr)
 
-			if fieldsSame {
+			// can only safely copy data if no field data has been deleted
+			if fieldsSame && len(updatedFields) == 0 {
 				// can optimize by copying freq/norm/loc bytes directly
 				lastDocNum, lastFreq, lastNorm, err = mergeTermFreqNormLocsByCopying(
 					term, postItr, newDocNums[itrI], newRoaring,
@@ -317,7 +323,10 @@ func mergeAndPersistInvertedSection(segments []*SegmentBase, dropsIn []*roaring.
 		if isClosed(closeCh) {
 			return nil, 0, seg.ErrClosed
 		}
-
+		// early exit if docvalues data is supposed to be deleted
+		if info, ok := updatedFields[fieldName]; ok && info.DocValues {
+			continue
+		}
 		fieldIDPlus1 := uint16(segment.fieldsMap[fieldName])
 		if dvIter, exists := segment.fieldDvReaders[SectionInvertedTextIndex][fieldIDPlus1-1]; exists &&
 			dvIter != nil {
@@ -398,7 +407,7 @@ func (i *invertedTextIndexSection) Merge(opaque map[int]resetable, segments []*S
 	w *CountHashWriter, closeCh chan struct{}) error {
 	io := i.getInvertedIndexOpaque(opaque)
 	fieldAddrs, _, err := mergeAndPersistInvertedSection(segments, drops, fieldsInv,
-		io.FieldsMap, io.fieldsSame, newDocNumsIn, io.numDocs, io.chunkMode, w, closeCh)
+		io.FieldsMap, io.fieldsSame, newDocNumsIn, io.numDocs, io.chunkMode, io.updatedFields, w, closeCh)
 	if err != nil {
 		return err
 	}
@@ -925,7 +934,8 @@ func (i *invertedIndexOpaque) getOrDefineField(fieldName string) int {
 
 func (i *invertedTextIndexSection) InitOpaque(args map[string]interface{}) resetable {
 	rv := &invertedIndexOpaque{
-		fieldAddrs: map[int]int{},
+		fieldAddrs:    map[int]int{},
+		updatedFields: make(map[string]*index.UpdateFieldInfo),
 	}
 	for k, v := range args {
 		rv.Set(k, v)
@@ -994,6 +1004,8 @@ type invertedIndexOpaque struct {
 
 	fieldAddrs map[int]int
 
+	updatedFields map[string]*index.UpdateFieldInfo
+
 	fieldsSame bool
 	numDocs    uint64
 }
@@ -1061,5 +1073,7 @@ func (i *invertedIndexOpaque) Set(key string, val interface{}) {
 		i.FieldsMap = val.(map[string]uint16)
 	case "numDocs":
 		i.numDocs = val.(uint64)
+	case "updatedFields":
+		i.updatedFields = val.(map[string]*index.UpdateFieldInfo)
 	}
 }

vendor/github.com/blevesearch/zapx/v16/segment.go (13 changed lines; generated, vendored)
@@ -25,6 +25,7 @@ import (
 	"unsafe"
 
 	"github.com/RoaringBitmap/roaring/v2"
+	index "github.com/blevesearch/bleve_index_api"
 	mmap "github.com/blevesearch/mmap-go"
 	segment "github.com/blevesearch/scorch_segment_api/v2"
 	"github.com/blevesearch/vellum"
@@ -109,6 +110,8 @@ type SegmentBase struct {
 	fieldDvNames []string // field names cached in fieldDvReaders
 	size         uint64
 
+	updatedFields map[string]*index.UpdateFieldInfo
+
 	m         sync.Mutex
 	fieldFSTs map[uint16]*vellum.FST
 
@@ -952,3 +955,13 @@ func (sb *SegmentBase) loadDvReaders() error {
 
 	return nil
 }
+
+// Getter method to retrieve updateFieldInfo within segment base
+func (s *SegmentBase) GetUpdatedFields() map[string]*index.UpdateFieldInfo {
+	return s.updatedFields
+}
+
+// Setter method to store updateFieldInfo within segment base
+func (s *SegmentBase) SetUpdatedFields(updatedFields map[string]*index.UpdateFieldInfo) {
+	s.updatedFields = updatedFields
+}

vendor/github.com/blevesearch/zapx/v16/zap.md (45 changed lines; generated, vendored)
@@ -162,6 +162,51 @@ In case of inverted text index, the dictionary is encoded in [Vellum](https://gi
 
 ITI - Inverted Text Index
 
+## Vector Index Section
+
+In a vector index, each vector in a document is given a unique Id. This vector Id is to be used within the [Faiss](https://github.com/blevesearch/faiss) index. The mapping between the document Id and the vector Id is stored along with a serialized vector index. Doc Values are not applicable to this section.
+
+    |================================================================+- Inverted Text Index Section
+    |                                                                |
+    |================================================================+- Vector Index Section
+    |                                                                |
+    |     +~~~~~~~~~~+~~~~~~~+~~~~~+~~~~~~+                          |
+    +-----> DV Start | DVEnd | VIO | NVEC |                          |
+    |     +~~~~~~~~~~+~~~~~~~+~~~~~+~~~~~~+                          |
+    |                                                                |
+    |     +~~~~~~~~~~~~+~~~~~~~~~~~~+                                |
+    |     | VectorID_0 | DocID_0    |                                |
+    |     +~~~~~~~~~~~~+~~~~~~~~~~~~+                                |
+    |     | VectorID_1 | DocID_1    |                                |
+    |     +~~~~~~~~~~~~+~~~~~~~~~~~~+                                |
+    |     | ...        | ...        |                                |
+    |     +~~~~~~~~~~~~+~~~~~~~~~~~~+                                |
+    |     | VectorID_N | DocID_N    |                                |
+    |     +~~~~~~~~~~~~+~~~~~~~~~~~~+                                |
+    |                                                                |
+    |     +~~~~~~~~~~~~~+                                            |
+    |     |  FAISS LEN  |                                            |
+    |     +~~~~~~~~~~~~~+                                            |
+    |                                                                |
+    |     +--------------------------+...+------------------------+  |
+    |     |        SERIALIZED FAISS INDEX                         |  |
+    |     +--------------------------+...+------------------------+  |
+    |                                                                |
+    |================================================================+- Synonym Index Section
+    |                                                                |
+    |================================================================+- Sections Info
+    +-----------------------------+                                  |
+    |                             |                                  |
+    |  +-------+-----+------------+~~~~~~~~+~~~~~~~~+--+...+--+      |
+    |  |  ...  | VI  |  VI ADDR   |   NS   | Length |  Name   |      |
+    |  +-------+-----+------------+~~~~~~~~+~~~~~~~~+--+...+--+      |
+    +================================================================+
+
+VI - Vector Index
+VIO - Vector Index Optimized for
+NVEC - Number of vectors
+FAISS LEN - Length of serialized FAISS index
+
 ## Synonym Index Section
 
 In a synonyms index, the relationship between a term and its synonyms is represented using a Thesaurus. The Thesaurus is encoded in the [Vellum](https://github.com/couchbase/vellum) format and consists of pairs in the form `(term, offset)`. Here, the offset specifies the position of the postings list containing the synonyms for the given term. The postings list is stored as a Roaring64 bitmap, with each entry representing an encoded synonym for the term.

vendor/github.com/kovidgoyal/go-parallel/.gitignore (1 changed line; generated, vendored; new file)
@@ -0,0 +1 @@
+dist

vendor/github.com/kovidgoyal/go-parallel/.goreleaser.yaml (2 changed lines; generated, vendored; new file)
@@ -0,0 +1,2 @@
+builds:
+  - skip: true

vendor/github.com/kovidgoyal/go-parallel/LICENSE (28 changed lines; generated, vendored; new file)
@@ -0,0 +1,28 @@
+BSD 3-Clause License
+
+Copyright (c) 2025, Kovid Goyal
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/kovidgoyal/go-parallel/README.md (5 changed lines; generated, vendored; new file)
@@ -0,0 +1,5 @@
+# go-parallel
+
+Utility functions to make running code in parallel easier and safer.
+Panics in go routines are turned into regular errors, instead of crashing
+the program.

vendor/github.com/kovidgoyal/go-parallel/parallel.go (171 changed lines; generated, vendored; new file)
@@ -0,0 +1,171 @@
+package parallel
+
+import (
+	"fmt"
+	"iter"
+	"runtime"
+	"slices"
+	"strings"
+	"sync"
+)
+
+var _ = fmt.Print
+
+type PanicError struct {
+	frames      []runtime.Frame
+	panic_value any
+}
+
+const indent_lead = " "
+
+func format_frame_line(frame runtime.Frame) string {
+	return fmt.Sprintf("\r\n%s%s%s:%d", indent_lead, frame.Function, frame.File, frame.Line)
+}
+
+func (e *PanicError) walk(level int, yield func(string) bool) bool {
+	s := "Panic"
+	cause := fmt.Sprintf("%v", e.panic_value)
+	if _, ok := e.panic_value.(*PanicError); ok {
+		cause = "sub-panic (see below)"
+	}
+	if level > 0 {
+		s = "\r\n--> Sub-panic"
+	}
+	if !yield(fmt.Sprintf("%s caused by: %s\r\nStack trace (most recent call first):", s, cause)) {
+		return false
+	}
+	for _, f := range e.frames {
+		if !yield(format_frame_line(f)) {
+			return false
+		}
+	}
+	if sp, ok := e.panic_value.(*PanicError); ok {
+		return sp.walk(level+1, yield)
+	}
+	return true
+}
+
+func (e *PanicError) lines() iter.Seq[string] {
+	return func(yield func(string) bool) {
+		e.walk(0, yield)
+	}
+}
+
+func (e *PanicError) Error() string {
+	return strings.Join(slices.Collect(e.lines()), "")
+}
+
+func (e *PanicError) Unwrap() error {
+	if ans, ok := e.panic_value.(*PanicError); ok {
+		return ans
+	}
+	return nil
+}
+
+// Format a stack trace on panic and return it as an error
+func Format_stacktrace_on_panic(r any, skip_frames int) (err *PanicError) {
+	pcs := make([]uintptr, 512)
+	n := runtime.Callers(2+skip_frames, pcs)
+	var ans []runtime.Frame
+	frames := runtime.CallersFrames(pcs[:n])
+	found_first_frame := false
+	for frame, more := frames.Next(); more; frame, more = frames.Next() {
+		if !found_first_frame {
+			if strings.HasPrefix(frame.Function, "runtime.") {
+				continue
+			}
+			found_first_frame = true
+		}
+		ans = append(ans, frame)
+	}
+	return &PanicError{frames: ans, panic_value: r}
+}
+
+// Run the specified function in parallel over chunks from the specified range.
+// If the function panics, it is turned into a regular error. If multiple function calls panic,
+// any one of the panics will be returned.
+func Run_in_parallel_over_range(num_procs int, f func(int, int), start, limit int) (err error) {
+	num_items := limit - start
+	if num_procs <= 0 {
+		num_procs = runtime.GOMAXPROCS(0)
+	}
+	num_procs = max(1, min(num_procs, num_items))
+	if num_procs < 2 {
+		defer func() {
+			if r := recover(); r != nil {
+				err = Format_stacktrace_on_panic(r, 1)
+			}
+		}()
+		f(start, limit)
+		return
+	}
+	chunk_sz := max(1, num_items/num_procs)
+	var wg sync.WaitGroup
+	echan := make(chan error, num_items/chunk_sz+1)
+	for start < limit {
+		end := min(start+chunk_sz, limit)
+		wg.Add(1)
+		go func(start, end int) {
+			defer func() {
+				if r := recover(); r != nil {
+					echan <- Format_stacktrace_on_panic(r, 1)
+				}
+				wg.Done()
+			}()
+			f(start, end)
+		}(start, end)
+		start = end
+	}
+	wg.Wait()
+	close(echan)
+	for qerr := range echan {
+		return qerr
+	}
+	return
+}
+
+// Run the specified function in parallel over chunks from the specified range.
+// If the function panics, it is turned into a regular error. If the function
+// returns an error it is returned. If multiple function calls panic or return errors,
+// any one of them will be returned.
+func Run_in_parallel_over_range_with_error(num_procs int, f func(int, int) error, start, limit int) (err error) {
+	num_items := limit - start
+	if num_procs <= 0 {
+		num_procs = runtime.GOMAXPROCS(0)
+	}
+	num_procs = max(1, min(num_procs, num_items))
+	if num_procs < 2 {
+		defer func() {
+			if r := recover(); r != nil {
+				err = Format_stacktrace_on_panic(r, 1)
+			}
+		}()
+		err = f(start, limit)
+		return
+	}
+	chunk_sz := max(1, num_items/num_procs)
+	var wg sync.WaitGroup
+	echan := make(chan error, num_items/chunk_sz+1)
+	for start < limit {
+		end := min(start+chunk_sz, limit)
+		wg.Add(1)
+		go func(start, end int) {
+			defer func() {
+				if r := recover(); r != nil {
+					echan <- Format_stacktrace_on_panic(r, 1)
+				}
+				wg.Done()
+			}()
+			if cerr := f(start, end); cerr != nil {
+				echan <- cerr
+			}
+		}(start, end)
+		start = end
+	}
+	wg.Wait()
+	close(echan)
+	for qerr := range echan {
+		return qerr
+	}
+	return
+}
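
A short usage sketch of the API above, summing chunks of a slice; everything here comes from the signatures in parallel.go except the caller code itself, and the module path is inferred from the vendor directory:

```go
package main

import (
	"fmt"
	"sync/atomic"

	parallel "github.com/kovidgoyal/go-parallel"
)

func main() {
	data := make([]int64, 1000)
	for i := range data {
		data[i] = int64(i)
	}
	var total atomic.Int64
	// num_procs <= 0 means "use GOMAXPROCS"; each call receives a [start, limit) chunk.
	err := parallel.Run_in_parallel_over_range_with_error(0, func(start, limit int) error {
		var sum int64
		for i := start; i < limit; i++ {
			sum += data[i]
		}
		total.Add(sum)
		return nil
	}, 0, len(data))
	if err != nil {
		fmt.Println("error (panics in workers surface here too):", err)
		return
	}
	fmt.Println(total.Load()) // 499500
}
```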

vendor/github.com/kovidgoyal/go-parallel/publish.py (31 changed lines; generated, vendored; new file)
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
+
+import os
+import subprocess
+
+
+VERSION = '1.0.1'
+
+
+def run(*args: str):
+    cp = subprocess.run(args)
+    if cp.returncode != 0:
+        raise SystemExit(cp.returncode)
+
+
+def main():
+    try:
+        ans = input(f'Publish version \033[91m{VERSION}\033[m (y/n): ')
+    except KeyboardInterrupt:
+        ans = 'n'
+    if ans.lower() != 'y':
+        return
+    os.environ['GITHUB_TOKEN'] = open(os.path.join(os.environ['PENV'], 'github-token')).read().strip().partition(':')[2]
+    run('git', 'tag', '-a', 'v' + VERSION, '-m', f'version {VERSION}')
+    run('git', 'push')
+    run('goreleaser', 'release', '--clean')
+
+
+if __name__ == '__main__':
+    main()

vendor/github.com/kovidgoyal/imaging/.goreleaser.yaml (4 changed lines; generated, vendored)
@@ -19,7 +19,7 @@ builds:
   - skip: true
 
 archives:
-  - format: tar.gz
+  - formats: [ 'tar.gz' ]
     # this name template makes the OS and Arch compatible with the results of `uname`.
     name_template: >-
       {{ .ProjectName }}_
@@ -31,7 +31,7 @@ archives:
     # use zip for windows archives
     format_overrides:
      - goos: windows
-       format: zip
+       formats: [ 'zip' ]
 
changelog:
  disable: true

vendor/github.com/kovidgoyal/imaging/adjust.go (57 changed lines; generated, vendored)
@@ -10,10 +10,10 @@ import (
 func Grayscale(img image.Image) *image.NRGBA {
 	src := newScanner(img)
 	dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
-	parallel(0, src.h, func(ys <-chan int) {
-		for y := range ys {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
+		for y := start; y < limit; y++ {
 			i := y * dst.Stride
-			src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
+			src.Scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
 			for x := 0; x < src.w; x++ {
 				d := dst.Pix[i : i+3 : i+3]
 				r := d[0]
@@ -27,7 +27,9 @@ func Grayscale(img image.Image) *image.NRGBA {
 				i += 4
 			}
 		}
-	})
+	}, 0, src.h); err != nil {
+		panic(err)
+	}
 	return dst
 }
 
@@ -35,10 +37,10 @@ func Grayscale(img image.Image) *image.NRGBA {
 func Invert(img image.Image) *image.NRGBA {
 	src := newScanner(img)
 	dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
-	parallel(0, src.h, func(ys <-chan int) {
-		for y := range ys {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
+		for y := start; y < limit; y++ {
 			i := y * dst.Stride
-			src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
+			src.Scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
 			for x := 0; x < src.w; x++ {
 				d := dst.Pix[i : i+3 : i+3]
 				d[0] = 255 - d[0]
@@ -47,7 +49,9 @@ func Invert(img image.Image) *image.NRGBA {
 				i += 4
 			}
 		}
-	})
+	}, 0, src.h); err != nil {
+		panic(err)
+	}
 	return dst
 }
 
@@ -58,9 +62,9 @@ func Invert(img image.Image) *image.NRGBA {
 // The percentage = -100 gives the image with the saturation value zeroed for each pixel (grayscale).
 //
 // Examples:
-//  dstImage = imaging.AdjustSaturation(srcImage, 25) // Increase image saturation by 25%.
-//  dstImage = imaging.AdjustSaturation(srcImage, -10) // Decrease image saturation by 10%.
 //
+//	dstImage = imaging.AdjustSaturation(srcImage, 25) // Increase image saturation by 25%.
+//	dstImage = imaging.AdjustSaturation(srcImage, -10) // Decrease image saturation by 10%.
 func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
 	if percentage == 0 {
 		return Clone(img)
@@ -85,9 +89,9 @@ func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
 // The shift = 180 (or -180) corresponds to a 180° degree rotation of the color wheel and thus gives the image with its hue inverted for each pixel.
 //
 // Examples:
-//  dstImage = imaging.AdjustHue(srcImage, 90) // Shift Hue by 90°.
-//  dstImage = imaging.AdjustHue(srcImage, -30) // Shift Hue by -30°.
 //
+//	dstImage = imaging.AdjustHue(srcImage, 90) // Shift Hue by 90°.
+//	dstImage = imaging.AdjustHue(srcImage, -30) // Shift Hue by -30°.
 func AdjustHue(img image.Image, shift float64) *image.NRGBA {
 	if math.Mod(shift, 360) == 0 {
 		return Clone(img)
@@ -116,7 +120,6 @@ func AdjustHue(img image.Image, shift float64) *image.NRGBA {
 //
 //	dstImage = imaging.AdjustContrast(srcImage, -10) // Decrease image contrast by 10%.
 //	dstImage = imaging.AdjustContrast(srcImage, 20) // Increase image contrast by 20%.
-//
 func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
 	if percentage == 0 {
 		return Clone(img)
@@ -148,7 +151,6 @@ func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
 //
 //	dstImage = imaging.AdjustBrightness(srcImage, -15) // Decrease image brightness by 15%.
 //	dstImage = imaging.AdjustBrightness(srcImage, 10) // Increase image brightness by 10%.
-//
 func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
 	if percentage == 0 {
 		return Clone(img)
@@ -172,7 +174,6 @@ func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
 // Example:
 //
 //	dstImage = imaging.AdjustGamma(srcImage, 0.7)
-//
 func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
 	if gamma == 1 {
 		return Clone(img)
@@ -198,7 +199,6 @@ func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
 //
 //	dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // Increase the contrast.
 //	dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // Decrease the contrast.
-//
 func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA {
 	if factor == 0 {
 		return Clone(img)
@@ -212,14 +212,14 @@ func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA {
 	e := 1.0e-6
 
 	if factor > 0 {
-		for i := 0; i < 256; i++ {
+		for i := range 256 {
 			x := float64(i) / 255.0
 			sigX := sigmoid(a, b, x)
 			f := (sigX - sig0) / (sig1 - sig0)
 			lut[i] = clamp(f * 255.0)
 		}
 	} else {
-		for i := 0; i < 256; i++ {
+		for i := range 256 {
 			x := float64(i) / 255.0
 			arg := math.Min(math.Max((sig1-sig0)*x+sig0, e), 1.0-e)
 			f := a - math.Log(1.0/arg-1.0)/b
@@ -239,10 +239,10 @@ func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
 	src := newScanner(img)
 	dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
 	lut = lut[0:256]
-	parallel(0, src.h, func(ys <-chan int) {
-		for y := range ys {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
+		for y := start; y < limit; y++ {
 			i := y * dst.Stride
-			src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
+			src.Scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
 			for x := 0; x < src.w; x++ {
 				d := dst.Pix[i : i+3 : i+3]
 				d[0] = lut[d[0]]
@@ -251,7 +251,9 @@ func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
 				i += 4
 			}
 		}
-	})
+	}, 0, src.h); err != nil {
+		panic(err)
+	}
 	return dst
 }
 
@@ -270,14 +272,13 @@ func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
 //		return color.NRGBA{uint8(r), c.G, c.B, c.A}
 //	}
 // )
-//
 func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA {
 	src := newScanner(img)
 	dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
-	parallel(0, src.h, func(ys <-chan int) {
-		for y := range ys {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
+		for y := start; y < limit; y++ {
 			i := y * dst.Stride
-			src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
+			src.Scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
 			for x := 0; x < src.w; x++ {
 				d := dst.Pix[i : i+4 : i+4]
 				r := d[0]
@@ -292,6 +293,8 @@ func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGB
 				i += 4
 			}
 		}
-	})
+	}, 0, src.h); err != nil {
+		panic(err)
+	}
 	return dst
 }
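
The doc comments above already show the call shapes; a compact, runnable sketch combining them, with the input image synthesized in memory so the example has no file dependencies:

```go
package main

import (
	"fmt"
	"image"

	"github.com/kovidgoyal/imaging"
)

func main() {
	// Build a tiny in-memory image instead of loading one from disk.
	src := image.NewNRGBA(image.Rect(0, 0, 64, 64))
	for i := range src.Pix {
		src.Pix[i] = 0x80 // mid-gray, fully at half alpha too
	}
	gray := imaging.Grayscale(src)
	brighter := imaging.AdjustBrightness(src, 10) // as in the doc comments above
	fmt.Println(gray.Bounds(), brighter.Bounds())
}
```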

vendor/github.com/kovidgoyal/imaging/convolution.go (8 changed lines; generated, vendored)
@@ -70,8 +70,8 @@ func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *imag
 		}
 	}
 
-	parallel(0, h, func(ys <-chan int) {
-		for y := range ys {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
+		for y := start; y < limit; y++ {
 			for x := 0; x < w; x++ {
 				var r, g, b float64
 				for _, c := range coefs {
@@ -123,7 +123,9 @@ func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *imag
 				d[3] = src.Pix[srcOff+3]
 			}
 		}
-	})
+	}, 0, h); err != nil {
+		panic(err)
+	}
 
 	return dst
 }

vendor/github.com/kovidgoyal/imaging/effects.go (56 changed lines; generated, vendored)
@@ -15,7 +15,6 @@ func gaussianBlurKernel(x, sigma float64) float64 {
 // Example:
 //
 //	dstImage := imaging.Blur(srcImage, 3.5)
-//
 func Blur(img image.Image, sigma float64) *image.NRGBA {
 	if sigma <= 0 {
 		return Clone(img)
@@ -36,25 +35,19 @@ func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
 	dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
 	radius := len(kernel) - 1
 
-	parallel(0, src.h, func(ys <-chan int) {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
 		scanLine := make([]uint8, src.w*4)
 		scanLineF := make([]float64, len(scanLine))
-		for y := range ys {
-			src.scan(0, y, src.w, y+1, scanLine)
+		for y := start; y < limit; y++ {
+			src.Scan(0, y, src.w, y+1, scanLine)
 			for i, v := range scanLine {
 				scanLineF[i] = float64(v)
 			}
 			for x := 0; x < src.w; x++ {
-				min := x - radius
-				if min < 0 {
-					min = 0
-				}
-				max := x + radius
-				if max > src.w-1 {
-					max = src.w - 1
-				}
+				minv := max(0, x-radius)
+				maxv := min(x+radius, src.w-1)
 				var r, g, b, a, wsum float64
-				for ix := min; ix <= max; ix++ {
+				for ix := minv; ix <= maxv; ix++ {
 					i := ix * 4
 					weight := kernel[absint(x-ix)]
 					wsum += weight
@@ -76,7 +69,9 @@ func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
 				}
 			}
 		}
-	})
+	}, 0, src.h); err != nil {
+		panic(err)
+	}
 
 	return dst
 }
@@ -86,25 +81,19 @@ func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
 	dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
 	radius := len(kernel) - 1
 
-	parallel(0, src.w, func(xs <-chan int) {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
 		scanLine := make([]uint8, src.h*4)
 		scanLineF := make([]float64, len(scanLine))
-		for x := range xs {
-			src.scan(x, 0, x+1, src.h, scanLine)
+		for x := start; x < limit; x++ {
+			src.Scan(x, 0, x+1, src.h, scanLine)
 			for i, v := range scanLine {
 				scanLineF[i] = float64(v)
 			}
 			for y := 0; y < src.h; y++ {
-				min := y - radius
-				if min < 0 {
-					min = 0
-				}
-				max := y + radius
-				if max > src.h-1 {
-					max = src.h - 1
-				}
+				minv := max(0, y-radius)
+				maxv := min(y+radius, src.h-1)
 				var r, g, b, a, wsum float64
-				for iy := min; iy <= max; iy++ {
+				for iy := minv; iy <= maxv; iy++ {
 					i := iy * 4
 					weight := kernel[absint(y-iy)]
 					wsum += weight
@@ -126,7 +115,9 @@ func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
 				}
 			}
 		}
-	})
+	}, 0, src.w); err != nil {
+		panic(err)
+	}
 
 	return dst
 }
@@ -137,7 +128,6 @@ func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
 // Example:
 //
 //	dstImage := imaging.Sharpen(srcImage, 3.5)
-//
 func Sharpen(img image.Image, sigma float64) *image.NRGBA {
 	if sigma <= 0 {
 		return Clone(img)
@@ -147,10 +137,10 @@ func Sharpen(img image.Image, sigma float64) *image.NRGBA {
 	dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
 	blurred := Blur(img, sigma)
 
-	parallel(0, src.h, func(ys <-chan int) {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
 		scanLine := make([]uint8, src.w*4)
-		for y := range ys {
-			src.scan(0, y, src.w, y+1, scanLine)
+		for y := start; y < limit; y++ {
+			src.Scan(0, y, src.w, y+1, scanLine)
 			j := y * dst.Stride
 			for i := 0; i < src.w*4; i++ {
 				val := int(scanLine[i])<<1 - int(blurred.Pix[j])
@@ -163,7 +153,9 @@ func Sharpen(img image.Image, sigma float64) *image.NRGBA {
 				j++
 			}
 		}
-	})
+	}, 0, src.h); err != nil {
+		panic(err)
+	}
 
 	return dst
 }

vendor/github.com/kovidgoyal/imaging/histogram.go (14 changed lines; generated, vendored)
@@ -19,12 +19,12 @@ func Histogram(img image.Image) [256]float64 {
 		return histogram
 	}
 
-	parallel(0, src.h, func(ys <-chan int) {
+	if err := run_in_parallel_over_range(0, func(start, limit int) {
 		var tmpHistogram [256]float64
 		var tmpTotal float64
 		scanLine := make([]uint8, src.w*4)
-		for y := range ys {
-			src.scan(0, y, src.w, y+1, scanLine)
+		for y := start; y < limit; y++ {
+			src.Scan(0, y, src.w, y+1, scanLine)
 			i := 0
 			for x := 0; x < src.w; x++ {
 				s := scanLine[i : i+3 : i+3]
@@ -38,14 +38,16 @@ func Histogram(img image.Image) [256]float64 {
 			}
 		}
 		mu.Lock()
-		for i := 0; i < 256; i++ {
+		for i := range 256 {
 			histogram[i] += tmpHistogram[i]
 		}
 		total += tmpTotal
 		mu.Unlock()
-	})
+	}, 0, src.h); err != nil {
+		panic(err)
+	}
 
-	for i := 0; i < 256; i++ {
+	for i := range 256 {
 		histogram[i] = histogram[i] / total
 	}
 	return histogram
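
The `for i := range 256` rewrites throughout these files rely on Go 1.22's range-over-int; the two loop forms iterate the same values:

```go
package main

import "fmt"

func main() {
	sumA, sumB := 0, 0
	for i := 0; i < 4; i++ { // classic counted loop
		sumA += i
	}
	for i := range 4 { // Go 1.22+ range-over-int, same iteration space
		sumB += i
	}
	fmt.Println(sumA == sumB) // true
}
```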
185
vendor/github.com/kovidgoyal/imaging/io.go
generated
vendored
185
vendor/github.com/kovidgoyal/imaging/io.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
package imaging
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"bytes"
|
||||
"errors"
|
||||
"image"
|
||||
"image/draw"
|
||||
@@ -9,11 +9,14 @@ import (
|
||||
"image/jpeg"
|
||||
"image/png"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta/autometa"
|
||||
"github.com/rwcarlsen/goexif/exif"
|
||||
|
||||
"golang.org/x/image/bmp"
|
||||
"golang.org/x/image/tiff"
|
||||
)
|
||||
@@ -35,7 +38,7 @@ type decodeConfig struct {
|
||||
}
|
||||
|
||||
var defaultDecodeConfig = decodeConfig{
|
||||
autoOrientation: false,
|
||||
autoOrientation: true,
|
||||
}
|
||||
|
||||
// DecodeOption sets an optional parameter for the Decode and Open functions.
|
||||
@@ -43,7 +46,7 @@ type DecodeOption func(*decodeConfig)
|
||||
|
||||
// AutoOrientation returns a DecodeOption that sets the auto-orientation mode.
|
||||
// If auto-orientation is enabled, the image will be transformed after decoding
|
||||
// according to the EXIF orientation tag (if present). By default it's disabled.
|
||||
// according to the EXIF orientation tag (if present). By default it's enabled.
|
||||
func AutoOrientation(enabled bool) DecodeOption {
|
||||
return func(c *decodeConfig) {
|
||||
c.autoOrientation = enabled
|
||||
@@ -53,6 +56,7 @@ func AutoOrientation(enabled bool) DecodeOption {
|
||||
// Decode reads an image from r.
|
||||
func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
|
||||
cfg := defaultDecodeConfig
|
||||
|
||||
for _, option := range opts {
|
||||
option(&cfg)
|
||||
}
|
||||
@@ -61,25 +65,27 @@ func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
|
||||
img, _, err := image.Decode(r)
|
||||
return img, err
|
||||
}
|
||||
|
||||
var orient orientation
|
||||
pr, pw := io.Pipe()
|
||||
r = io.TeeReader(r, pw)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
orient = readOrientation(pr)
|
||||
io.Copy(ioutil.Discard, pr)
|
||||
}()
|
||||
md, r, err := autometa.Load(r)
|
||||
var oval orientation = orientationUnspecified
|
||||
if err == nil && md != nil && len(md.ExifData) > 6 {
|
||||
exif_data, err := exif.Decode(bytes.NewReader(md.ExifData))
|
||||
if err == nil {
|
||||
orient, err := exif_data.Get(exif.Orientation)
|
||||
if err == nil && orient != nil {
|
||||
x, err := strconv.ParseUint(orient.String(), 10, 0)
|
||||
if err == nil && x > 0 && x < 9 {
|
||||
oval = orientation(int(x))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
img, _, err := image.Decode(r)
|
||||
pw.Close()
|
||||
<-done
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fixOrientation(img, orient), nil
|
||||
return fixOrientation(img, oval), nil
|
||||
}
|
||||
|
||||
// Open loads an image from file.
|
||||
@@ -91,7 +97,6 @@ func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
|
||||
//
|
||||
// // Load an image and transform it depending on the EXIF orientation tag (if present).
|
||||
// img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
|
||||
//
|
||||
func Open(filename string, opts ...DecodeOption) (image.Image, error) {
|
||||
file, err := fs.Open(filename)
|
||||
if err != nil {
|
||||
@@ -101,6 +106,15 @@ func Open(filename string, opts ...DecodeOption) (image.Image, error) {
|
||||
return Decode(file, opts...)
|
||||
}
|
||||
|
||||
func OpenConfig(filename string) (ans image.Config, format_name string, err error) {
|
||||
file, err := fs.Open(filename)
|
||||
if err != nil {
|
||||
return ans, "", err
|
||||
}
|
||||
defer file.Close()
|
||||
return image.DecodeConfig(file)
|
||||
}
|
||||
|
||||
// Format is an image file format.
|
||||
type Format int
|
||||
|
||||
@@ -111,6 +125,10 @@ const (
|
||||
GIF
|
||||
TIFF
|
||||
BMP
|
||||
PBM
|
||||
PGM
|
||||
PPM
|
||||
PAM
|
||||
)
|
||||
|
||||
var formatExts = map[string]Format{
|
||||
@@ -121,6 +139,10 @@ var formatExts = map[string]Format{
|
||||
"tif": TIFF,
|
||||
"tiff": TIFF,
|
||||
"bmp": BMP,
|
||||
"pbm": PBM,
|
||||
"pgm": PGM,
|
||||
"ppm": PPM,
|
||||
"pam": PAM,
|
||||
}
|
||||
|
||||
var formatNames = map[Format]string{
|
||||
@@ -129,6 +151,9 @@ var formatNames = map[Format]string{
|
||||
GIF: "GIF",
|
||||
TIFF: "TIFF",
|
||||
BMP: "BMP",
|
||||
PBM: "PBM",
|
||||
PGM: "PGM",
|
||||
PAM: "PAM",
|
||||
}
|
||||
|
||||
func (f Format) String() string {
|
||||
@@ -264,7 +289,6 @@ func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) e
|
||||
//
|
||||
// // Save the image as JPEG with optional quality parameter set to 80.
|
||||
// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
|
||||
//
|
||||
func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
|
||||
f, err := FormatFromFilename(filename)
|
||||
if err != nil {
|
||||
@@ -298,129 +322,6 @@ const (
|
||||
orientationRotate90 = 8
|
||||
)
|
||||
|
||||
// readOrientation tries to read the orientation EXIF flag from image data in r.
// If the EXIF data block is not found or the orientation flag is not found
// or any other error occurs while reading the data, it returns the
// orientationUnspecified (0) value.
func readOrientation(r io.Reader) orientation {
	const (
		markerSOI      = 0xffd8
		markerAPP1     = 0xffe1
		exifHeader     = 0x45786966
		byteOrderBE    = 0x4d4d
		byteOrderLE    = 0x4949
		orientationTag = 0x0112
	)

	// Check if JPEG SOI marker is present.
	var soi uint16
	if err := binary.Read(r, binary.BigEndian, &soi); err != nil {
		return orientationUnspecified
	}
	if soi != markerSOI {
		return orientationUnspecified // Missing JPEG SOI marker.
	}

	// Find JPEG APP1 marker.
	for {
		var marker, size uint16
		if err := binary.Read(r, binary.BigEndian, &marker); err != nil {
			return orientationUnspecified
		}
		if err := binary.Read(r, binary.BigEndian, &size); err != nil {
			return orientationUnspecified
		}
		if marker>>8 != 0xff {
			return orientationUnspecified // Invalid JPEG marker.
		}
		if marker == markerAPP1 {
			break
		}
		if size < 2 {
			return orientationUnspecified // Invalid block size.
		}
		if _, err := io.CopyN(ioutil.Discard, r, int64(size-2)); err != nil {
			return orientationUnspecified
		}
	}

	// Check if EXIF header is present.
	var header uint32
	if err := binary.Read(r, binary.BigEndian, &header); err != nil {
		return orientationUnspecified
	}
	if header != exifHeader {
		return orientationUnspecified
	}
	if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
		return orientationUnspecified
	}

	// Read byte order information.
	var (
		byteOrderTag uint16
		byteOrder    binary.ByteOrder
	)
	if err := binary.Read(r, binary.BigEndian, &byteOrderTag); err != nil {
		return orientationUnspecified
	}
	switch byteOrderTag {
	case byteOrderBE:
		byteOrder = binary.BigEndian
	case byteOrderLE:
		byteOrder = binary.LittleEndian
	default:
		return orientationUnspecified // Invalid byte order flag.
	}
	if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
		return orientationUnspecified
	}

	// Skip the EXIF offset.
	var offset uint32
	if err := binary.Read(r, byteOrder, &offset); err != nil {
		return orientationUnspecified
	}
	if offset < 8 {
		return orientationUnspecified // Invalid offset value.
	}
	if _, err := io.CopyN(ioutil.Discard, r, int64(offset-8)); err != nil {
		return orientationUnspecified
	}

	// Read the number of tags.
	var numTags uint16
	if err := binary.Read(r, byteOrder, &numTags); err != nil {
		return orientationUnspecified
	}

	// Find the orientation tag.
	for i := 0; i < int(numTags); i++ {
		var tag uint16
		if err := binary.Read(r, byteOrder, &tag); err != nil {
			return orientationUnspecified
		}
		if tag != orientationTag {
			if _, err := io.CopyN(ioutil.Discard, r, 10); err != nil {
				return orientationUnspecified
			}
			continue
		}
		if _, err := io.CopyN(ioutil.Discard, r, 6); err != nil {
			return orientationUnspecified
		}
		var val uint16
		if err := binary.Read(r, byteOrder, &val); err != nil {
			return orientationUnspecified
		}
		if val < 1 || val > 8 {
			return orientationUnspecified // Invalid tag value.
		}
		return orientation(val)
	}
	return orientationUnspecified // Missing orientation tag.
}

// fixOrientation applies a transform to img corresponding to the given orientation flag.
func fixOrientation(img image.Image, o orientation) image.Image {
	switch o {
510 vendor/github.com/kovidgoyal/imaging/netpbm.go generated vendored Normal file
@@ -0,0 +1,510 @@
package imaging

import (
	"bufio"
	"errors"
	"fmt"
	"image"
	"image/color"
	"io"
	"strconv"
	"strings"
)

var _ = fmt.Print

// skip_comments reads ahead past any comment lines (starting with #) and returns the first non-comment, non-empty line.
func skip_comments(br *bufio.Reader) (string, error) {
	for {
		line, err := br.ReadString('\n')
		if err != nil {
			return "", err
		}
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		return line, nil
	}
}
type data_type int

const (
	rgb data_type = iota
	blackwhite
	grayscale
)

type header struct {
	format                      string
	width, height, num_channels uint
	maxval                      uint32
	has_alpha                   bool
	data_type                   data_type
}

func (h header) bytes_per_channel() uint {
	if h.maxval > 255 {
		return 2
	}
	return 1
}

func (h header) num_bytes_per_pixel() uint {
	return h.num_channels * h.bytes_per_channel()
}
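// Worked example (illustrative, not part of the upstream file): a 16-bit
// RGB_ALPHA PAM header (maxval 65535, four channels) yields
// bytes_per_channel() == 2 and num_bytes_per_pixel() == 4 * 2 == 8.
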
func read_ppm_header(br *bufio.Reader, magic string) (ans header, err error) {
	ans.format = magic
	required_num_fields := 3
	switch magic {
	case "P1", "P4":
		ans.data_type = blackwhite
		ans.num_channels = 1
		ans.maxval = 1
		required_num_fields = 2
	case "P2", "P5":
		ans.data_type = grayscale
		ans.num_channels = 1
	default:
		ans.data_type = rgb
		ans.num_channels = 3
	}
	var fields []uint
	for len(fields) < required_num_fields {
		var line string
		if line, err = skip_comments(br); err != nil {
			return
		}
		for x := range strings.FieldsSeq(line) {
			var val uint64
			if val, err = strconv.ParseUint(x, 10, 0); err != nil {
				return
			}
			fields = append(fields, uint(val))
		}
	}
	ans.width = fields[0]
	ans.height = fields[1]
	if required_num_fields > 2 {
		ans.maxval = uint32(fields[2])
	}
	if ans.maxval > 65535 {
		return ans, fmt.Errorf("header specifies a maximum value %d larger than 65535", ans.maxval)
	}
	return
}
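// Worked example (illustrative, not part of the upstream file): for a PGM
// header such as
//
//	P2
//	# created by hand
//	4 4
//	255
//
// read_ppm_header returns header{format: "P2", width: 4, height: 4,
// maxval: 255, num_channels: 1, data_type: grayscale}.
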
func read_pam_header(br *bufio.Reader) (ans header, err error) {
	ans.format = "P7"
	ans.data_type = rgb
	ans.num_channels = 3
	for {
		line, err := skip_comments(br)
		if err != nil {
			return ans, err
		}
		if line == "ENDHDR" {
			break
		}
		prefix, payload, found := strings.Cut(line, " ")
		if !found {
			return ans, fmt.Errorf("invalid line in header: %#v", line)
		}
		switch prefix {
		case "WIDTH":
			w, err := strconv.ParseUint(payload, 10, 0)
			if err != nil {
				return ans, fmt.Errorf("invalid width %#v in header: %w", payload, err)
			}
			ans.width = uint(w)
		case "HEIGHT":
			w, err := strconv.ParseUint(payload, 10, 0)
			if err != nil {
				return ans, fmt.Errorf("invalid height %#v in header: %w", payload, err)
			}
			ans.height = uint(w)
		case "MAXVAL":
			w, err := strconv.ParseUint(payload, 10, 0)
			if err != nil {
				return ans, fmt.Errorf("invalid maxval %#v in header: %w", payload, err)
			}
			ans.maxval = uint32(w)
		case "DEPTH":
			w, err := strconv.ParseUint(payload, 10, 0)
			if err != nil {
				return ans, fmt.Errorf("invalid depth %#v in header: %w", payload, err)
			}
			if w == 0 || w > 4 {
				return ans, fmt.Errorf("invalid depth %d in header", w)
			}
			ans.num_channels = uint(w)
		case "TUPLTYPE":
			switch payload {
			case "BLACKANDWHITE":
				ans.data_type = blackwhite
			case "BLACKANDWHITE_ALPHA":
				ans.has_alpha = true
				ans.data_type = blackwhite
			case "GRAYSCALE":
				ans.data_type = grayscale
			case "GRAYSCALE_ALPHA":
				ans.has_alpha = true
				ans.data_type = grayscale
			case "RGB":
			case "RGB_ALPHA":
				ans.has_alpha = true
			default:
				return ans, fmt.Errorf("invalid TUPLTYPE in header: %#v", payload)
			}
		}
	}
	if ans.width == 0 || ans.height == 0 || ans.maxval == 0 {
		return ans, fmt.Errorf("header does not specify width, height and maximum value")
	}
	ok := true
	switch ans.data_type {
	case rgb:
		ok = (!ans.has_alpha && ans.num_channels == 3) || (ans.has_alpha && ans.num_channels == 4)
	case blackwhite, grayscale:
		ok = (!ans.has_alpha && ans.num_channels == 1) || (ans.has_alpha && ans.num_channels == 2)
	}
	if !ok {
		return ans, fmt.Errorf("header-specified depth %d does not match TUPLTYPE", ans.num_channels)
	}
	return
}
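// Worked example (illustrative, not part of the upstream file): a PAM header
// such as
//
//	P7
//	WIDTH 2
//	HEIGHT 2
//	DEPTH 4
//	MAXVAL 255
//	TUPLTYPE RGB_ALPHA
//	ENDHDR
//
// parses to header{format: "P7", width: 2, height: 2, num_channels: 4,
// maxval: 255, has_alpha: true, data_type: rgb}.
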
func read_header(br *bufio.Reader) (ans header, err error) {
	b := []byte{0, 0}
	if _, err = io.ReadFull(br, b); err != nil {
		return ans, err
	}
	magic := string(b)
	switch magic {
	case "P1", "P2", "P3", "P4", "P5", "P6":
		return read_ppm_header(br, magic)
	case "P7":
		return read_pam_header(br)
	default:
		err = fmt.Errorf("unsupported netPBM format: %#v", magic)
		return
	}
}
func ascii_range_over_values(br *bufio.Reader, h header, callback func(uint32, []uint8) []uint8) (ans []uint8, err error) {
	// anssz is the byte budget for the decoded pixel data; reading stops once
	// the callback has produced at least this many bytes or the input ends.
	anssz := h.width * h.height * h.num_bytes_per_pixel()
	ans = make([]uint8, 0, anssz)
	for uint(len(ans)) < anssz {
		token, err := br.ReadString(' ')
		if err != nil && err != io.EOF {
			return nil, err
		}
		for field := range strings.FieldsSeq(token) {
			if val, perr := strconv.ParseUint(field, 10, 16); perr == nil {
				ans = callback(uint32(val), ans)
			}
		}
		if err == io.EOF {
			break
		}
	}
	return
}
func decode_rgb_ascii(br *bufio.Reader, h header) (ans []byte, err error) {
	mult := uint32(255)
	if h.maxval > 255 {
		mult = 65535
	}
	anssz := h.width * h.height * h.num_bytes_per_pixel()
	if mult == 255 {
		ans, err = ascii_range_over_values(br, h, func(val uint32, ans []uint8) []uint8 {
			ch := (val * mult) / h.maxval
			return append(ans, uint8(ch))
		})
		if err == nil && uint(len(ans)) > anssz {
			ans = ans[:anssz] // drop surplus values past the declared image area
		}
	} else {
		// 16-bit samples: collect the three big-endian channel values per
		// pixel first (6 bytes per pixel, which matches the byte budget used
		// by ascii_range_over_values), then interleave an opaque 16-bit alpha
		// channel so the result can back an image.NRGBA64.
		var rgb []uint8
		rgb, err = ascii_range_over_values(br, h, func(val uint32, ans []uint8) []uint8 {
			ch := (val * mult) / h.maxval
			return append(ans, uint8(ch>>8), uint8(ch))
		})
		if err == nil {
			if uint(len(rgb)) < anssz {
				return nil, errors.New("insufficient color data present in PPM file")
			}
			rgb = rgb[:anssz]
			ans = make([]uint8, 0, (anssz/6)*8)
			for i := 0; i+6 <= len(rgb); i += 6 {
				ans = append(ans, rgb[i:i+6]...)
				ans = append(ans, 255, 255) // alpha is always 255
			}
		}
	}
	if err != nil {
		return nil, err
	}
	if uint(len(ans)) < anssz {
		return nil, errors.New("insufficient color data present in PPM file")
	}
	return
}
func DecodeNetPBMConfig(r io.Reader) (cfg image.Config, err error) {
	br := bufio.NewReader(r)
	h, err := read_header(br)
	if err != nil {
		return cfg, err
	}
	cfg.Width = int(h.width)
	cfg.Height = int(h.height)
	cfg.ColorModel = NRGBModel
	switch h.data_type {
	case blackwhite, grayscale:
		if h.has_alpha {
			if h.maxval > 255 {
				cfg.ColorModel = color.NRGBA64Model
			} else {
				cfg.ColorModel = color.NRGBAModel
			}
		} else {
			if h.maxval > 255 {
				cfg.ColorModel = color.Gray16Model
			} else {
				cfg.ColorModel = color.GrayModel
			}
		}
	default:
		if h.has_alpha {
			if h.maxval > 255 {
				cfg.ColorModel = color.NRGBA64Model
			} else {
				cfg.ColorModel = color.NRGBAModel
			}
		} else {
			if h.maxval > 255 {
				cfg.ColorModel = color.NRGBA64Model
			} else {
				cfg.ColorModel = NRGBModel
			}
		}
	}
	return
}
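// Illustrative usage (not part of the upstream file):
//
//	cfg, err := DecodeNetPBMConfig(strings.NewReader("P5\n2 2 255\n"))
//	// cfg.Width == 2, cfg.Height == 2, cfg.ColorModel == color.GrayModel
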
func decode_black_white_ascii(br *bufio.Reader, h header) (img image.Image, err error) {
	r := image.Rect(0, 0, int(h.width), int(h.height))
	g := &image.Gray{Stride: r.Dx(), Rect: r}
	g.Pix, err = ascii_range_over_values(br, h, func(val uint32, ans []uint8) []uint8 {
		// In PBM, 1 is black and 0 is white, so invert the bit before scaling.
		var c uint8 = 255 * uint8(1-(val&1))
		return append(ans, c)
	})
	return g, err
}
func decode_grayscale_ascii(br *bufio.Reader, h header) (img image.Image, err error) {
	r := image.Rect(0, 0, int(h.width), int(h.height))
	if h.maxval > 255 {
		g := &image.Gray16{Stride: 2 * r.Dx(), Rect: r}
		g.Pix, err = ascii_range_over_values(br, h, func(val uint32, ans []uint8) []uint8 {
			c := uint16(val * 65535 / h.maxval)
			return append(ans, uint8(c>>8), uint8(c))
		})
		return g, err
	} else {
		g := &image.Gray{Stride: r.Dx(), Rect: r}
		g.Pix, err = ascii_range_over_values(br, h, func(val uint32, ans []uint8) []uint8 {
			c := uint8(val * 255 / h.maxval)
			return append(ans, c)
		})
		return g, err
	}
}
// Consume whitespace after the header (per the spec it is a single whitespace
// character, but some files contain more), then read exactly
// num_of_bytes_needed bytes of raw pixel data.
func skip_whitespace_before_pixel_data(br *bufio.Reader, num_of_bytes_needed uint) ([]uint8, error) {
	for {
		b, err := br.Peek(1)
		if err != nil {
			return nil, err
		}
		if b[0] == '\n' || b[0] == '\r' || b[0] == '\t' || b[0] == ' ' {
			br.ReadByte()
		} else {
			break
		}
	}
	ans := make([]byte, num_of_bytes_needed)
	_, err := io.ReadFull(br, ans)
	return ans, err
}
func rescale(v uint32, num, den uint32) uint32 {
	return (v * num) / den
}
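// Worked example (illustrative, not part of the upstream file): for a file
// with maxval 31, rescale(31, 255, 31) == 255 and rescale(15, 255, 31) == 123,
// mapping the header's declared value range onto the full 8-bit range.
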
func rescale_binary_data(b []uint8, num, den uint32) error {
	return run_in_parallel_over_range(0, func(start, end int) {
		for i := start; i < end; i++ {
			b[i] = uint8(rescale(uint32(b[i]), num, den))
		}
	}, 0, len(b))
}
func rescale_binary_data16(b []uint8, num, den uint32) error {
	if len(b)&1 != 0 {
		return fmt.Errorf("pixel data is not a multiple of two but uses 16 bits per channel")
	}
	return run_in_parallel_over_range(0, func(start, end int) {
		start *= 2
		end *= 2
		for i := start; i < end; i += 2 {
			v := uint32((uint16(b[i]) << 8) | uint16(b[i+1]))
			v = rescale(v, num, den)
			b[i] = uint8(v >> 8)
			b[i+1] = uint8(v)
		}
	}, 0, len(b)/2)
}
func decode_binary_data(br *bufio.Reader, h header) (ans image.Image, err error) {
	var binary_data []uint8
	if binary_data, err = skip_whitespace_before_pixel_data(br, h.width*h.height*h.num_bytes_per_pixel()); err != nil {
		return
	}
	if n := h.num_bytes_per_pixel() * h.width * h.height; uint(len(binary_data)) < n {
		return nil, fmt.Errorf(
			"insufficient pixel data for image area and num_channels (%d): %f < %d",
			h.num_channels, float64(len(binary_data))/float64(h.width*h.height), n/(h.width*h.height))
	}
	// Normalize samples whose maxval is not a full 8- or 16-bit range.
	switch {
	case h.maxval < 255:
		if err = rescale_binary_data(binary_data, 255, h.maxval); err != nil {
			return nil, err
		}
	case 255 < h.maxval && h.maxval < 65535:
		if err = rescale_binary_data16(binary_data, 65535, h.maxval); err != nil {
			return nil, err
		}
	}

	r := image.Rect(0, 0, int(h.width), int(h.height))
	switch h.num_channels {
	case 1:
		// bw or gray without alpha
		if h.maxval > 255 {
			return &image.Gray16{Rect: r, Stride: r.Dx() * 2, Pix: binary_data}, nil
		}
		return &image.Gray{Rect: r, Stride: r.Dx(), Pix: binary_data}, nil
	case 2:
		// bw or gray with alpha
		if h.maxval > 255 {
			g := image.NewNRGBA64(r)
			b := g.Pix
			if err = run_in_parallel_over_range(0, func(start, end int) {
				for i := start; i < end; i++ {
					src := binary_data[i*4 : i*4+4]
					dest := b[i*8 : i*8+8]
					gray1, gray2 := src[0], src[1]
					dest[0], dest[1], dest[2], dest[3], dest[4], dest[5] = gray1, gray2, gray1, gray2, gray1, gray2
					dest[6], dest[7] = src[2], src[3]
				}
			}, 0, int(h.width*h.height)); err != nil {
				return nil, err
			}
			return g, nil
		}
		g := image.NewNRGBA(r)
		b := g.Pix
		if err = run_in_parallel_over_range(0, func(start, end int) {
			for i := start; i < end; i++ {
				src := binary_data[i*2 : i*2+2]
				dest := b[i*4 : i*4+4]
				dest[0], dest[1], dest[2], dest[3] = src[0], src[0], src[0], src[1]
			}
		}, 0, int(h.width*h.height)); err != nil {
			return nil, err
		}
		return g, nil
	case 3:
		// RGB without alpha
		if h.maxval > 255 {
			g := image.NewNRGBA64(r)
			b := g.Pix
			if err = run_in_parallel_over_range(0, func(start, end int) {
				for i := start; i < end; i++ {
					src := binary_data[i*6 : i*6+6]
					dest := b[i*8 : i*8+8]
					copy(dest[:6], src)
					dest[6], dest[7] = 255, 255
				}
			}, 0, int(h.width*h.height)); err != nil {
				return nil, err
			}
			return g, nil
		}
		return NewNRGBWithContiguousRGBPixels(binary_data, 0, 0, r.Dx(), r.Dy())
	case 4:
		// RGB with alpha
		if h.maxval <= 255 {
			return &image.NRGBA{Rect: r, Stride: r.Dx() * int(h.num_bytes_per_pixel()), Pix: binary_data}, nil
		}
		return &image.NRGBA64{Rect: r, Stride: r.Dx() * int(h.num_bytes_per_pixel()), Pix: binary_data}, nil
	default:
		return nil, fmt.Errorf("unsupported number of channels: %d", h.num_channels)
	}
}
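// Worked example (illustrative, not part of the upstream file): an 8-bit
// GRAYSCALE_ALPHA pixel {0x40, 0xff} expands to the NRGBA bytes
// {0x40, 0x40, 0x40, 0xff}: the gray value is replicated into R, G and B and
// the second byte becomes the alpha channel.
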
// DecodeNetPBM decodes a netpbm image (PBM, PGM, PPM or PAM) from r and
// returns it as an image.Image. Supports both the ASCII (P1-P3) and binary
// (P4-P7) variants.
func DecodeNetPBM(r io.Reader) (img image.Image, err error) {
	br := bufio.NewReader(r)
	h, err := read_header(br)
	if err != nil {
		return nil, err
	}
	var binary_data []uint8
	switch h.format {
	case "P1":
		return decode_black_white_ascii(br, h)
	case "P2":
		return decode_grayscale_ascii(br, h)
	case "P3":
		vals, err := decode_rgb_ascii(br, h)
		if err != nil {
			return nil, err
		}
		if h.maxval <= 255 {
			return NewNRGBWithContiguousRGBPixels(vals, 0, 0, int(h.width), int(h.height))
		}
		return &image.NRGBA64{Pix: vals, Stride: int(h.width) * 8, Rect: image.Rect(0, 0, int(h.width), int(h.height))}, nil
	case "P4":
		// P4 rows are bit-packed most-significant-bit first, each row padded
		// to a whole number of bytes.
		bytes_per_row := (h.width + 7) / 8
		if binary_data, err = skip_whitespace_before_pixel_data(br, h.height*bytes_per_row); err != nil {
			return nil, err
		}
		ans := image.NewGray(image.Rect(0, 0, int(h.width), int(h.height)))
		i := 0
		for range h.height {
			for x := range h.width {
				byteIdx := x / 8
				bitIdx := 7 - uint(x%8)
				bit := (binary_data[byteIdx] >> bitIdx) & 1
				ans.Pix[i] = (1 - bit) * 255 // a set bit is black
				i++
			}
			binary_data = binary_data[bytes_per_row:]
		}
		if len(binary_data) > 0 {
			return nil, fmt.Errorf("malformed netPBM file: %d unconsumed bytes of pixel data", len(binary_data))
		}
		return ans, nil
	case "P5", "P6", "P7":
		return decode_binary_data(br, h)
	default:
		return nil, fmt.Errorf("invalid format for PPM: %#v", h.format)
	}
}
// Register this decoder with Go's image package
func init() {
	image.RegisterFormat("pbm", "P1", DecodeNetPBM, DecodeNetPBMConfig)
	image.RegisterFormat("pgm", "P2", DecodeNetPBM, DecodeNetPBMConfig)
	image.RegisterFormat("ppm", "P3", DecodeNetPBM, DecodeNetPBMConfig)
	image.RegisterFormat("pbm", "P4", DecodeNetPBM, DecodeNetPBMConfig)
	image.RegisterFormat("pgm", "P5", DecodeNetPBM, DecodeNetPBMConfig)
	image.RegisterFormat("ppm", "P6", DecodeNetPBM, DecodeNetPBMConfig)
	image.RegisterFormat("pam", "P7", DecodeNetPBM, DecodeNetPBMConfig)
}
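// Illustrative usage (not part of the upstream file): once the formats are
// registered, the standard library dispatches on the magic bytes, so plain
// image.Decode handles netpbm input:
//
//	img, format, err := image.Decode(bytes.NewReader(data)) // format == "ppm" for P3/P6 data
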
440 vendor/github.com/kovidgoyal/imaging/nrgb.go generated vendored Normal file
@@ -0,0 +1,440 @@
package imaging

import (
	"fmt"
	"image"
	"image/color"
)

var _ = fmt.Print
type NRGBColor struct {
	R, G, B uint8
}

func (c NRGBColor) AsSharp() string {
	return fmt.Sprintf("#%02X%02X%02X", c.R, c.G, c.B)
}

func (c NRGBColor) String() string {
	return fmt.Sprintf("NRGBColor{%02X %02X %02X}", c.R, c.G, c.B)
}

func (c NRGBColor) RGBA() (r, g, b, a uint32) {
	r = uint32(c.R)
	r |= r << 8
	g = uint32(c.G)
	g |= g << 8
	b = uint32(c.B)
	b |= b << 8
	a = 65535 // (255 << 8 | 255)
	return
}
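// Worked example (illustrative, not part of the upstream file): RGBA widens
// each 8-bit channel by replicating it into both bytes, so R == 0x12 yields
// r == 0x1212, and the color is always fully opaque (a == 0xffff).
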
// NRGB is an in-memory image whose At method returns NRGBColor values.
type NRGB struct {
	// Pix holds the image's pixels, in R, G, B order. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
}

func nrgbModel(c color.Color) color.Color {
	if _, ok := c.(NRGBColor); ok {
		return c
	}
	r, g, b, a := c.RGBA()
	switch a {
	case 0xffff:
		return NRGBColor{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8)}
	case 0:
		return NRGBColor{0, 0, 0}
	default:
		// Since Color.RGBA returns an alpha-premultiplied color, we should have r <= a && g <= a && b <= a.
		r = (r * 0xffff) / a
		g = (g * 0xffff) / a
		b = (b * 0xffff) / a
		return NRGBColor{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8)}
	}
}
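// Worked example (illustrative, not part of the upstream file): a half-opaque
// premultiplied white, r == g == b == a == 0x8000, un-premultiplies to
// (0x8000 * 0xffff) / 0x8000 == 0xffff per channel, i.e. NRGBColor{0xFF, 0xFF, 0xFF}.
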
var NRGBModel color.Model = color.ModelFunc(nrgbModel)

func (p *NRGB) ColorModel() color.Model { return NRGBModel }

func (p *NRGB) Bounds() image.Rectangle { return p.Rect }

func (p *NRGB) At(x, y int) color.Color {
	return p.NRGBAt(x, y)
}

func (p *NRGB) NRGBAt(x, y int) NRGBColor {
	if !(image.Point{x, y}.In(p.Rect)) {
		return NRGBColor{}
	}
	i := p.PixOffset(x, y)
	s := p.Pix[i : i+3 : i+3] // Small cap improves performance, see https://golang.org/issue/27857
	return NRGBColor{s[0], s[1], s[2]}
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (p *NRGB) PixOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3
}
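// Worked example (illustrative, not part of the upstream file): in a 4x4
// image with Rect.Min at the origin, Stride is 4*3 == 12, so PixOffset(2, 1)
// returns 1*12 + 2*3 == 18, the index of that pixel's R byte.
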
func (p *NRGB) Set(x, y int, c color.Color) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	c1 := NRGBModel.Convert(c).(NRGBColor)
	s := p.Pix[i : i+3 : i+3] // Small cap improves performance, see https://golang.org/issue/27857
	s[0] = c1.R
	s[1] = c1.G
	s[2] = c1.B
}

func (p *NRGB) SetRGBA64(x, y int, c color.RGBA64) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	r, g, b, a := uint32(c.R), uint32(c.G), uint32(c.B), uint32(c.A)
	if (a != 0) && (a != 0xffff) {
		r = (r * 0xffff) / a
		g = (g * 0xffff) / a
		b = (b * 0xffff) / a
	}
	i := p.PixOffset(x, y)
	s := p.Pix[i : i+3 : i+3] // Small cap improves performance, see https://golang.org/issue/27857
	s[0] = uint8(r >> 8)
	s[1] = uint8(g >> 8)
	s[2] = uint8(b >> 8)
}

func (p *NRGB) SetNRGBA(x, y int, c color.NRGBA) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	s := p.Pix[i : i+3 : i+3] // Small cap improves performance, see https://golang.org/issue/27857
	s[0] = c.R
	s[1] = c.G
	s[2] = c.B
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *NRGB) SubImage(r image.Rectangle) image.Image {
	r = r.Intersect(p.Rect)
	// If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
	// either r1 or r2 if the intersection is empty. Without explicitly checking for
	// this, the Pix[i:] expression below can panic.
	if r.Empty() {
		return &NRGB{}
	}
	i := p.PixOffset(r.Min.X, r.Min.Y)
	return &NRGB{
		Pix:    p.Pix[i:],
		Stride: p.Stride,
		Rect:   r,
	}
}

// Opaque reports whether the image is fully opaque. NRGB has no alpha
// channel, so it always is.
func (p *NRGB) Opaque() bool { return true }
type scanner_rgb struct {
	image            image.Image
	w, h             int
	palette          []NRGBColor
	opaque_base      []float64
	opaque_base_uint []uint8
}

func (s scanner_rgb) Bytes_per_channel() int  { return 1 }
func (s scanner_rgb) Num_of_channels() int    { return 3 }
func (s scanner_rgb) Bounds() image.Rectangle { return s.image.Bounds() }

// blend composites the non-premultiplied color (r, g, b, a) over base,
// writing the opaque 8-bit result into dest.
func blend(dest []uint8, base []float64, r, g, b, a uint8) {
	alpha := float64(a) / 255.0
	dest[0] = uint8(alpha*float64(r) + (1.0-alpha)*base[0])
	dest[1] = uint8(alpha*float64(g) + (1.0-alpha)*base[1])
	dest[2] = uint8(alpha*float64(b) + (1.0-alpha)*base[2])
}
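// Worked example (illustrative, not part of the upstream file): blending
// r == 255 with a == 128 over a black base gives
// (128/255)*255 + (1 - 128/255)*0 == 128, i.e. a roughly 50/50 mix.
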
func newScannerRGB(img image.Image, opaque_base NRGBColor) *scanner_rgb {
	s := &scanner_rgb{
		image: img, w: img.Bounds().Dx(), h: img.Bounds().Dy(),
		opaque_base:      []float64{float64(opaque_base.R), float64(opaque_base.G), float64(opaque_base.B)}[0:3:3],
		opaque_base_uint: []uint8{opaque_base.R, opaque_base.G, opaque_base.B}[0:3:3],
	}
	if img, ok := img.(*image.Paletted); ok {
		s.palette = make([]NRGBColor, max(256, len(img.Palette)))
		d := [3]uint8{0, 0, 0}
		ds := d[:]
		for i := 0; i < len(img.Palette); i++ {
			r, g, b, a := img.Palette[i].RGBA()
			switch a {
			case 0:
				s.palette[i] = opaque_base
			case 0xffff:
				s.palette[i] = NRGBColor{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8)}
			default:
				blend(ds, s.opaque_base, uint8((r*0xffff/a)>>8), uint8((g*0xffff/a)>>8), uint8((b*0xffff/a)>>8), uint8(a>>8))
				s.palette[i] = NRGBColor{R: d[0], G: d[1], B: d[2]}
			}
		}
	}
	return s
}
// Scan scans the given rectangular region of the image into dst as packed
// 8-bit RGB, compositing any transparency over the scanner's opaque base color.
func (s *scanner_rgb) Scan(x1, y1, x2, y2 int, dst []uint8) {
	switch img := s.image.(type) {
	case *image.NRGBA:
		j := 0
		for y := y1; y < y2; y++ {
			i := y*img.Stride + x1*4
			for x := x1; x < x2; x++ {
				blend(dst[j:j+3:j+3], s.opaque_base, img.Pix[i], img.Pix[i+1], img.Pix[i+2], img.Pix[i+3])
				j += 3
				i += 4
			}
		}

	case *image.NRGBA64:
		j := 0
		for y := y1; y < y2; y++ {
			i := y*img.Stride + x1*8
			for x := x1; x < x2; x++ {
				blend(dst[j:j+3:j+3], s.opaque_base, img.Pix[i], img.Pix[i+2], img.Pix[i+4], img.Pix[i+6])
				j += 3
				i += 8
			}
		}

	case *image.RGBA:
		j := 0
		for y := y1; y < y2; y++ {
			i := y*img.Stride + x1*4
			for x := x1; x < x2; x++ {
				d := dst[j : j+3 : j+3]
				a := img.Pix[i+3]
				switch a {
				case 0:
					d[0] = s.opaque_base_uint[0]
					d[1] = s.opaque_base_uint[1]
					d[2] = s.opaque_base_uint[2]
				case 0xff:
					s := img.Pix[i : i+3 : i+3]
					d[0] = s[0]
					d[1] = s[1]
					d[2] = s[2]
				default:
					r16 := uint16(img.Pix[i])
					g16 := uint16(img.Pix[i+1])
					b16 := uint16(img.Pix[i+2])
					a16 := uint16(a)
					blend(d, s.opaque_base, uint8(r16*0xff/a16), uint8(g16*0xff/a16), uint8(b16*0xff/a16), a)
				}
				j += 3
				i += 4
			}
		}

	case *image.RGBA64:
		j := 0
		for y := y1; y < y2; y++ {
			i := y*img.Stride + x1*8
			for x := x1; x < x2; x++ {
				src := img.Pix[i : i+8 : i+8]
				d := dst[j : j+3 : j+3]
				a := src[6]
				switch a {
				case 0:
					d[0] = s.opaque_base_uint[0]
					d[1] = s.opaque_base_uint[1]
					d[2] = s.opaque_base_uint[2]
				case 0xff:
					d[0] = src[0]
					d[1] = src[2]
					d[2] = src[4]
				default:
					r32 := uint32(src[0])<<8 | uint32(src[1])
					g32 := uint32(src[2])<<8 | uint32(src[3])
					b32 := uint32(src[4])<<8 | uint32(src[5])
					a32 := uint32(src[6])<<8 | uint32(src[7])
					blend(d, s.opaque_base, uint8((r32*0xffff/a32)>>8), uint8((g32*0xffff/a32)>>8), uint8((b32*0xffff/a32)>>8), a)
				}
				j += 3
				i += 8
			}
		}

	case *image.Gray:
		j := 0
		for y := y1; y < y2; y++ {
			i := y*img.Stride + x1
			for x := x1; x < x2; x++ {
				c := img.Pix[i]
				d := dst[j : j+3 : j+3]
				d[0] = c
				d[1] = c
				d[2] = c
				j += 3
				i++
			}
		}

	case *image.Gray16:
		j := 0
		for y := y1; y < y2; y++ {
			i := y*img.Stride + x1*2
			for x := x1; x < x2; x++ {
				c := img.Pix[i] // high byte of the big-endian 16-bit sample
				d := dst[j : j+3 : j+3]
				d[0] = c
				d[1] = c
				d[2] = c
				j += 3
				i += 2
			}
		}

	case *image.YCbCr:
		j := 0
		x1 += img.Rect.Min.X
		x2 += img.Rect.Min.X
		y1 += img.Rect.Min.Y
		y2 += img.Rect.Min.Y

		hy := img.Rect.Min.Y / 2
		hx := img.Rect.Min.X / 2
		for y := y1; y < y2; y++ {
			iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)

			var yBase int
			switch img.SubsampleRatio {
			case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio422:
				yBase = (y - img.Rect.Min.Y) * img.CStride
			case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
				yBase = (y/2 - hy) * img.CStride
			}

			for x := x1; x < x2; x++ {
				var ic int
				switch img.SubsampleRatio {
				case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio440:
					ic = yBase + (x - img.Rect.Min.X)
				case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
					ic = yBase + (x/2 - hx)
				default:
					ic = img.COffset(x, y)
				}

				// Fixed-point YCbCr to RGB conversion in 16.16 format, as in
				// Go's image/color: 91881 ~ 1.402*65536, 22554 ~ 0.344*65536,
				// 46802 ~ 0.714*65536, 116130 ~ 1.772*65536.
				yy1 := int32(img.Y[iy]) * 0x10101
				cb1 := int32(img.Cb[ic]) - 128
				cr1 := int32(img.Cr[ic]) - 128

				r := yy1 + 91881*cr1
				if uint32(r)&0xff000000 == 0 {
					r >>= 16
				} else {
					r = ^(r >> 31)
				}

				g := yy1 - 22554*cb1 - 46802*cr1
				if uint32(g)&0xff000000 == 0 {
					g >>= 16
				} else {
					g = ^(g >> 31)
				}

				b := yy1 + 116130*cb1
				if uint32(b)&0xff000000 == 0 {
					b >>= 16
				} else {
					b = ^(b >> 31)
				}

				d := dst[j : j+3 : j+3]
				d[0] = uint8(r)
				d[1] = uint8(g)
				d[2] = uint8(b)

				iy++
				j += 3
			}
		}

	case *image.Paletted:
		j := 0
		for y := y1; y < y2; y++ {
			i := y*img.Stride + x1
			for x := x1; x < x2; x++ {
				c := s.palette[img.Pix[i]]
				d := dst[j : j+3 : j+3]
				d[0] = c.R
				d[1] = c.G
				d[2] = c.B
				j += 3
				i++
			}
		}

	default:
		j := 0
		b := s.image.Bounds()
		x1 += b.Min.X
		x2 += b.Min.X
		y1 += b.Min.Y
		y2 += b.Min.Y
		for y := y1; y < y2; y++ {
			for x := x1; x < x2; x++ {
				r16, g16, b16, a16 := s.image.At(x, y).RGBA()
				d := dst[j : j+3 : j+3]
				switch a16 {
				case 0xffff:
					d[0] = uint8(r16 >> 8)
					d[1] = uint8(g16 >> 8)
					d[2] = uint8(b16 >> 8)
				case 0:
					d[0] = s.opaque_base_uint[0]
					d[1] = s.opaque_base_uint[1]
					d[2] = s.opaque_base_uint[2]
				default:
					blend(d, s.opaque_base, uint8(((r16*0xffff)/a16)>>8), uint8(((g16*0xffff)/a16)>>8), uint8(((b16*0xffff)/a16)>>8), uint8(a16>>8))
				}
				j += 3
			}
		}
	}
}
func NewNRGB(r image.Rectangle) *NRGB {
	return &NRGB{
		Pix:    make([]uint8, 3*r.Dx()*r.Dy()),
		Stride: 3 * r.Dx(),
		Rect:   r,
	}
}
func NewNRGBWithContiguousRGBPixels(p []byte, left, top, width, height int) (*NRGB, error) {
	const bpp = 3
	if expected := bpp * width * height; expected != len(p) {
		return nil, fmt.Errorf("the image width and height don't match the size of the specified pixel data: width=%d height=%d sz=%d != %d", width, height, len(p), expected)
	}
	return &NRGB{
		Pix:    p,
		Stride: bpp * width,
		Rect:   image.Rectangle{image.Point{left, top}, image.Point{left + width, top + height}},
	}, nil
}
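// Illustrative usage (not part of the upstream file): wrap an existing
// contiguous RGB buffer without copying, here a hypothetical 2x2 image:
//
//	pixels := make([]byte, 2*2*3)
//	img, err := NewNRGBWithContiguousRGBPixels(pixels, 0, 0, 2, 2)
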
func NewNRGBScanner(source_image image.Image, opaque_base NRGBColor) Scanner {
	return newScannerRGB(source_image, opaque_base)
}
Some files were not shown because too many files have changed in this diff