Merge pull request #6668 from owncloud/ainmosni/feature/frontend-tracing

Migrate frontend to new service tracing setup.
This commit is contained in:
Daniël Franke
2023-07-04 16:29:41 +02:00
committed by GitHub
70 changed files with 6958 additions and 496 deletions

View File

@@ -14,6 +14,7 @@ github.com/creack/pty v1.1.15/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/efficientgo/core v1.0.0-rc.0 h1:jJoA0N+C4/knWYVZ6GrdHOtDyrg8Y/TR4vFpTaqTsqs=
github.com/efficientgo/core v1.0.0-rc.0/go.mod h1:kQa0V74HNYMfuJH6jiPiwNdpWXl4xd/K4tzlrcvYDQI=
github.com/efficientgo/tools/core v0.0.0-20210201220623-8118984754c2 h1:GD19G/vhEa8amDJDBYcTaFXZjxKed67Ev0ZFPHdd/LQ=
github.com/efficientgo/tools/core v0.0.0-20210201220623-8118984754c2/go.mod h1:cFZoHUhKg31xkPnPjhPKFtevnx0Xcg67ptBRxbpaxtk=
github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b h1:ZHiD4/yE4idlbqvAO6iYCOYRzOMRpxkW+FKasRA3tsQ=

View File

@@ -69,6 +69,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -92,6 +93,7 @@ github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0=
github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -99,6 +101,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -185,10 +188,13 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a h1:d4+I1YEKVmWZrgkt6jpXBnLgV2ZjO0YxEtLDdfIZfH4=
github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw=
github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f h1:BNuUg9k2EiJmlMwjoef3e8vZLHplbVw6DrjGFjLL+Yo=
github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q=
github.com/jhump/protoreflect v1.9.1-0.20210817181203-db1a327a393e h1:Yb4fEGk+GtBSNuvy5rs0ZJt/jtopc/z9azQaj3xbies=
github.com/jhump/protoreflect v1.9.1-0.20210817181203-db1a327a393e/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg=
github.com/jhump/protoreflect v1.11.1-0.20220213155251-0c2aedc66cf4 h1:E2CdxLXYSn6Zrj2+u8DWrwMJW3YZLSWtM/7kIL8OL18=
github.com/jhump/protoreflect v1.11.1-0.20220213155251-0c2aedc66cf4/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
@@ -198,6 +204,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@@ -223,6 +230,7 @@ github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM=
@@ -235,6 +243,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -246,6 +255,7 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -280,15 +290,18 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -370,6 +383,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20210907225631-ff17edfbf26d h1:kuk8nKPQ25KCDODLCDXt99tnTVeOyOM8HGvtJ0NzAvw=
golang.org/x/net v0.0.0-20210907225631-ff17edfbf26d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -440,10 +454,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210908143011-c212e7322662 h1:2+M7sCYQcvlpag1ug05BCZa5B9jbazrHdgsOdwqlfE8=
golang.org/x/sys v0.0.0-20210908143011-c212e7322662/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -589,6 +605,7 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb h1:0m9wktIpOxGw+SSKmydXWB3Z3GTfcPP6+q75HCQa6HI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -613,6 +630,7 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0-dev.0.20210907181116-2f3355d2244e h1:HKKXKZmOaf1UtYn+/ga7+QSLvK7l6K5Mppj9yGgXYCo=
google.golang.org/grpc v1.41.0-dev.0.20210907181116-2f3355d2244e/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -629,6 +647,7 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=

4
go.mod
View File

@@ -13,7 +13,7 @@ require (
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/coreos/go-oidc/v3 v3.6.0
github.com/cs3org/go-cs3apis v0.0.0-20230516150832-730ac860c71d
github.com/cs3org/reva/v2 v2.14.1-0.20230623085734-919a9585f147
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806
github.com/disintegration/imaging v1.6.2
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
github.com/egirna/icap-client v0.1.1
@@ -336,4 +336,4 @@ require (
replace github.com/cs3org/go-cs3apis => github.com/2403905/go-cs3apis v0.0.0-20230517122726-727045414fd1
replace github.com/cs3org/reva/v2 => github.com/micbar/reva/v2 v2.0.0-20230626125956-c381fe19a108
// replace github.com/cs3org/reva/v2 => github.com/micbar/reva/v2 v2.0.0-20230626125956-c381fe19a108

4
go.sum
View File

@@ -625,6 +625,8 @@ github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo
github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4=
github.com/crewjam/saml v0.4.13 h1:TYHggH/hwP7eArqiXSJUvtOPNzQDyQ7vwmwEqlFWhMc=
github.com/crewjam/saml v0.4.13/go.mod h1:igEejV+fihTIlHXYP8zOec3V5A8y3lws5bQBFsTm4gA=
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806 h1:3fPvPnnZib/cMA4f0GXJvX7lhQs7O31ZmDuSDHxQnVk=
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806/go.mod h1:E32krZG159YflDSjDWfx/QGIC2529PS5LiPnGNHu3d0=
github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8 h1:Z9lwXumT5ACSmJ7WGnFl+OMLLjpz5uR2fyz7dC255FI=
github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8/go.mod h1:4abs/jPXcmJzYoYGF91JF9Uq9s/KL5n1jvFDix8KcqY=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -1274,8 +1276,6 @@ github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b h1:Q53idHrTuQD
github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b/go.mod h1:KirJrATYGbTyUwVR26xIkaipRqRcMRXBf8N5dacvGus=
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 h1:Z/i1e+gTZrmcGeZyWckaLfucYG6KYOXLWo4co8pZYNY=
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103/go.mod h1:o9YPB5aGP8ob35Vy6+vyq3P3bWe7NQWzf+JLiXCiMaE=
github.com/micbar/reva/v2 v2.0.0-20230626125956-c381fe19a108 h1:nb3VTDiZ8AHDdaRjNAsKgbFH93SSq/en2Y6hbMRpX38=
github.com/micbar/reva/v2 v2.0.0-20230626125956-c381fe19a108/go.mod h1:E32krZG159YflDSjDWfx/QGIC2529PS5LiPnGNHu3d0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.40/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=

View File

@@ -113,7 +113,7 @@ func check(c *cli.Context) error {
})
treeSize, err := walkTree(ctx, tree, lu, n, repairFlag)
treesizeFromMetadata, err := n.GetTreeSize()
treesizeFromMetadata, err := n.GetTreeSize(c.Context)
if err != nil {
fmt.Printf("failed to read treesize of node: %s: %s\n", n.ID, err)
}
@@ -123,14 +123,14 @@ func check(c *cli.Context) error {
if repairFlag {
fmt.Printf("Fixing tree size for node: %s. Calculated treesize: %d\n",
n.ID, treeSize)
n.SetTreeSize(treeSize)
n.SetTreeSize(c.Context, treeSize)
}
}
return nil
}
func walkTree(ctx context.Context, tree *tree.Tree, lu *lookup.Lookup, root *node.Node, repair bool) (uint64, error) {
if root.Type() != provider.ResourceType_RESOURCE_TYPE_CONTAINER {
if root.Type(ctx) != provider.ResourceType_RESOURCE_TYPE_CONTAINER {
return 0, errors.New("can't travers non-container nodes")
}
children, err := tree.ListFolder(ctx, root)
@@ -141,14 +141,14 @@ func walkTree(ctx context.Context, tree *tree.Tree, lu *lookup.Lookup, root *nod
var treesize uint64
for _, child := range children {
switch child.Type() {
switch child.Type(ctx) {
case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
subtreesize, err := walkTree(ctx, tree, lu, child, repair)
if err != nil {
fmt.Printf("error calculating tree size of node: %s: %s\n", child.ID, err)
return 0, err
}
treesizeFromMetadata, err := child.GetTreeSize()
treesizeFromMetadata, err := child.GetTreeSize(ctx)
if err != nil {
fmt.Printf("failed to read tree size of node: %s: %s\n", child.ID, err)
return 0, err
@@ -163,19 +163,19 @@ func walkTree(ctx context.Context, tree *tree.Tree, lu *lookup.Lookup, root *nod
if repair {
fmt.Printf("Fixing tree size for node: %s. Calculated treesize: %d\n",
child.ID, subtreesize)
child.SetTreeSize(subtreesize)
child.SetTreeSize(ctx, subtreesize)
}
}
treesize += subtreesize
case provider.ResourceType_RESOURCE_TYPE_FILE:
blobsize, err := child.GetBlobSize()
blobsize, err := child.GetBlobSize(ctx)
if err != nil {
fmt.Printf("error reading blobsize of node: %s: %s\n", child.ID, err)
return 0, err
}
treesize += blobsize
default:
fmt.Printf("Ignoring type: %v, node: %s %s\n", child.Type(), child.Name, child.ID)
fmt.Printf("Ignoring type: %v, node: %s %s\n", child.Type(ctx), child.Name, child.ID)
}
}
@@ -215,7 +215,7 @@ func dumpCmd(cfg *config.Config) *cli.Command {
return err
}
attribs, err := backend.All(path)
attribs, err := backend.All(c.Context, path)
if err != nil {
fmt.Println("Error reading attributes")
return err
@@ -244,7 +244,7 @@ func getCmd(cfg *config.Config) *cli.Command {
return err
}
attribs, err := backend.All(path)
attribs, err := backend.All(c.Context, path)
if err != nil {
fmt.Println("Error reading attributes")
return err
@@ -297,7 +297,7 @@ func setCmd(cfg *config.Config) *cli.Command {
}
}
err = backend.Set(path, c.String("attribute"), []byte(v))
err = backend.Set(c.Context, path, c.String("attribute"), []byte(v))
if err != nil {
fmt.Println("Error setting attribute")
return err
@@ -359,7 +359,7 @@ func printAttribs(attribs map[string][]byte, onlyAttribute string) {
}
names := []string{}
for k, _ := range attribs {
for k := range attribs {
names = append(names, k)
}

View File

@@ -489,13 +489,13 @@ type Bundle struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // @gotags: yaml:"id"
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // @gotags: yaml:"name"
Type Bundle_Type `protobuf:"varint,3,opt,name=type,proto3,enum=ocis.messages.settings.v0.Bundle_Type" json:"type,omitempty"` // @gotags: yaml:"type"
Extension string `protobuf:"bytes,4,opt,name=extension,proto3" json:"extension,omitempty"` // @gotags: yaml:"extension"
DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` // @gotags: yaml:"display_name"
Settings []*Setting `protobuf:"bytes,6,rep,name=settings,proto3" json:"settings,omitempty"` // @gotags: yaml:"settings"
Resource *Resource `protobuf:"bytes,7,opt,name=resource,proto3" json:"resource,omitempty"` // @gotags: yaml:"resource"
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" yaml:"id"` // @gotags: yaml:"id"
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty" yaml:"name"` // @gotags: yaml:"name"
Type Bundle_Type `protobuf:"varint,3,opt,name=type,proto3,enum=ocis.messages.settings.v0.Bundle_Type" json:"type,omitempty" yaml:"type"` // @gotags: yaml:"type"
Extension string `protobuf:"bytes,4,opt,name=extension,proto3" json:"extension,omitempty" yaml:"extension"` // @gotags: yaml:"extension"
DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty" yaml:"display_name"` // @gotags: yaml:"display_name"
Settings []*Setting `protobuf:"bytes,6,rep,name=settings,proto3" json:"settings,omitempty" yaml:"settings"` // @gotags: yaml:"settings"
Resource *Resource `protobuf:"bytes,7,opt,name=resource,proto3" json:"resource,omitempty" yaml:"resource"` // @gotags: yaml:"resource"
}
func (x *Bundle) Reset() {
@@ -584,10 +584,10 @@ type Setting struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // @gotags: yaml:"id"
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // @gotags: yaml:"name"
DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` // @gotags: yaml:"display_name"
Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` // @gotags: yaml:"description"
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" yaml:"id"` // @gotags: yaml:"id"
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty" yaml:"name"` // @gotags: yaml:"name"
DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty" yaml:"display_name"` // @gotags: yaml:"display_name"
Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty" yaml:"description"` // @gotags: yaml:"description"
// Types that are assignable to Value:
//
// *Setting_IntValue
@@ -597,7 +597,7 @@ type Setting struct {
// *Setting_MultiChoiceValue
// *Setting_PermissionValue
Value isSetting_Value `protobuf_oneof:"value"`
Resource *Resource `protobuf:"bytes,11,opt,name=resource,proto3" json:"resource,omitempty"` // @gotags: yaml:"resource"
Resource *Resource `protobuf:"bytes,11,opt,name=resource,proto3" json:"resource,omitempty" yaml:"resource"` // @gotags: yaml:"resource"
}
func (x *Setting) Reset() {
@@ -721,27 +721,27 @@ type isSetting_Value interface {
}
type Setting_IntValue struct {
IntValue *Int `protobuf:"bytes,5,opt,name=int_value,json=intValue,proto3,oneof"` // @gotags: yaml:"int_value"
IntValue *Int `protobuf:"bytes,5,opt,name=int_value,json=intValue,proto3,oneof" yaml:"int_value"` // @gotags: yaml:"int_value"
}
type Setting_StringValue struct {
StringValue *String `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` // @gotags: yaml:"string_value"
StringValue *String `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof" yaml:"string_value"` // @gotags: yaml:"string_value"
}
type Setting_BoolValue struct {
BoolValue *Bool `protobuf:"bytes,7,opt,name=bool_value,json=boolValue,proto3,oneof"` // @gotags: yaml:"bool_value"
BoolValue *Bool `protobuf:"bytes,7,opt,name=bool_value,json=boolValue,proto3,oneof" yaml:"bool_value"` // @gotags: yaml:"bool_value"
}
type Setting_SingleChoiceValue struct {
SingleChoiceValue *SingleChoiceList `protobuf:"bytes,8,opt,name=single_choice_value,json=singleChoiceValue,proto3,oneof"` // @gotags: yaml:"single_choice_value"
SingleChoiceValue *SingleChoiceList `protobuf:"bytes,8,opt,name=single_choice_value,json=singleChoiceValue,proto3,oneof" yaml:"single_choice_value"` // @gotags: yaml:"single_choice_value"
}
type Setting_MultiChoiceValue struct {
MultiChoiceValue *MultiChoiceList `protobuf:"bytes,9,opt,name=multi_choice_value,json=multiChoiceValue,proto3,oneof"` // @gotags: yaml:"multi_choice_value"
MultiChoiceValue *MultiChoiceList `protobuf:"bytes,9,opt,name=multi_choice_value,json=multiChoiceValue,proto3,oneof" yaml:"multi_choice_value"` // @gotags: yaml:"multi_choice_value"
}
type Setting_PermissionValue struct {
PermissionValue *Permission `protobuf:"bytes,10,opt,name=permission_value,json=permissionValue,proto3,oneof"` // @gotags: yaml:"permission_value"
PermissionValue *Permission `protobuf:"bytes,10,opt,name=permission_value,json=permissionValue,proto3,oneof" yaml:"permission_value"` // @gotags: yaml:"permission_value"
}
func (*Setting_IntValue) isSetting_Value() {}
@@ -761,11 +761,11 @@ type Int struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Default int64 `protobuf:"varint,1,opt,name=default,proto3" json:"default,omitempty"` // @gotags: yaml:"default"
Min int64 `protobuf:"varint,2,opt,name=min,proto3" json:"min,omitempty"` // @gotags: yaml:"min"
Max int64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` // @gotags: yaml:"max"
Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` // @gotags: yaml:"step"
Placeholder string `protobuf:"bytes,5,opt,name=placeholder,proto3" json:"placeholder,omitempty"` // @gotags: yaml:"placeholder"
Default int64 `protobuf:"varint,1,opt,name=default,proto3" json:"default,omitempty" yaml:"default"` // @gotags: yaml:"default"
Min int64 `protobuf:"varint,2,opt,name=min,proto3" json:"min,omitempty" yaml:"min"` // @gotags: yaml:"min"
Max int64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty" yaml:"max"` // @gotags: yaml:"max"
Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty" yaml:"step"` // @gotags: yaml:"step"
Placeholder string `protobuf:"bytes,5,opt,name=placeholder,proto3" json:"placeholder,omitempty" yaml:"placeholder"` // @gotags: yaml:"placeholder"
}
func (x *Int) Reset() {
@@ -840,11 +840,11 @@ type String struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Default string `protobuf:"bytes,1,opt,name=default,proto3" json:"default,omitempty"` // @gotags: yaml:"default"
Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"` // @gotags: yaml:"required"
MinLength int32 `protobuf:"varint,3,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` // @gotags: yaml:"min_length"
MaxLength int32 `protobuf:"varint,4,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` // @gotags: yaml:"max_length"
Placeholder string `protobuf:"bytes,5,opt,name=placeholder,proto3" json:"placeholder,omitempty"` // @gotags: yaml:"placeholder"
Default string `protobuf:"bytes,1,opt,name=default,proto3" json:"default,omitempty" yaml:"default"` // @gotags: yaml:"default"
Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty" yaml:"required"` // @gotags: yaml:"required"
MinLength int32 `protobuf:"varint,3,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty" yaml:"min_length"` // @gotags: yaml:"min_length"
MaxLength int32 `protobuf:"varint,4,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty" yaml:"max_length"` // @gotags: yaml:"max_length"
Placeholder string `protobuf:"bytes,5,opt,name=placeholder,proto3" json:"placeholder,omitempty" yaml:"placeholder"` // @gotags: yaml:"placeholder"
}
func (x *String) Reset() {
@@ -919,8 +919,8 @@ type Bool struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Default bool `protobuf:"varint,1,opt,name=default,proto3" json:"default,omitempty"` // @gotags: yaml:"default"
Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty"` // @gotags: yaml:"label"
Default bool `protobuf:"varint,1,opt,name=default,proto3" json:"default,omitempty" yaml:"default"` // @gotags: yaml:"default"
Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty" yaml:"label"` // @gotags: yaml:"label"
}
func (x *Bool) Reset() {
@@ -974,7 +974,7 @@ type SingleChoiceList struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Options []*ListOption `protobuf:"bytes,1,rep,name=options,proto3" json:"options,omitempty"` // @gotags: yaml:"options"
Options []*ListOption `protobuf:"bytes,1,rep,name=options,proto3" json:"options,omitempty" yaml:"options"` // @gotags: yaml:"options"
}
func (x *SingleChoiceList) Reset() {
@@ -1021,7 +1021,7 @@ type MultiChoiceList struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Options []*ListOption `protobuf:"bytes,1,rep,name=options,proto3" json:"options,omitempty"` // @gotags: yaml:"options"
Options []*ListOption `protobuf:"bytes,1,rep,name=options,proto3" json:"options,omitempty" yaml:"options"` // @gotags: yaml:"options"
}
func (x *MultiChoiceList) Reset() {
@@ -1068,9 +1068,9 @@ type ListOption struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Value *ListOptionValue `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` // @gotags: yaml:"value"
Default bool `protobuf:"varint,2,opt,name=default,proto3" json:"default,omitempty"` // @gotags: yaml:"default"
DisplayValue string `protobuf:"bytes,3,opt,name=display_value,json=displayValue,proto3" json:"display_value,omitempty"` // @gotags: yaml:"display_value"
Value *ListOptionValue `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty" yaml:"value"` // @gotags: yaml:"value"
Default bool `protobuf:"varint,2,opt,name=default,proto3" json:"default,omitempty" yaml:"default"` // @gotags: yaml:"default"
DisplayValue string `protobuf:"bytes,3,opt,name=display_value,json=displayValue,proto3" json:"display_value,omitempty" yaml:"display_value"` // @gotags: yaml:"display_value"
}
func (x *ListOption) Reset() {
@@ -1131,8 +1131,8 @@ type Permission struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Operation Permission_Operation `protobuf:"varint,1,opt,name=operation,proto3,enum=ocis.messages.settings.v0.Permission_Operation" json:"operation,omitempty"` // @gotags: yaml:"operation"
Constraint Permission_Constraint `protobuf:"varint,2,opt,name=constraint,proto3,enum=ocis.messages.settings.v0.Permission_Constraint" json:"constraint,omitempty"` // @gotags: yaml:"constraint"
Operation Permission_Operation `protobuf:"varint,1,opt,name=operation,proto3,enum=ocis.messages.settings.v0.Permission_Operation" json:"operation,omitempty" yaml:"operation"` // @gotags: yaml:"operation"
Constraint Permission_Constraint `protobuf:"varint,2,opt,name=constraint,proto3,enum=ocis.messages.settings.v0.Permission_Constraint" json:"constraint,omitempty" yaml:"constraint"` // @gotags: yaml:"constraint"
}
func (x *Permission) Reset() {
@@ -1187,12 +1187,12 @@ type Value struct {
unknownFields protoimpl.UnknownFields
// id is the id of the Value. It is generated on saving it.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // @gotags: yaml:"id"
BundleId string `protobuf:"bytes,2,opt,name=bundle_id,json=bundleId,proto3" json:"bundle_id,omitempty"` // @gotags: yaml:"bundle_id"
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" yaml:"id"` // @gotags: yaml:"id"
BundleId string `protobuf:"bytes,2,opt,name=bundle_id,json=bundleId,proto3" json:"bundle_id,omitempty" yaml:"bundle_id"` // @gotags: yaml:"bundle_id"
// setting_id is the id of the setting from within its bundle.
SettingId string `protobuf:"bytes,3,opt,name=setting_id,json=settingId,proto3" json:"setting_id,omitempty"` // @gotags: yaml:"setting_id"
AccountUuid string `protobuf:"bytes,4,opt,name=account_uuid,json=accountUuid,proto3" json:"account_uuid,omitempty"` // @gotags: yaml:"account_uuid"
Resource *Resource `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"` // @gotags: yaml:"resource"
SettingId string `protobuf:"bytes,3,opt,name=setting_id,json=settingId,proto3" json:"setting_id,omitempty" yaml:"setting_id"` // @gotags: yaml:"setting_id"
AccountUuid string `protobuf:"bytes,4,opt,name=account_uuid,json=accountUuid,proto3" json:"account_uuid,omitempty" yaml:"account_uuid"` // @gotags: yaml:"account_uuid"
Resource *Resource `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty" yaml:"resource"` // @gotags: yaml:"resource"
// Types that are assignable to Value:
//
// *Value_BoolValue
@@ -1309,19 +1309,19 @@ type isValue_Value interface {
}
type Value_BoolValue struct {
BoolValue bool `protobuf:"varint,6,opt,name=bool_value,json=boolValue,proto3,oneof"` // @gotags: yaml:"bool_value"
BoolValue bool `protobuf:"varint,6,opt,name=bool_value,json=boolValue,proto3,oneof" yaml:"bool_value"` // @gotags: yaml:"bool_value"
}
type Value_IntValue struct {
IntValue int64 `protobuf:"varint,7,opt,name=int_value,json=intValue,proto3,oneof"` // @gotags: yaml:"int_value"
IntValue int64 `protobuf:"varint,7,opt,name=int_value,json=intValue,proto3,oneof" yaml:"int_value"` // @gotags: yaml:"int_value"
}
type Value_StringValue struct {
StringValue string `protobuf:"bytes,8,opt,name=string_value,json=stringValue,proto3,oneof"` // @gotags: yaml:"string_value"
StringValue string `protobuf:"bytes,8,opt,name=string_value,json=stringValue,proto3,oneof" yaml:"string_value"` // @gotags: yaml:"string_value"
}
type Value_ListValue struct {
ListValue *ListValue `protobuf:"bytes,9,opt,name=list_value,json=listValue,proto3,oneof"` // @gotags: yaml:"list_value"
ListValue *ListValue `protobuf:"bytes,9,opt,name=list_value,json=listValue,proto3,oneof" yaml:"list_value"` // @gotags: yaml:"list_value"
}
func (*Value_BoolValue) isValue_Value() {}
@@ -1337,7 +1337,7 @@ type ListValue struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Values []*ListOptionValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` // @gotags: yaml:"values"
Values []*ListOptionValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty" yaml:"values"` // @gotags: yaml:"values"
}
func (x *ListValue) Reset() {
@@ -1449,11 +1449,11 @@ type isListOptionValue_Option interface {
}
type ListOptionValue_StringValue struct {
StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` // @gotags: yaml:"string_value"
StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" yaml:"string_value"` // @gotags: yaml:"string_value"
}
type ListOptionValue_IntValue struct {
IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` // @gotags: yaml:"int_value"
IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" yaml:"int_value"` // @gotags: yaml:"int_value"
}
func (*ListOptionValue_StringValue) isListOptionValue_Option() {}

View File

@@ -18,7 +18,6 @@ import (
"github.com/owncloud/ocis/v2/services/frontend/pkg/logging"
"github.com/owncloud/ocis/v2/services/frontend/pkg/revaconfig"
"github.com/owncloud/ocis/v2/services/frontend/pkg/server/debug"
"github.com/owncloud/ocis/v2/services/frontend/pkg/tracing"
"github.com/urfave/cli/v2"
)
@@ -33,10 +32,10 @@ func Server(cfg *config.Config) *cli.Command {
},
Action: func(c *cli.Context) error {
logger := logging.Configure(cfg.Service.Name, cfg.Log)
err := tracing.Configure(cfg, logger)
if err != nil {
return err
}
// tracingProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name)
// if err != nil {
// return err
// }
gr := run.Group{}
ctx, cancel := defineContext(cfg)
@@ -54,6 +53,7 @@ func Server(cfg *config.Config) *cli.Command {
runtime.RunWithOptions(rCfg, pidFile,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
// runtime.WithTraceProvider(tracingProvider),
)
return nil
@@ -71,7 +71,6 @@ func Server(cfg *config.Config) *cli.Command {
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err

View File

@@ -55,12 +55,6 @@ type Config struct {
Supervised bool `yaml:"-"`
Context context.Context `yaml:"-"`
}
type Tracing struct {
Enabled bool `yaml:"enabled" env:"OCIS_TRACING_ENABLED;FRONTEND_TRACING_ENABLED" desc:"Activates tracing."`
Type string `yaml:"type" env:"OCIS_TRACING_TYPE;FRONTEND_TRACING_TYPE" desc:"The type of tracing. Defaults to \"\", which is the same as \"jaeger\". Allowed tracing types are \"jaeger\" and \"\" as of now."`
Endpoint string `yaml:"endpoint" env:"OCIS_TRACING_ENDPOINT;FRONTEND_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent."`
Collector string `yaml:"collector" env:"OCIS_TRACING_COLLECTOR;FRONTEND_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset."`
}
type Log struct {
Level string `yaml:"level" env:"OCIS_LOG_LEVEL;FRONTEND_LOG_LEVEL" desc:"The log level. Valid values are: \"panic\", \"fatal\", \"error\", \"warn\", \"info\", \"debug\", \"trace\"."`

View File

@@ -0,0 +1,21 @@
package config
import "github.com/owncloud/ocis/v2/ocis-pkg/tracing"
// Tracing sets the tracing parameters for the frontend service.
// Each field can be set via yaml or via the listed environment variables
// (a global OCIS_TRACING_* name and a service-specific FRONTEND_TRACING_* name).
type Tracing struct {
	Enabled   bool   `yaml:"enabled" env:"OCIS_TRACING_ENABLED;FRONTEND_TRACING_ENABLED" desc:"Activates tracing."`
	Type      string `yaml:"type" env:"OCIS_TRACING_TYPE;FRONTEND_TRACING_TYPE" desc:"The type of tracing. Defaults to '', which is the same as 'jaeger'. Allowed tracing types are 'jaeger' and '' as of now."`
	Endpoint  string `yaml:"endpoint" env:"OCIS_TRACING_ENDPOINT;FRONTEND_TRACING_ENDPOINT" desc:"The endpoint of the tracing agent."`
	Collector string `yaml:"collector" env:"OCIS_TRACING_COLLECTOR;FRONTEND_TRACING_COLLECTOR" desc:"The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. Only used if the tracing endpoint is unset."`
}
// Convert translates the service-level Tracing settings into the shared
// ocis-pkg tracing.Config representation consumed by the common tracing setup.
func (t Tracing) Convert() tracing.Config {
	var cfg tracing.Config
	cfg.Enabled = t.Enabled
	cfg.Type = t.Type
	cfg.Endpoint = t.Endpoint
	cfg.Collector = t.Collector
	return cfg
}

View File

@@ -12,7 +12,6 @@ import (
// FrontendConfigFromStruct will adapt an oCIS config struct into a reva mapstructure to start a reva service.
func FrontendConfigFromStruct(cfg *config.Config) (map[string]interface{}, error) {
webURL, err := url.Parse(cfg.PublicURL)
if err != nil {
return nil, err
@@ -71,13 +70,6 @@ func FrontendConfigFromStruct(cfg *config.Config) (map[string]interface{}, error
}
return map[string]interface{}{
"core": map[string]interface{}{
"tracing_enabled": cfg.Tracing.Enabled,
"tracing_exporter": cfg.Tracing.Type,
"tracing_endpoint": cfg.Tracing.Endpoint,
"tracing_collector": cfg.Tracing.Collector,
"tracing_service_name": cfg.Service.Name,
},
"shared": map[string]interface{}{
"jwt_secret": cfg.TokenManager.JWTSecret,
"gatewaysvc": cfg.Reva.Address, // Todo or address?

View File

@@ -1,18 +0,0 @@
package tracing
import (
"github.com/owncloud/ocis/v2/ocis-pkg/log"
"github.com/owncloud/ocis/v2/ocis-pkg/tracing"
"github.com/owncloud/ocis/v2/services/frontend/pkg/config"
"go.opentelemetry.io/otel/trace"
)
var (
	// TraceProvider is the global trace provider for the service.
	// It defaults to a no-op provider until Configure is called.
	TraceProvider = trace.NewNoopTracerProvider()
)

// Configure initializes tracing for the frontend service from the given
// configuration by delegating to the shared ocis-pkg tracing setup.
// It always returns nil.
// NOTE(review): the error return is never populated here — the result of
// tracing.Configure (if any) is not checked; confirm whether callers rely
// on a real error.
func Configure(cfg *config.Config, logger log.Logger) error {
	tracing.Configure(cfg.Tracing.Enabled, cfg.Tracing.Type, logger)
	return nil
}

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v2.14.1. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
@@ -22,13 +22,16 @@ func (_m *Extractor) Extract(ctx context.Context, ri *providerv1beta1.ResourceIn
ret := _m.Called(ctx, ri)
var r0 content.Document
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta1.ResourceInfo) (content.Document, error)); ok {
return rf(ctx, ri)
}
if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta1.ResourceInfo) content.Document); ok {
r0 = rf(ctx, ri)
} else {
r0 = ret.Get(0).(content.Document)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta1.ResourceInfo) error); ok {
r1 = rf(ctx, ri)
} else {

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v2.14.1. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
@@ -21,6 +21,10 @@ func (_m *Retriever) Retrieve(ctx context.Context, rID *providerv1beta1.Resource
ret := _m.Called(ctx, rID)
var r0 io.ReadCloser
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta1.ResourceId) (io.ReadCloser, error)); ok {
return rf(ctx, rID)
}
if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta1.ResourceId) io.ReadCloser); ok {
r0 = rf(ctx, rID)
} else {
@@ -29,7 +33,6 @@ func (_m *Retriever) Retrieve(ctx context.Context, rID *providerv1beta1.Resource
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta1.ResourceId) error); ok {
r1 = rf(ctx, rID)
} else {

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v2.14.1. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
@@ -35,13 +35,16 @@ func (_m *Engine) DocCount() (uint64, error) {
ret := _m.Called()
var r0 uint64
var r1 error
if rf, ok := ret.Get(0).(func() (uint64, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
@@ -98,6 +101,10 @@ func (_m *Engine) Search(ctx context.Context, req *v0.SearchIndexRequest) (*v0.S
ret := _m.Called(ctx, req)
var r0 *v0.SearchIndexResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *v0.SearchIndexRequest) (*v0.SearchIndexResponse, error)); ok {
return rf(ctx, req)
}
if rf, ok := ret.Get(0).(func(context.Context, *v0.SearchIndexRequest) *v0.SearchIndexResponse); ok {
r0 = rf(ctx, req)
} else {
@@ -106,7 +113,6 @@ func (_m *Engine) Search(ctx context.Context, req *v0.SearchIndexRequest) (*v0.S
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v0.SearchIndexRequest) error); ok {
r1 = rf(ctx, req)
} else {

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v2.14.1. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
@@ -47,6 +47,10 @@ func (_m *Searcher) Search(ctx context.Context, req *v0.SearchRequest) (*v0.Sear
ret := _m.Called(ctx, req)
var r0 *v0.SearchResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *v0.SearchRequest) (*v0.SearchResponse, error)); ok {
return rf(ctx, req)
}
if rf, ok := ret.Get(0).(func(context.Context, *v0.SearchRequest) *v0.SearchResponse); ok {
r0 = rf(ctx, req)
} else {
@@ -55,7 +59,6 @@ func (_m *Searcher) Search(ctx context.Context, req *v0.SearchRequest) (*v0.Sear
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v0.SearchRequest) error); ok {
r1 = rf(ctx, req)
} else {

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v2.14.1. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
@@ -54,13 +54,16 @@ func (_m *Client) Compare(dn string, attribute string, value string) (bool, erro
ret := _m.Called(dn, attribute, value)
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(string, string, string) (bool, error)); ok {
return rf(dn, attribute, value)
}
if rf, ok := ret.Get(0).(func(string, string, string) bool); ok {
r0 = rf(dn, attribute, value)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, string, string) error); ok {
r1 = rf(dn, attribute, value)
} else {
@@ -145,6 +148,10 @@ func (_m *Client) ModifyWithResult(_a0 *ldap.ModifyRequest) (*ldap.ModifyResult,
ret := _m.Called(_a0)
var r0 *ldap.ModifyResult
var r1 error
if rf, ok := ret.Get(0).(func(*ldap.ModifyRequest) (*ldap.ModifyResult, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(*ldap.ModifyRequest) *ldap.ModifyResult); ok {
r0 = rf(_a0)
} else {
@@ -153,7 +160,6 @@ func (_m *Client) ModifyWithResult(_a0 *ldap.ModifyRequest) (*ldap.ModifyResult,
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*ldap.ModifyRequest) error); ok {
r1 = rf(_a0)
} else {
@@ -182,6 +188,10 @@ func (_m *Client) PasswordModify(_a0 *ldap.PasswordModifyRequest) (*ldap.Passwor
ret := _m.Called(_a0)
var r0 *ldap.PasswordModifyResult
var r1 error
if rf, ok := ret.Get(0).(func(*ldap.PasswordModifyRequest) (*ldap.PasswordModifyResult, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(*ldap.PasswordModifyRequest) *ldap.PasswordModifyResult); ok {
r0 = rf(_a0)
} else {
@@ -190,7 +200,6 @@ func (_m *Client) PasswordModify(_a0 *ldap.PasswordModifyRequest) (*ldap.Passwor
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*ldap.PasswordModifyRequest) error); ok {
r1 = rf(_a0)
} else {
@@ -205,6 +214,10 @@ func (_m *Client) Search(_a0 *ldap.SearchRequest) (*ldap.SearchResult, error) {
ret := _m.Called(_a0)
var r0 *ldap.SearchResult
var r1 error
if rf, ok := ret.Get(0).(func(*ldap.SearchRequest) (*ldap.SearchResult, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(*ldap.SearchRequest) *ldap.SearchResult); ok {
r0 = rf(_a0)
} else {
@@ -213,7 +226,6 @@ func (_m *Client) Search(_a0 *ldap.SearchRequest) (*ldap.SearchResult, error) {
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*ldap.SearchRequest) error); ok {
r1 = rf(_a0)
} else {
@@ -228,6 +240,10 @@ func (_m *Client) SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize
ret := _m.Called(searchRequest, pagingSize)
var r0 *ldap.SearchResult
var r1 error
if rf, ok := ret.Get(0).(func(*ldap.SearchRequest, uint32) (*ldap.SearchResult, error)); ok {
return rf(searchRequest, pagingSize)
}
if rf, ok := ret.Get(0).(func(*ldap.SearchRequest, uint32) *ldap.SearchResult); ok {
r0 = rf(searchRequest, pagingSize)
} else {
@@ -236,7 +252,6 @@ func (_m *Client) SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*ldap.SearchRequest, uint32) error); ok {
r1 = rf(searchRequest, pagingSize)
} else {
@@ -256,6 +271,10 @@ func (_m *Client) SimpleBind(_a0 *ldap.SimpleBindRequest) (*ldap.SimpleBindResul
ret := _m.Called(_a0)
var r0 *ldap.SimpleBindResult
var r1 error
if rf, ok := ret.Get(0).(func(*ldap.SimpleBindRequest) (*ldap.SimpleBindResult, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(*ldap.SimpleBindRequest) *ldap.SimpleBindResult); ok {
r0 = rf(_a0)
} else {
@@ -264,7 +283,6 @@ func (_m *Client) SimpleBind(_a0 *ldap.SimpleBindRequest) (*ldap.SimpleBindResul
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*ldap.SimpleBindRequest) error); ok {
r1 = rf(_a0)
} else {
@@ -298,13 +316,16 @@ func (_m *Client) TLSConnectionState() (tls.ConnectionState, bool) {
ret := _m.Called()
var r0 tls.ConnectionState
var r1 bool
if rf, ok := ret.Get(0).(func() (tls.ConnectionState, bool)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() tls.ConnectionState); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(tls.ConnectionState)
}
var r1 bool
if rf, ok := ret.Get(1).(func() bool); ok {
r1 = rf()
} else {

View File

@@ -21,6 +21,7 @@ package runtime
import (
"github.com/rs/zerolog"
"go-micro.dev/v4/registry"
"go.opentelemetry.io/otel/trace"
)
// Option defines a single option function.
@@ -28,8 +29,9 @@ type Option func(o *Options)
// Options defines the available options for this package.
type Options struct {
Logger *zerolog.Logger
Registry registry.Registry
Logger *zerolog.Logger
Registry registry.Registry
TraceProvider trace.TracerProvider
}
// newOptions initializes the available default options.
@@ -56,3 +58,10 @@ func WithRegistry(r registry.Registry) Option {
o.Registry = r
}
}
// WithTraceProvider returns an Option that stores the given trace provider
// on the Options struct.
func WithTraceProvider(tp trace.TracerProvider) Option {
	return func(opts *Options) {
		opts.TraceProvider = tp
	}
}

View File

@@ -58,7 +58,7 @@ func RunWithOptions(mainConf map[string]interface{}, pidFile string, opts ...Opt
panic(err)
}
run(mainConf, coreConf, options.Logger, pidFile)
run(mainConf, coreConf, options.Logger, options.TraceProvider, pidFile)
}
type coreConf struct {
@@ -74,11 +74,21 @@ type coreConf struct {
TracingService string `mapstructure:"tracing_service"`
}
func run(mainConf map[string]interface{}, coreConf *coreConf, logger *zerolog.Logger, filename string) {
func run(
mainConf map[string]interface{},
coreConf *coreConf,
logger *zerolog.Logger,
tp trace.TracerProvider,
filename string,
) {
host, _ := os.Hostname()
logger.Info().Msgf("host info: %s", host)
tp := initTracing(coreConf)
// Only initialise tracing if we didn't get a tracer provider.
if tp == nil {
logger.Debug().Msg("No pre-existing tracer given, initializing tracing")
tp = initTracing(coreConf)
}
initCPUCount(coreConf, logger)
servers := initServers(mainConf, logger, tp)
@@ -241,7 +251,7 @@ func getWriter(out string) (io.Writer, error) {
return os.Stdout, nil
}
fd, err := os.OpenFile(out, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
fd, err := os.OpenFile(out, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
err = errors.Wrap(err, "error creating log file: "+out)
return nil, err

View File

@@ -24,7 +24,7 @@ import (
"github.com/cs3org/reva/v2/pkg/appctx"
"github.com/rs/zerolog"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
)

View File

@@ -38,7 +38,7 @@ import (
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"

View File

@@ -45,7 +45,7 @@ func (s *svc) CreatePublicShare(ctx context.Context, req *link.CreatePublicShare
}
if res.GetShare() != nil {
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), res.Share.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), res.Share.ResourceId)
}
return res, nil
}
@@ -63,7 +63,7 @@ func (s *svc) RemovePublicShare(ctx context.Context, req *link.RemovePublicShare
return nil, err
}
// TODO: How to find out the resourceId? -> get public share first, then delete
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), nil)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), nil)
return res, nil
}
@@ -142,7 +142,7 @@ func (s *svc) UpdatePublicShare(ctx context.Context, req *link.UpdatePublicShare
return nil, errors.Wrap(err, "error updating share")
}
if res.GetShare() != nil {
s.statCache.RemoveStat(
s.statCache.RemoveStatContext(ctx,
&userprovider.UserId{
OpaqueId: res.Share.Owner.GetOpaqueId(),
},

View File

@@ -326,7 +326,7 @@ func (s *svc) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorag
if res.Status.Code == rpc.Code_CODE_OK {
id := res.StorageSpace.Root
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), id)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), id)
s.providerCache.RemoveListStorageProviders(id)
}
return res, nil
@@ -363,7 +363,7 @@ func (s *svc) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorag
}
id := &provider.ResourceId{OpaqueId: req.Id.OpaqueId}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), id)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), id)
s.providerCache.RemoveListStorageProviders(id)
if dsRes.Status.Code != rpc.Code_CODE_OK {
@@ -608,7 +608,7 @@ func (s *svc) InitiateFileUpload(ctx context.Context, req *provider.InitiateFile
}
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return &gateway.InitiateFileUploadResponse{
Opaque: storageRes.Opaque,
Status: storageRes.Status,
@@ -645,7 +645,7 @@ func (s *svc) CreateContainer(ctx context.Context, req *provider.CreateContainer
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -688,7 +688,7 @@ func (s *svc) Delete(ctx context.Context, req *provider.DeleteRequest) (*provide
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -715,8 +715,8 @@ func (s *svc) Move(ctx context.Context, req *provider.MoveRequest) (*provider.Mo
req.Source = sref
req.Destination = dref
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Source.ResourceId)
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Destination.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Source.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Destination.ResourceId)
return c.Move(ctx, req)
}
@@ -739,7 +739,7 @@ func (s *svc) SetArbitraryMetadata(ctx context.Context, req *provider.SetArbitra
return nil, errors.Wrap(err, "gateway: error calling SetArbitraryMetadata")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -761,7 +761,7 @@ func (s *svc) UnsetArbitraryMetadata(ctx context.Context, req *provider.UnsetArb
}
return nil, errors.Wrap(err, "gateway: error calling UnsetArbitraryMetadata")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -785,7 +785,7 @@ func (s *svc) SetLock(ctx context.Context, req *provider.SetLockRequest) (*provi
return nil, errors.Wrap(err, "gateway: error calling SetLock")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -826,7 +826,7 @@ func (s *svc) RefreshLock(ctx context.Context, req *provider.RefreshLockRequest)
return nil, errors.Wrap(err, "gateway: error calling RefreshLock")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -847,7 +847,7 @@ func (s *svc) Unlock(ctx context.Context, req *provider.UnlockRequest) (*provide
return nil, errors.Wrap(err, "gateway: error calling Unlock")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -927,7 +927,7 @@ func (s *svc) RestoreFileVersion(ctx context.Context, req *provider.RestoreFileV
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -983,7 +983,7 @@ func (s *svc) RestoreRecycleItem(ctx context.Context, req *provider.RestoreRecyc
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -1006,7 +1006,7 @@ func (s *svc) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleReques
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}

View File

@@ -130,7 +130,7 @@ func (s *svc) UpdateShare(ctx context.Context, req *collaboration.UpdateShareReq
}
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), res.Share.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), res.Share.ResourceId)
return res, nil
}
@@ -213,7 +213,7 @@ func (s *svc) UpdateReceivedShare(ctx context.Context, req *collaboration.Update
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Share.Share.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Share.Share.ResourceId)
return c.UpdateReceivedShare(ctx, req)
/*
TODO: Leftover from master merge. Do we need this?
@@ -504,7 +504,7 @@ func (s *svc) addShare(ctx context.Context, req *collaboration.CreateShareReques
switch status.Code {
case rpc.Code_CODE_OK:
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.ResourceInfo.Id)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.ResourceInfo.Id)
case rpc.Code_CODE_UNIMPLEMENTED:
appctx.GetLogger(ctx).Debug().Interface("status", status).Interface("req", req).Msg("storing grants not supported, ignoring")
rollBackFn(status)
@@ -548,7 +548,7 @@ func (s *svc) addSpaceShare(ctx context.Context, req *collaboration.CreateShareR
switch st.Code {
case rpc.Code_CODE_OK:
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.ResourceInfo.Id)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.ResourceInfo.Id)
s.providerCache.RemoveListStorageProviders(req.ResourceInfo.Id)
case rpc.Code_CODE_UNIMPLEMENTED:
appctx.GetLogger(ctx).Debug().Interface("status", st).Interface("req", req).Msg("storing grants not supported, ignoring")
@@ -618,7 +618,7 @@ func (s *svc) removeShare(ctx context.Context, req *collaboration.RemoveShareReq
}
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), share.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), share.ResourceId)
return res, nil
}
@@ -651,7 +651,7 @@ func (s *svc) removeSpaceShare(ctx context.Context, ref *provider.ResourceId, gr
Status: removeGrantStatus,
}, err
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), ref)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), ref)
s.providerCache.RemoveListStorageProviders(ref)
return &collaboration.RemoveShareResponse{Status: status.NewOK(ctx)}, nil
}

View File

@@ -662,13 +662,15 @@ func (s *service) CreateContainer(ctx context.Context, req *provider.CreateConta
func (s *service) TouchFile(ctx context.Context, req *provider.TouchFileRequest) (*provider.TouchFileResponse, error) {
// FIXME these should be part of the TouchFileRequest object
var mtime string
if req.Opaque != nil {
if e, ok := req.Opaque.Map["lockid"]; ok && e.Decoder == "plain" {
ctx = ctxpkg.ContextSetLockID(ctx, string(e.Value))
}
mtime = utils.ReadPlainFromOpaque(req.Opaque, "X-OC-Mtime")
}
err := s.storage.TouchFile(ctx, req.Ref, utils.ExistsInOpaque(req.Opaque, "markprocessing"))
err := s.storage.TouchFile(ctx, req.Ref, utils.ExistsInOpaque(req.Opaque, "markprocessing"), mtime)
return &provider.TouchFileResponse{
Status: status.NewStatusFromErrType(ctx, "touch file", err),

View File

@@ -48,7 +48,7 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/rs/zerolog"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc/metadata"
)

View File

@@ -36,8 +36,17 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"go.opentelemetry.io/otel"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
)
// tracer is the package-level OpenTelemetry tracer used to start spans in
// this service's HTTP handler.
//
// NOTE(review): the instrumentation-scope name below references
// ".../storage/utils/decomposedfs/tree", but the surrounding code in this
// file (TokenTransportHeader, the HTTP transfer handler) belongs to a
// different package — this looks like a copy-paste. Confirm and replace
// with this package's own import path.
var tracer trace.Tracer

func init() {
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
}
const (
// TokenTransportHeader holds the header key for the reva transfer token
TokenTransportHeader = "X-Reva-Transfer"
@@ -116,6 +125,14 @@ func (s *svc) Unprotected() []string {
func (s *svc) setHandler() {
s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, span := tracer.Start(ctx, "HandlerFunc")
defer span.End()
span.SetAttributes(
semconv.HTTPMethodKey.String(r.Method),
semconv.HTTPURLKey.String(r.URL.String()),
)
r = r.WithContext(ctx)
switch r.Method {
case "HEAD":
addCorsHeader(w)

View File

@@ -54,7 +54,7 @@ import (
"github.com/rs/zerolog"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/types/known/fieldmaskpb"
)

View File

@@ -40,6 +40,7 @@ import (
"github.com/cs3org/reva/v2/pkg/storagespace"
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/rs/zerolog"
"go.opentelemetry.io/otel/propagation"
)
func sufferMacOSFinder(r *http.Request) bool {
@@ -154,9 +155,17 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ
w.WriteHeader(http.StatusInternalServerError)
return
}
opaque := &typespb.Opaque{}
if mtime := r.Header.Get(net.HeaderOCMtime); mtime != "" {
utils.AppendPlainToOpaque(opaque, net.HeaderOCMtime, mtime)
// TODO: find a way to check if the storage really accepted the value
w.Header().Set(net.HeaderOCMtime, "accepted")
}
if length == 0 {
tfRes, err := client.TouchFile(ctx, &provider.TouchFileRequest{
Ref: ref,
Opaque: opaque,
Ref: ref,
})
if err != nil {
log.Error().Err(err).Msg("error sending grpc touch file request")
@@ -191,22 +200,7 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ
return
}
opaqueMap := map[string]*typespb.OpaqueEntry{
net.HeaderUploadLength: {
Decoder: "plain",
Value: []byte(strconv.FormatInt(length, 10)),
},
}
if mtime := r.Header.Get(net.HeaderOCMtime); mtime != "" {
opaqueMap[net.HeaderOCMtime] = &typespb.OpaqueEntry{
Decoder: "plain",
Value: []byte(mtime),
}
// TODO: find a way to check if the storage really accepted the value
w.Header().Set(net.HeaderOCMtime, "accepted")
}
utils.AppendPlainToOpaque(opaque, net.HeaderUploadLength, strconv.FormatInt(length, 10))
// curl -X PUT https://demo.owncloud.com/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef'
@@ -231,16 +225,13 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ
// we do not check the algorithm here, because it might depend on the storage
if len(cparts) == 2 {
// Translate into TUS style Upload-Checksum header
opaqueMap[net.HeaderUploadChecksum] = &typespb.OpaqueEntry{
Decoder: "plain",
// algorithm is always lowercase, checksum is separated by space
Value: []byte(strings.ToLower(cparts[0]) + " " + cparts[1]),
}
// algorithm is always lowercase, checksum is separated by space
utils.AppendPlainToOpaque(opaque, net.HeaderUploadChecksum, strings.ToLower(cparts[0])+" "+cparts[1])
}
uReq := &provider.InitiateFileUploadRequest{
Ref: ref,
Opaque: &typespb.Opaque{Map: opaqueMap},
Opaque: opaque,
}
if ifMatch := r.Header.Get(net.HeaderIfMatch); ifMatch != "" {
uReq.Options = &provider.InitiateFileUploadRequest_IfMatch{IfMatch: ifMatch}
@@ -312,6 +303,7 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ
w.WriteHeader(http.StatusInternalServerError)
return
}
Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header))
httpReq.Header.Set(datagateway.TokenTransportHeader, token)
httpRes, err := s.client.Do(httpReq)

View File

@@ -42,6 +42,13 @@ import (
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/rs/zerolog"
tusd "github.com/tus/tusd/pkg/handler"
"go.opentelemetry.io/otel/propagation"
)
// Propagator is the composite text-map propagator (W3C Baggage + TraceContext)
// used to inject the current trace context into outgoing HTTP requests to the
// data gateway, so upload spans are linked across services.
var Propagator = propagation.NewCompositeTextMapPropagator(
	propagation.Baggage{},
	propagation.TraceContext{},
)
func (s *svc) handlePathTusPost(w http.ResponseWriter, r *http.Request, ns string) {
@@ -253,6 +260,7 @@ func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http.
w.WriteHeader(http.StatusInternalServerError)
return
}
Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header))
httpReq.Header.Set(net.HeaderContentType, r.Header.Get(net.HeaderContentType))
httpReq.Header.Set(net.HeaderContentLength, r.Header.Get(net.HeaderContentLength))

View File

@@ -21,6 +21,7 @@
package datatx
import (
"context"
"net/http"
userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
@@ -55,5 +56,5 @@ func EmitFileUploadedEvent(spaceOwnerOrManager, executant *userv1beta1.UserId, r
// InvalidateCache is a helper function which invalidates the stat cache
func InvalidateCache(owner *userv1beta1.UserId, ref *provider.Reference, statCache cache.StatCache) {
statCache.RemoveStat(owner, ref.GetResourceId())
statCache.RemoveStatContext(context.TODO(), owner, ref.GetResourceId())
}

View File

@@ -19,6 +19,7 @@
package cache
import (
"context"
"fmt"
"strings"
"sync"
@@ -67,6 +68,7 @@ type Cache interface {
type StatCache interface {
Cache
RemoveStat(userID *userpb.UserId, res *provider.ResourceId)
RemoveStatContext(ctx context.Context, userID *userpb.UserId, res *provider.ResourceId)
GetKey(userID *userpb.UserId, ref *provider.Reference, metaDataKeys, fieldMaskPaths []string) string
}

View File

@@ -19,6 +19,7 @@
package cache
import (
"context"
"strings"
"sync"
"time"
@@ -26,8 +27,18 @@ import (
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"go-micro.dev/v4/store"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
"go.opentelemetry.io/otel/trace"
)
// tracer is the package-level OpenTelemetry tracer for the stat cache.
var tracer trace.Tracer

func init() {
	// Use this package's import path as the instrumentation-library name.
	// The previous value ("…/decomposedfs/lookup") was copy-pasted from the
	// lookup package and mislabeled spans emitted by the stat cache.
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/cache")
}
// NewStatCache creates a new StatCache
func NewStatCache(store string, nodes []string, database, table string, ttl time.Duration, size int) StatCache {
c := statCache{}
@@ -42,12 +53,21 @@ type statCache struct {
cacheStore
}
// RemoveStatContext removes a reference from the stat cache, tracing the removal in the caller's span
func (c statCache) RemoveStat(userID *userpb.UserId, res *provider.ResourceId) {
func (c statCache) RemoveStatContext(ctx context.Context, userID *userpb.UserId, res *provider.ResourceId) {
_, span := tracer.Start(ctx, "RemoveStatContext")
defer span.End()
span.SetAttributes(semconv.EnduserIDKey.String(userID.GetOpaqueId()))
uid := "uid:" + userID.GetOpaqueId()
sid := ""
oid := ""
if res != nil {
span.SetAttributes(
attribute.String("space.id", res.SpaceId),
attribute.String("node.id", res.OpaqueId),
)
sid = "sid:" + res.SpaceId
oid = "oid:" + res.OpaqueId
}
@@ -75,6 +95,11 @@ func (c statCache) RemoveStat(userID *userpb.UserId, res *provider.ResourceId) {
wg.Wait()
}
// RemoveStat removes a reference from the stat cache using a background
// context. Kept for backwards compatibility; prefer RemoveStatContext so the
// removal is recorded as a span within the caller's trace.
func (c statCache) RemoveStat(userID *userpb.UserId, res *provider.ResourceId) {
	c.RemoveStatContext(context.Background(), userID, res)
}
// generates a user specific key pointing to ref - used for statcache
// a key looks like: uid:1234-1233!sid:5678-5677!oid:9923-9934!path:/path/to/source
// as you see it adds "uid:"/"sid:"/"oid:" prefixes to the uuids so they can be differentiated

View File

@@ -165,7 +165,7 @@ func (fs *cephfs) CreateDir(ctx context.Context, ref *provider.Reference) error
}
// TouchFile as defined in the storage.FS interface
func (fs *cephfs) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool) error {
func (fs *cephfs) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error {
	// Not implemented for the cephfs driver; markprocessing and mtime are
	// accepted only to satisfy the storage.FS interface and are ignored.
	return fmt.Errorf("unimplemented: TouchFile")
}

View File

@@ -266,7 +266,7 @@ func (nc *StorageDriver) CreateDir(ctx context.Context, ref *provider.Reference)
}
// TouchFile as defined in the storage.FS interface
func (nc *StorageDriver) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool) error {
func (nc *StorageDriver) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error {
	// Not implemented for the nextcloud driver; markprocessing and mtime are
	// accepted only to satisfy the storage.FS interface and are ignored.
	return fmt.Errorf("unimplemented: TouchFile")
}

View File

@@ -794,7 +794,7 @@ func (fs *owncloudsqlfs) CreateDir(ctx context.Context, ref *provider.Reference)
}
// TouchFile as defined in the storage.FS interface
func (fs *owncloudsqlfs) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool) error {
func (fs *owncloudsqlfs) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error {
ip, err := fs.resolve(ctx, ref)
if err != nil {
return err
@@ -830,15 +830,25 @@ func (fs *owncloudsqlfs) TouchFile(ctx context.Context, ref *provider.Reference,
if err != nil {
return err
}
mtime := time.Now().Unix()
storageMtime := time.Now().Unix()
mt := storageMtime
if mtime != "" {
t, err := strconv.Atoi(mtime)
if err != nil {
log.Info().
Str("owncloudsql", ip).
Msg("error mtime conversion. mtine set to system time")
}
mt = time.Unix(int64(t), 0).Unix()
}
data := map[string]interface{}{
"path": fs.toDatabasePath(ip),
"etag": calcEtag(ctx, fi),
"mimetype": mime.Detect(false, ip),
"permissions": int(conversions.RoleFromResourcePermissions(parentPerms, false).OCSPermissions()), // inherit permissions of parent
"mtime": mtime,
"storage_mtime": mtime,
"mtime": mt,
"storage_mtime": storageMtime,
}
storageID, err := fs.getStorage(ctx, ip)
if err != nil {

View File

@@ -348,7 +348,7 @@ func (fs *s3FS) CreateDir(ctx context.Context, ref *provider.Reference) error {
}
// TouchFile as defined in the storage.FS interface
func (fs *s3FS) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool) error {
func (fs *s3FS) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error {
	// Not implemented for the s3 driver; markprocessing and mtime are
	// accepted only to satisfy the storage.FS interface and are ignored.
	return fmt.Errorf("unimplemented: TouchFile")
}

View File

@@ -38,7 +38,7 @@ type FS interface {
GetHome(ctx context.Context) (string, error)
CreateHome(ctx context.Context) error
CreateDir(ctx context.Context, ref *provider.Reference) error
TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool) error
TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error
Delete(ctx context.Context, ref *provider.Reference) error
Move(ctx context.Context, oldRef, newRef *provider.Reference) error
GetMD(ctx context.Context, ref *provider.Reference, mdKeys, fieldMask []string) (*provider.ResourceInfo, error)

View File

@@ -37,7 +37,6 @@ import (
rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/cs3org/reva/v2/pkg/appctx"
ctxpkg "github.com/cs3org/reva/v2/pkg/ctx"
"github.com/cs3org/reva/v2/pkg/errtypes"
"github.com/cs3org/reva/v2/pkg/events"
@@ -62,11 +61,16 @@ import (
"github.com/jellydator/ttlcache/v2"
"github.com/pkg/errors"
microstore "go-micro.dev/v4/store"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
)
// name is the Tracer name used to identify this instrumentation library.
const tracerName = "decomposedfs"
// tracer is the package-level OpenTelemetry tracer for decomposedfs operations.
var tracer trace.Tracer

func init() {
	// The instrumentation-library name is this package's import path.
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs")
}
// Tree is used to manage a tree hierarchy
type Tree interface {
@@ -76,7 +80,7 @@ type Tree interface {
ListFolder(ctx context.Context, node *node.Node) ([]*node.Node, error)
// CreateHome(owner *userpb.UserId) (n *node.Node, err error)
CreateDir(ctx context.Context, node *node.Node) (err error)
TouchFile(ctx context.Context, node *node.Node, markprocessing bool) error
TouchFile(ctx context.Context, node *node.Node, markprocessing bool, mtime string) error
// CreateReference(ctx context.Context, node *node.Node, targetURI *url.URL) error
Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) (err error)
Delete(ctx context.Context, node *node.Node) (err error)
@@ -202,7 +206,9 @@ func New(o *options.Options, lu *lookup.Lookup, p Permissions, tp Tree, es event
// Postprocessing starts the postprocessing result collector
func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
ctx := context.TODO()
ctx := context.TODO() // we should pass the trace id in the event and initialize the trace provider here
ctx, span := tracer.Start(ctx, "Postprocessing")
defer span.End()
log := logger.New()
for event := range ch {
switch ev := event.Event.(type) {
@@ -247,7 +253,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not read parent")
} else {
// update parent tmtime to propagate etag change
_ = p.SetTMTime(&now)
_ = p.SetTMTime(ctx, &now)
if err := fs.tp.Propagate(ctx, p, 0); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not propagate etag change")
}
@@ -256,7 +262,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
upload.Cleanup(up, failed, keepUpload)
// remove cache entry in gateway
fs.cache.RemoveStat(ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
if err := events.Publish(
fs.stream,
@@ -331,7 +337,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
}
// remove cache entry in gateway
fs.cache.RemoveStat(ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
continue
}
@@ -369,7 +375,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
}
// remove cache entry in gateway
fs.cache.RemoveStat(ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
continue
}
*/
@@ -390,13 +396,13 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
n = no
}
if err := n.SetScanData(res.Description, res.Scandate); err != nil {
if err := n.SetScanData(ctx, res.Description, res.Scandate); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Interface("resourceID", res.ResourceID).Msg("Failed to set scan results")
continue
}
// remove cache entry in gateway
fs.cache.RemoveStat(ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
default:
log.Error().Interface("event", ev).Msg("Unknown event")
}
@@ -412,6 +418,8 @@ func (fs *Decomposedfs) Shutdown(ctx context.Context) error {
// GetQuota returns the quota available
// TODO Document in the cs3 should we return quota or free space?
func (fs *Decomposedfs) GetQuota(ctx context.Context, ref *provider.Reference) (total uint64, inUse uint64, remaining uint64, err error) {
ctx, span := tracer.Start(ctx, "GetQuota")
defer span.End()
var n *node.Node
if ref == nil {
err = errtypes.BadRequest("no space given")
@@ -487,6 +495,8 @@ func (fs *Decomposedfs) calculateTotalUsedRemaining(quotaStr string, inUse, rema
// CreateHome creates a new home node for the given user
func (fs *Decomposedfs) CreateHome(ctx context.Context) (err error) {
ctx, span := tracer.Start(ctx, "CreateHome")
defer span.End()
if fs.o.UserLayout == "" {
return errtypes.NotSupported("Decomposedfs: CreateHome() home supported disabled")
}
@@ -519,6 +529,8 @@ func isAlreadyExists(err error) bool {
// GetHome is called to look up the home path for a user
// It is NOT supposed to return the internal path but the external path
func (fs *Decomposedfs) GetHome(ctx context.Context) (string, error) {
ctx, span := tracer.Start(ctx, "GetHome")
defer span.End()
if fs.o.UserLayout == "" {
return "", errtypes.NotSupported("Decomposedfs: GetHome() home supported disabled")
}
@@ -529,6 +541,8 @@ func (fs *Decomposedfs) GetHome(ctx context.Context) (string, error) {
// GetPathByID returns the fn pointed by the file id, without the internal namespace
func (fs *Decomposedfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) {
ctx, span := tracer.Start(ctx, "GetPathByID")
defer span.End()
n, err := fs.lu.NodeFromID(ctx, id)
if err != nil {
return "", err
@@ -557,6 +571,9 @@ func (fs *Decomposedfs) GetPathByID(ctx context.Context, id *provider.ResourceId
// CreateDir creates the specified directory
func (fs *Decomposedfs) CreateDir(ctx context.Context, ref *provider.Reference) (err error) {
ctx, span := tracer.Start(ctx, "CreateDir")
defer span.End()
name := path.Base(ref.Path)
if name == "" || name == "." || name == "/" {
return errtypes.BadRequest("Invalid path: " + ref.Path)
@@ -616,7 +633,9 @@ func (fs *Decomposedfs) CreateDir(ctx context.Context, ref *provider.Reference)
}
// TouchFile as defined in the storage.FS interface
func (fs *Decomposedfs) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool) error {
func (fs *Decomposedfs) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error {
ctx, span := tracer.Start(ctx, "TouchFile")
defer span.End()
parentRef := &provider.Reference{
ResourceId: ref.ResourceId,
Path: path.Dir(ref.Path),
@@ -655,7 +674,7 @@ func (fs *Decomposedfs) TouchFile(ctx context.Context, ref *provider.Reference,
if err := n.CheckLock(ctx); err != nil {
return err
}
return fs.tp.TouchFile(ctx, n, markprocessing)
return fs.tp.TouchFile(ctx, n, markprocessing, mtime)
}
// CreateReference creates a reference as a node folder with the target stored in extended attributes
@@ -669,6 +688,8 @@ func (fs *Decomposedfs) CreateReference(ctx context.Context, p string, targetURI
// Move moves a resource from one reference to another
func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) {
ctx, span := tracer.Start(ctx, "Move")
defer span.End()
var oldNode, newNode *node.Node
if oldNode, err = fs.lu.NodeFromResource(ctx, oldRef); err != nil {
return
@@ -703,13 +724,13 @@ func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Refer
switch {
case err != nil:
return err
case oldNode.IsDir() && !rp.CreateContainer:
case oldNode.IsDir(ctx) && !rp.CreateContainer:
f, _ := storagespace.FormatReference(newRef)
if rp.Stat {
return errtypes.PermissionDenied(f)
}
return errtypes.NotFound(f)
case !oldNode.IsDir() && !rp.InitiateFileUpload:
case !oldNode.IsDir(ctx) && !rp.InitiateFileUpload:
f, _ := storagespace.FormatReference(newRef)
if rp.Stat {
return errtypes.PermissionDenied(f)
@@ -735,6 +756,8 @@ func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Refer
// GetMD returns the metadata for the specified resource
func (fs *Decomposedfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string, fieldMask []string) (ri *provider.ResourceInfo, err error) {
ctx, span := tracer.Start(ctx, "GetMD")
defer span.End()
var node *node.Node
if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
return
@@ -777,14 +800,13 @@ func (fs *Decomposedfs) GetMD(ctx context.Context, ref *provider.Reference, mdKe
// ListFolder returns a list of resources in the specified folder
func (fs *Decomposedfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string, fieldMask []string) ([]*provider.ResourceInfo, error) {
ctx, span := tracer.Start(ctx, "ListFolder")
defer span.End()
n, err := fs.lu.NodeFromResource(ctx, ref)
if err != nil {
return nil, err
}
ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "ListFolder")
defer span.End()
if !n.Exists {
return nil, errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
}
@@ -872,6 +894,8 @@ func (fs *Decomposedfs) ListFolder(ctx context.Context, ref *provider.Reference,
// Delete deletes the specified resource
func (fs *Decomposedfs) Delete(ctx context.Context, ref *provider.Reference) (err error) {
ctx, span := tracer.Start(ctx, "Delete")
defer span.End()
var node *node.Node
if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
return
@@ -904,6 +928,8 @@ func (fs *Decomposedfs) Delete(ctx context.Context, ref *provider.Reference) (er
// Download returns a reader to the specified resource
func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) {
ctx, span := tracer.Start(ctx, "Download")
defer span.End()
// check if we are trying to download a revision
// TODO the CS3 api should allow initiating a revision download
if ref.ResourceId != nil && strings.Contains(ref.ResourceId.OpaqueId, node.RevisionIDDelimiter) {
@@ -941,6 +967,8 @@ func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) (
// GetLock returns an existing lock on the given reference
func (fs *Decomposedfs) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) {
ctx, span := tracer.Start(ctx, "GetLock")
defer span.End()
node, err := fs.lu.NodeFromResource(ctx, ref)
if err != nil {
return nil, errors.Wrap(err, "Decomposedfs: error resolving ref")
@@ -968,6 +996,8 @@ func (fs *Decomposedfs) GetLock(ctx context.Context, ref *provider.Reference) (*
// SetLock puts a lock on the given reference
func (fs *Decomposedfs) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error {
ctx, span := tracer.Start(ctx, "SetLock")
defer span.End()
node, err := fs.lu.NodeFromResource(ctx, ref)
if err != nil {
return errors.Wrap(err, "Decomposedfs: error resolving ref")
@@ -994,6 +1024,8 @@ func (fs *Decomposedfs) SetLock(ctx context.Context, ref *provider.Reference, lo
// RefreshLock refreshes an existing lock on the given reference
func (fs *Decomposedfs) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock, existingLockID string) error {
ctx, span := tracer.Start(ctx, "RefreshLock")
defer span.End()
if lock.LockId == "" {
return errtypes.BadRequest("missing lockid")
}
@@ -1024,6 +1056,8 @@ func (fs *Decomposedfs) RefreshLock(ctx context.Context, ref *provider.Reference
// Unlock removes an existing lock from the given reference
func (fs *Decomposedfs) Unlock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error {
ctx, span := tracer.Start(ctx, "Unlock")
defer span.End()
if lock.LockId == "" {
return errtypes.BadRequest("missing lockid")
}

View File

@@ -135,7 +135,7 @@ func (fs *Decomposedfs) ListGrants(ctx context.Context, ref *provider.Reference)
}
log := appctx.GetLogger(ctx)
var attrs node.Attributes
if attrs, err = grantNode.Xattrs(); err != nil {
if attrs, err = grantNode.Xattrs(ctx); err != nil {
log.Error().Err(err).Msg("error listing attributes")
return nil, err
}
@@ -208,7 +208,7 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference
attr = prefixes.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId
}
if err = grantNode.RemoveXattr(attr); err != nil {
if err = grantNode.RemoveXattr(ctx, attr); err != nil {
return err
}
@@ -326,7 +326,7 @@ func (fs *Decomposedfs) storeGrant(ctx context.Context, n *node.Node, g *provide
// set the grant
e := ace.FromGrant(g)
principal, value := e.Marshal()
if err := n.SetXattr(prefixes.GrantPrefix+principal, value); err != nil {
if err := n.SetXattr(ctx, prefixes.GrantPrefix+principal, value); err != nil {
appctx.GetLogger(ctx).Error().Err(err).
Str("principal", principal).Msg("Could not set grant for principal")
return err

View File

@@ -34,8 +34,16 @@ import (
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options"
"github.com/pkg/errors"
"github.com/rogpeppe/go-internal/lockedfile"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
// tracer is the package-level OpenTelemetry tracer for lookup operations.
var tracer trace.Tracer

func init() {
	// The instrumentation-library name is this package's import path.
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup")
}
// Lookup implements transformations from filepath to node and back
type Lookup struct {
Options *options.Options
@@ -57,8 +65,8 @@ func (lu *Lookup) MetadataBackend() metadata.Backend {
}
// ReadBlobSizeAttr reads the blobsize from the xattrs
func (lu *Lookup) ReadBlobSizeAttr(path string) (int64, error) {
blobSize, err := lu.metadataBackend.GetInt64(path, prefixes.BlobsizeAttr)
func (lu *Lookup) ReadBlobSizeAttr(ctx context.Context, path string) (int64, error) {
blobSize, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.BlobsizeAttr)
if err != nil {
return 0, errors.Wrapf(err, "error reading blobsize xattr")
}
@@ -66,8 +74,8 @@ func (lu *Lookup) ReadBlobSizeAttr(path string) (int64, error) {
}
// ReadBlobIDAttr reads the blobsize from the xattrs
func (lu *Lookup) ReadBlobIDAttr(path string) (string, error) {
attr, err := lu.metadataBackend.Get(path, prefixes.BlobIDAttr)
func (lu *Lookup) ReadBlobIDAttr(ctx context.Context, path string) (string, error) {
attr, err := lu.metadataBackend.Get(ctx, path, prefixes.BlobIDAttr)
if err != nil {
return "", errors.Wrapf(err, "error reading blobid xattr")
}
@@ -75,9 +83,9 @@ func (lu *Lookup) ReadBlobIDAttr(path string) (string, error) {
}
// TypeFromPath returns the type of the node at the given path
func (lu *Lookup) TypeFromPath(path string) provider.ResourceType {
func (lu *Lookup) TypeFromPath(ctx context.Context, path string) provider.ResourceType {
// Try to read from xattrs
typeAttr, err := lu.metadataBackend.GetInt64(path, prefixes.TypeAttr)
typeAttr, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.TypeAttr)
if err == nil {
return provider.ResourceType(int32(typeAttr))
}
@@ -91,7 +99,7 @@ func (lu *Lookup) TypeFromPath(path string) provider.ResourceType {
switch {
case fi.IsDir():
if _, err = lu.metadataBackend.Get(path, prefixes.ReferenceAttr); err == nil {
if _, err = lu.metadataBackend.Get(ctx, path, prefixes.ReferenceAttr); err == nil {
t = provider.ResourceType_RESOURCE_TYPE_REFERENCE
} else {
t = provider.ResourceType_RESOURCE_TYPE_CONTAINER
@@ -108,6 +116,9 @@ func (lu *Lookup) TypeFromPath(path string) provider.ResourceType {
// NodeFromResource takes in a request path or request id and converts it to a Node
func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) {
ctx, span := tracer.Start(ctx, "NodeFromResource")
defer span.End()
if ref.ResourceId != nil {
// check if a storage space reference is used
// currently, the decomposed fs uses the root node id as the space id
@@ -136,6 +147,8 @@ func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference)
// NodeFromID returns the internal path for the id
func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) {
ctx, span := tracer.Start(ctx, "NodeFromID")
defer span.End()
if id == nil {
return nil, fmt.Errorf("invalid resource id %+v", id)
}
@@ -178,7 +191,7 @@ func (lu *Lookup) Path(ctx context.Context, n *node.Node, hasPermission node.Per
root := n.SpaceRoot
for n.ID != root.ID {
p = filepath.Join(n.Name, p)
if n, err = n.Parent(); err != nil {
if n, err = n.Parent(ctx); err != nil {
appctx.GetLogger(ctx).
Error().Err(err).
Str("path", p).
@@ -207,7 +220,7 @@ func (lu *Lookup) WalkPath(ctx context.Context, r *node.Node, p string, followRe
}
if followReferences {
if attrBytes, err := r.Xattr(prefixes.ReferenceAttr); err == nil {
if attrBytes, err := r.Xattr(ctx, prefixes.ReferenceAttr); err == nil {
realNodeID := attrBytes
ref, err := refFromCS3(realNodeID)
if err != nil {
@@ -220,7 +233,7 @@ func (lu *Lookup) WalkPath(ctx context.Context, r *node.Node, p string, followRe
}
}
}
if r.IsSpaceRoot() {
if r.IsSpaceRoot(ctx) {
r.SpaceRoot = r
}
@@ -268,7 +281,7 @@ func refFromCS3(b []byte) (*provider.Reference, error) {
// The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
// For the source file, a shared lock is acquired.
// NOTE: target resource will be write locked!
func (lu *Lookup) CopyMetadata(src, target string, filter func(attributeName string) bool) (err error) {
func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string) bool) (err error) {
// Acquire a read log on the source node
// write lock existing node before reading treesize or tree time
f, err := lockedfile.Open(lu.MetadataBackend().MetadataPath(src))
@@ -288,14 +301,14 @@ func (lu *Lookup) CopyMetadata(src, target string, filter func(attributeName str
}
}()
return lu.CopyMetadataWithSourceLock(src, target, filter, f)
return lu.CopyMetadataWithSourceLock(ctx, src, target, filter, f)
}
// CopyMetadataWithSourceLock copies all extended attributes from source to target.
// The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
// For the source file, a matching lockedfile is required.
// NOTE: target resource will be write locked!
func (lu *Lookup) CopyMetadataWithSourceLock(sourcePath, targetPath string, filter func(attributeName string) bool, lockedSource *lockedfile.File) (err error) {
func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string) bool, lockedSource *lockedfile.File) (err error) {
switch {
case lockedSource == nil:
return errors.New("no lock provided")
@@ -303,7 +316,7 @@ func (lu *Lookup) CopyMetadataWithSourceLock(sourcePath, targetPath string, filt
return errors.New("lockpath does not match filepath")
}
attrs, err := lu.metadataBackend.AllWithLockedSource(sourcePath, lockedSource)
attrs, err := lu.metadataBackend.AllWithLockedSource(ctx, sourcePath, lockedSource)
if err != nil {
return err
}
@@ -315,7 +328,7 @@ func (lu *Lookup) CopyMetadataWithSourceLock(sourcePath, targetPath string, filt
}
}
return lu.MetadataBackend().SetMultiple(targetPath, newAttrs, true)
return lu.MetadataBackend().SetMultiple(ctx, targetPath, newAttrs, true)
}
// DetectBackendOnDisk returns the name of the metadata backend being used on disk

View File

@@ -93,7 +93,7 @@ func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider.
delete(md.Metadata, node.FavoriteKey)
if u, ok := ctxpkg.ContextGetUser(ctx); ok {
if uid := u.GetId(); uid != nil {
if err := n.SetFavorite(uid, val); err != nil {
if err := n.SetFavorite(ctx, uid, val); err != nil {
sublog.Error().Err(err).
Interface("user", u).
Msg("could not set favorite flag")
@@ -111,7 +111,7 @@ func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider.
}
for k, v := range md.Metadata {
attrName := prefixes.MetadataPrefix + k
if err = n.SetXattrString(attrName, v); err != nil {
if err = n.SetXattrString(ctx, attrName, v); err != nil {
errs = append(errs, errors.Wrap(err, "Decomposedfs: could not set metadata attribute "+attrName+" to "+k))
}
}
@@ -184,7 +184,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide
continue
}
fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp())
if err := n.RemoveXattr(fa); err != nil {
if err := n.RemoveXattr(ctx, fa); err != nil {
if metadata.IsAttrUnset(err) {
continue // already gone, ignore
}
@@ -195,7 +195,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide
errs = append(errs, errors.Wrap(err, "could not unset favorite flag"))
}
default:
if err = n.RemoveXattr(prefixes.MetadataPrefix + k); err != nil {
if err = n.RemoveXattr(ctx, prefixes.MetadataPrefix+k); err != nil {
if metadata.IsAttrUnset(err) {
continue // already gone, ignore
}

View File

@@ -19,6 +19,7 @@
package metadata
import (
"context"
"io"
"os"
"path/filepath"
@@ -30,6 +31,7 @@ import (
"github.com/pkg/xattr"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/shamaton/msgpack/v2"
"go.opentelemetry.io/otel/codes"
)
// MessagePackBackend persists the attributes in messagepack format inside the file
@@ -48,7 +50,7 @@ type readWriteCloseSeekTruncater interface {
func NewMessagePackBackend(rootPath string, o cache.Config) MessagePackBackend {
return MessagePackBackend{
rootPath: filepath.Clean(rootPath),
metaCache: cache.GetFileMetadataCache(o.Store, o.Nodes, o.Database, "filemetadata", time.Duration(o.TTL)*time.Second, o.Size),
metaCache: cache.GetFileMetadataCache(o.Store, o.Nodes, o.Database, "filemetadata:", time.Duration(o.TTL)*time.Second, o.Size),
}
}
@@ -56,13 +58,13 @@ func NewMessagePackBackend(rootPath string, o cache.Config) MessagePackBackend {
func (MessagePackBackend) Name() string { return "messagepack" }
// All reads all extended attributes for a node
func (b MessagePackBackend) All(path string) (map[string][]byte, error) {
return b.loadAttributes(path, nil)
func (b MessagePackBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
return b.loadAttributes(ctx, path, nil)
}
// Get an extended attribute value for the given key
func (b MessagePackBackend) Get(path, key string) ([]byte, error) {
attribs, err := b.loadAttributes(path, nil)
func (b MessagePackBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
if err != nil {
return []byte{}, err
}
@@ -74,8 +76,8 @@ func (b MessagePackBackend) Get(path, key string) ([]byte, error) {
}
// GetInt64 reads a string as int64 from the xattrs
func (b MessagePackBackend) GetInt64(path, key string) (int64, error) {
attribs, err := b.loadAttributes(path, nil)
func (b MessagePackBackend) GetInt64(ctx context.Context, path, key string) (int64, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
if err != nil {
return 0, err
}
@@ -92,8 +94,8 @@ func (b MessagePackBackend) GetInt64(path, key string) (int64, error) {
// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (b MessagePackBackend) List(path string) ([]string, error) {
attribs, err := b.loadAttributes(path, nil)
func (b MessagePackBackend) List(ctx context.Context, path string) ([]string, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
if err != nil {
return nil, err
}
@@ -105,36 +107,50 @@ func (b MessagePackBackend) List(path string) ([]string, error) {
}
// Set sets one attribute for the given path
func (b MessagePackBackend) Set(path, key string, val []byte) error {
return b.SetMultiple(path, map[string][]byte{key: val}, true)
func (b MessagePackBackend) Set(ctx context.Context, path, key string, val []byte) error {
return b.SetMultiple(ctx, path, map[string][]byte{key: val}, true)
}
// SetMultiple sets a set of attribute for the given path
func (b MessagePackBackend) SetMultiple(path string, attribs map[string][]byte, acquireLock bool) error {
return b.saveAttributes(path, attribs, nil, acquireLock)
func (b MessagePackBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error {
return b.saveAttributes(ctx, path, attribs, nil, acquireLock)
}
// Remove an extended attribute key
func (b MessagePackBackend) Remove(path, key string) error {
return b.saveAttributes(path, nil, []string{key}, true)
func (b MessagePackBackend) Remove(ctx context.Context, path, key string) error {
return b.saveAttributes(ctx, path, nil, []string{key}, true)
}
// AllWithLockedSource reads all extended attributes from the given reader (if possible).
// The path argument is used for storing the data in the cache
func (b MessagePackBackend) AllWithLockedSource(path string, source io.Reader) (map[string][]byte, error) {
return b.loadAttributes(path, source)
func (b MessagePackBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
return b.loadAttributes(ctx, path, source)
}
func (b MessagePackBackend) saveAttributes(path string, setAttribs map[string][]byte, deleteAttribs []string, acquireLock bool) error {
func (b MessagePackBackend) saveAttributes(ctx context.Context, path string, setAttribs map[string][]byte, deleteAttribs []string, acquireLock bool) error {
var (
f readWriteCloseSeekTruncater
err error
f readWriteCloseSeekTruncater
)
ctx, span := tracer.Start(ctx, "saveAttributes")
defer func() {
if err != nil {
span.SetStatus(codes.Error, err.Error())
} else {
span.SetStatus(codes.Ok, "")
}
span.End()
}()
metaPath := b.MetadataPath(path)
if acquireLock {
_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
f, err = lockedfile.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0600)
subspan.End()
} else {
_, subspan := tracer.Start(ctx, "os.OpenFile")
f, err = os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0600)
subspan.End()
}
if err != nil {
return err
@@ -142,10 +158,15 @@ func (b MessagePackBackend) saveAttributes(path string, setAttribs map[string][]
defer f.Close()
// Invalidate cache early
_, subspan := tracer.Start(ctx, "metaCache.RemoveMetadata")
_ = b.metaCache.RemoveMetadata(b.cacheKey(path))
subspan.End()
// Read current state
msgBytes, err := io.ReadAll(f)
_, subspan = tracer.Start(ctx, "io.ReadAll")
var msgBytes []byte
msgBytes, err = io.ReadAll(f)
subspan.End()
if err != nil {
return err
}
@@ -170,25 +191,35 @@ func (b MessagePackBackend) saveAttributes(path string, setAttribs map[string][]
if err != nil {
return err
}
_, subspan = tracer.Start(ctx, "f.Truncate")
err = f.Truncate(0)
subspan.End()
if err != nil {
return err
}
// Write new metadata to file
d, err := msgpack.Marshal(attribs)
var d []byte
d, err = msgpack.Marshal(attribs)
if err != nil {
return err
}
_, subspan = tracer.Start(ctx, "f.Write")
_, err = f.Write(d)
subspan.End()
if err != nil {
return err
}
return b.metaCache.PushToCache(b.cacheKey(path), attribs)
_, subspan = tracer.Start(ctx, "metaCache.PushToCache")
err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
subspan.End()
return err
}
func (b MessagePackBackend) loadAttributes(path string, source io.Reader) (map[string][]byte, error) {
func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
ctx, span := tracer.Start(ctx, "loadAttributes")
defer span.End()
attribs := map[string][]byte{}
err := b.metaCache.PullFromCache(b.cacheKey(path), &attribs)
if err == nil {
@@ -197,14 +228,18 @@ func (b MessagePackBackend) loadAttributes(path string, source io.Reader) (map[s
metaPath := b.MetadataPath(path)
if source == nil {
_, subspan := tracer.Start(ctx, "lockedfile.Open")
source, err = lockedfile.Open(metaPath)
subspan.End()
// // No cached entry found. Read from storage and store in cache
if err != nil {
if os.IsNotExist(err) {
// some of the caller rely on ENOTEXISTS to be returned when the
// actual file (not the metafile) does not exist in order to
// determine whether a node exists or not -> stat the actual node
_, subspan := tracer.Start(ctx, "os.Stat")
_, err := os.Stat(path)
subspan.End()
if err != nil {
return nil, err
}
@@ -214,7 +249,9 @@ func (b MessagePackBackend) loadAttributes(path string, source io.Reader) (map[s
defer source.(*lockedfile.File).Close()
}
_, subspan := tracer.Start(ctx, "io.ReadAll")
msgBytes, err := io.ReadAll(source)
subspan.End()
if err != nil {
return nil, err
}
@@ -225,7 +262,9 @@ func (b MessagePackBackend) loadAttributes(path string, source io.Reader) (map[s
}
}
_, subspan = tracer.Start(ctx, "metaCache.PushToCache")
err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
subspan.End()
if err != nil {
return nil, err
}

View File

@@ -19,31 +19,41 @@
package metadata
import (
"context"
"errors"
"io"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/metadata")
}
var errUnconfiguredError = errors.New("no metadata backend configured. Bailing out")
// Backend defines the interface for file attribute backends
type Backend interface {
Name() string
All(path string) (map[string][]byte, error)
Get(path, key string) ([]byte, error)
All(ctx context.Context, path string) (map[string][]byte, error)
Get(ctx context.Context, path, key string) ([]byte, error)
GetInt64(path, key string) (int64, error)
List(path string) (attribs []string, err error)
Set(path, key string, val []byte) error
SetMultiple(path string, attribs map[string][]byte, acquireLock bool) error
Remove(path, key string) error
GetInt64(ctx context.Context, path, key string) (int64, error)
List(ctx context.Context, path string) (attribs []string, err error)
Set(ctx context.Context, path, key string, val []byte) error
SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error
Remove(ctx context.Context, path, key string) error
Purge(path string) error
Rename(oldPath, newPath string) error
IsMetaFile(path string) bool
MetadataPath(path string) string
AllWithLockedSource(path string, source io.Reader) (map[string][]byte, error)
AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error)
}
// NullBackend is the default stub backend, used to enforce the configuration of a proper backend
@@ -53,28 +63,40 @@ type NullBackend struct{}
func (NullBackend) Name() string { return "null" }
// All reads all extended attributes for a node
func (NullBackend) All(path string) (map[string][]byte, error) { return nil, errUnconfiguredError }
func (NullBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
return nil, errUnconfiguredError
}
// Get an extended attribute value for the given key
func (NullBackend) Get(path, key string) ([]byte, error) { return []byte{}, errUnconfiguredError }
func (NullBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
return []byte{}, errUnconfiguredError
}
// GetInt64 reads a string as int64 from the xattrs
func (NullBackend) GetInt64(path, key string) (int64, error) { return 0, errUnconfiguredError }
func (NullBackend) GetInt64(ctx context.Context, path, key string) (int64, error) {
return 0, errUnconfiguredError
}
// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (NullBackend) List(path string) ([]string, error) { return nil, errUnconfiguredError }
func (NullBackend) List(ctx context.Context, path string) ([]string, error) {
return nil, errUnconfiguredError
}
// Set sets one attribute for the given path
func (NullBackend) Set(path string, key string, val []byte) error { return errUnconfiguredError }
func (NullBackend) Set(ctx context.Context, path string, key string, val []byte) error {
return errUnconfiguredError
}
// SetMultiple sets a set of attribute for the given path
func (NullBackend) SetMultiple(path string, attribs map[string][]byte, acquireLock bool) error {
func (NullBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error {
return errUnconfiguredError
}
// Remove removes an extended attribute key
func (NullBackend) Remove(path string, key string) error { return errUnconfiguredError }
func (NullBackend) Remove(ctx context.Context, path string, key string) error {
return errUnconfiguredError
}
// IsMetaFile returns whether the given path represents a meta file
func (NullBackend) IsMetaFile(path string) bool { return false }
@@ -90,6 +112,6 @@ func (NullBackend) MetadataPath(path string) string { return "" }
// AllWithLockedSource reads all extended attributes from the given reader
// The path argument is used for storing the data in the cache
func (NullBackend) AllWithLockedSource(path string, source io.Reader) (map[string][]byte, error) {
func (NullBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
return nil, errUnconfiguredError
}

View File

@@ -19,6 +19,7 @@
package metadata
import (
"context"
"io"
"os"
"path/filepath"
@@ -39,13 +40,13 @@ func (XattrsBackend) Name() string { return "xattrs" }
// Get an extended attribute value for the given key
// No file locking is involved here as reading a single xattr is
// considered to be atomic.
func (b XattrsBackend) Get(filePath, key string) ([]byte, error) {
func (b XattrsBackend) Get(ctx context.Context, filePath, key string) ([]byte, error) {
return xattr.Get(filePath, key)
}
// GetInt64 reads a string as int64 from the xattrs
func (b XattrsBackend) GetInt64(filePath, key string) (int64, error) {
attr, err := b.Get(filePath, key)
func (b XattrsBackend) GetInt64(ctx context.Context, filePath, key string) (int64, error) {
attr, err := b.Get(ctx, filePath, key)
if err != nil {
return 0, err
}
@@ -58,7 +59,7 @@ func (b XattrsBackend) GetInt64(filePath, key string) (int64, error) {
// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (XattrsBackend) List(filePath string) (attribs []string, err error) {
func (XattrsBackend) List(ctx context.Context, filePath string) (attribs []string, err error) {
attrs, err := xattr.List(filePath)
if err == nil {
return attrs, nil
@@ -75,8 +76,8 @@ func (XattrsBackend) List(filePath string) (attribs []string, err error) {
// All reads all extended attributes for a node, protected by a
// shared file lock
func (b XattrsBackend) All(filePath string) (attribs map[string][]byte, err error) {
attrNames, err := b.List(filePath)
func (b XattrsBackend) All(ctx context.Context, filePath string) (attribs map[string][]byte, err error) {
attrNames, err := b.List(ctx, filePath)
if err != nil {
return nil, err
@@ -106,12 +107,12 @@ func (b XattrsBackend) All(filePath string) (attribs map[string][]byte, err erro
}
// Set sets one attribute for the given path
func (b XattrsBackend) Set(path string, key string, val []byte) (err error) {
return b.SetMultiple(path, map[string][]byte{key: val}, true)
func (b XattrsBackend) Set(ctx context.Context, path string, key string, val []byte) (err error) {
return b.SetMultiple(ctx, path, map[string][]byte{key: val}, true)
}
// SetMultiple sets a set of attribute for the given path
func (XattrsBackend) SetMultiple(path string, attribs map[string][]byte, acquireLock bool) (err error) {
func (XattrsBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) (err error) {
if acquireLock {
err := os.MkdirAll(filepath.Dir(path), 0600)
if err != nil {
@@ -144,7 +145,7 @@ func (XattrsBackend) SetMultiple(path string, attribs map[string][]byte, acquire
}
// Remove an extended attribute key
func (XattrsBackend) Remove(filePath string, key string) (err error) {
func (XattrsBackend) Remove(ctx context.Context, filePath string, key string) (err error) {
lockedFile, err := lockedfile.OpenFile(filePath+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return err
@@ -173,6 +174,6 @@ func cleanupLockfile(f *lockedfile.File) {
// AllWithLockedSource reads all extended attributes from the given reader.
// The path argument is used for storing the data in the cache
func (b XattrsBackend) AllWithLockedSource(path string, _ io.Reader) (map[string][]byte, error) {
return b.All(path)
func (b XattrsBackend) AllWithLockedSource(ctx context.Context, path string, _ io.Reader) (map[string][]byte, error) {
return b.All(ctx, path)
}

View File

@@ -19,6 +19,7 @@
package migrator
import (
"context"
"errors"
"os"
"path/filepath"
@@ -48,7 +49,7 @@ func (m *Migrator) Migration0001() (Result, error) {
for _, n := range nodes {
nodePath := filepath.Join(nodesPath, n.Name())
attr, err := m.lu.MetadataBackend().Get(nodePath, prefixes.ParentidAttr)
attr, err := m.lu.MetadataBackend().Get(context.Background(), nodePath, prefixes.ParentidAttr)
if err == nil && string(attr) == node.RootID {
if err := m.moveNode(n.Name(), n.Name()); err != nil {
m.log.Error().Err(err).

View File

@@ -19,6 +19,7 @@
package migrator
import (
"context"
"errors"
"io/fs"
"os"
@@ -74,7 +75,7 @@ func (m *Migrator) Migration0003() (Result, error) {
return nil
}
attribs, err := xattrs.All(path)
attribs, err := xattrs.All(context.Background(), path)
if err != nil {
m.log.Error().Err(err).Str("path", path).Msg("error converting file")
return err
@@ -83,14 +84,14 @@ func (m *Migrator) Migration0003() (Result, error) {
return nil
}
err = mpk.SetMultiple(path, attribs, false)
err = mpk.SetMultiple(context.Background(), path, attribs, false)
if err != nil {
m.log.Error().Err(err).Str("path", path).Msg("error setting attributes")
return err
}
for k := range attribs {
err = xattrs.Remove(path, k)
err = xattrs.Remove(context.Background(), path, k)
if err != nil {
m.log.Debug().Err(err).Str("path", path).Msg("error removing xattr")
}

View File

@@ -255,6 +255,8 @@ func (n *Node) Unlock(ctx context.Context, lock *provider.Lock) error {
// CheckLock compares the context lock with the node lock
func (n *Node) CheckLock(ctx context.Context) error {
ctx, span := tracer.Start(ctx, "CheckLock")
defer span.End()
contextLock, _ := ctxpkg.ContextGetLockID(ctx)
diskLock, _ := n.ReadLock(ctx, false)
if diskLock != nil {

View File

@@ -48,8 +48,16 @@ import (
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/google/uuid"
"github.com/pkg/errors"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node")
}
// Define keys and values used in the node metadata
const (
LockdiscoveryKey = "DAV:lockdiscovery"
@@ -97,8 +105,8 @@ type PathLookup interface {
InternalPath(spaceID, nodeID string) string
Path(ctx context.Context, n *Node, hasPermission PermissionFunc) (path string, err error)
MetadataBackend() metadata.Backend
ReadBlobSizeAttr(path string) (int64, error)
ReadBlobIDAttr(path string) (string, error)
ReadBlobSizeAttr(ctx context.Context, path string) (int64, error)
ReadBlobIDAttr(ctx context.Context, path string) (string, error)
}
// New returns a new instance of Node
@@ -120,7 +128,7 @@ func New(spaceID, id, parentID, name string, blobsize int64, blobID string, t pr
}
// Type returns the node's resource type
func (n *Node) Type() provider.ResourceType {
func (n *Node) Type(ctx context.Context) provider.ResourceType {
if n.nodeType != nil {
return *n.nodeType
}
@@ -128,7 +136,7 @@ func (n *Node) Type() provider.ResourceType {
t := provider.ResourceType_RESOURCE_TYPE_INVALID
// Try to read from xattrs
typeAttr, err := n.XattrInt32(prefixes.TypeAttr)
typeAttr, err := n.XattrInt32(ctx, prefixes.TypeAttr)
if err == nil {
t = provider.ResourceType(typeAttr)
n.nodeType = &t
@@ -143,7 +151,7 @@ func (n *Node) Type() provider.ResourceType {
switch {
case fi.IsDir():
if _, err = n.Xattr(prefixes.ReferenceAttr); err == nil {
if _, err = n.Xattr(ctx, prefixes.ReferenceAttr); err == nil {
t = provider.ResourceType_RESOURCE_TYPE_REFERENCE
} else {
t = provider.ResourceType_RESOURCE_TYPE_CONTAINER
@@ -165,12 +173,12 @@ func (n *Node) SetType(t provider.ResourceType) {
}
// NodeMetadata writes the Node metadata to disk and allows passing additional attributes
func (n *Node) NodeMetadata() Attributes {
func (n *Node) NodeMetadata(ctx context.Context) Attributes {
attribs := Attributes{}
attribs.SetInt64(prefixes.TypeAttr, int64(n.Type()))
attribs.SetInt64(prefixes.TypeAttr, int64(n.Type(ctx)))
attribs.SetString(prefixes.ParentidAttr, n.ParentID)
attribs.SetString(prefixes.NameAttr, n.Name)
if n.Type() == provider.ResourceType_RESOURCE_TYPE_FILE {
if n.Type(ctx) == provider.ResourceType_RESOURCE_TYPE_FILE {
attribs.SetString(prefixes.BlobIDAttr, n.BlobID)
attribs.SetInt64(prefixes.BlobsizeAttr, n.Blobsize)
}
@@ -206,6 +214,8 @@ func (n *Node) SpaceOwnerOrManager(ctx context.Context) *userpb.UserId {
// ReadNode creates a new instance from an id and checks if it exists
func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canListDisabledSpace bool, spaceRoot *Node, skipParentCheck bool) (*Node, error) {
ctx, span := tracer.Start(ctx, "ReadNode")
defer span.End()
var err error
if spaceRoot == nil {
@@ -216,7 +226,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
ID: spaceID,
}
spaceRoot.SpaceRoot = spaceRoot
spaceRoot.owner, err = spaceRoot.readOwner()
spaceRoot.owner, err = spaceRoot.readOwner(ctx)
switch {
case metadata.IsNotExist(err):
return spaceRoot, nil // swallow not found, the node defaults to exists = false
@@ -226,14 +236,14 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
spaceRoot.Exists = true
// lookup name in extended attributes
spaceRoot.Name, err = spaceRoot.XattrString(prefixes.NameAttr)
spaceRoot.Name, err = spaceRoot.XattrString(ctx, prefixes.NameAttr)
if err != nil {
return nil, err
}
}
// TODO ReadNode should not check permissions
if !canListDisabledSpace && spaceRoot.IsDisabled() {
if !canListDisabledSpace && spaceRoot.IsDisabled(ctx) {
// no permission = not found
return nil, errtypes.NotFound(spaceID)
}
@@ -276,7 +286,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
}
}()
attrs, err := n.Xattrs()
attrs, err := n.Xattrs(ctx)
switch {
case metadata.IsNotExist(err):
return n, nil // swallow not found, the node defaults to exists = false
@@ -305,13 +315,13 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
n.Blobsize = blobSize
}
} else {
n.BlobID, err = lu.ReadBlobIDAttr(nodePath + revisionSuffix)
n.BlobID, err = lu.ReadBlobIDAttr(ctx, nodePath+revisionSuffix)
if err != nil {
return nil, err
}
// Lookup blobsize
n.Blobsize, err = lu.ReadBlobSizeAttr(nodePath + revisionSuffix)
n.Blobsize, err = lu.ReadBlobSizeAttr(ctx, nodePath+revisionSuffix)
if err != nil {
return nil, err
}
@@ -342,6 +352,9 @@ func readChildNodeFromLink(path string) (string, error) {
// Child returns the child node with the given name
func (n *Node) Child(ctx context.Context, name string) (*Node, error) {
ctx, span := tracer.Start(ctx, "Child")
defer span.End()
spaceID := n.SpaceID
if spaceID == "" && n.ParentID == "root" {
spaceID = n.ID
@@ -375,7 +388,7 @@ func (n *Node) Child(ctx context.Context, name string) (*Node, error) {
}
// ParentWithReader returns the parent node
func (n *Node) ParentWithReader(r io.Reader) (*Node, error) {
func (n *Node) ParentWithReader(ctx context.Context, r io.Reader) (*Node, error) {
if n.ParentID == "" {
return nil, fmt.Errorf("decomposedfs: root has no parent")
}
@@ -387,7 +400,7 @@ func (n *Node) ParentWithReader(r io.Reader) (*Node, error) {
}
// fill metadata cache using the reader
attrs, err := p.XattrsWithReader(r)
attrs, err := p.XattrsWithReader(ctx, r)
switch {
case metadata.IsNotExist(err):
return p, nil // swallow not found, the node defaults to exists = false
@@ -403,8 +416,8 @@ func (n *Node) ParentWithReader(r io.Reader) (*Node, error) {
}
// Parent returns the parent node
func (n *Node) Parent() (p *Node, err error) {
return n.ParentWithReader(nil)
func (n *Node) Parent(ctx context.Context) (p *Node, err error) {
return n.ParentWithReader(ctx, nil)
}
// Owner returns the space owner
@@ -414,14 +427,14 @@ func (n *Node) Owner() *userpb.UserId {
// readOwner reads the owner from the extended attributes of the space root
// in case either owner id or owner idp are unset we return an error and an empty owner object
func (n *Node) readOwner() (*userpb.UserId, error) {
func (n *Node) readOwner(ctx context.Context) (*userpb.UserId, error) {
owner := &userpb.UserId{}
// lookup parent id in extended attributes
var attr string
var err error
// lookup ID in extended attributes
attr, err = n.SpaceRoot.XattrString(prefixes.OwnerIDAttr)
attr, err = n.SpaceRoot.XattrString(ctx, prefixes.OwnerIDAttr)
switch {
case err == nil:
owner.OpaqueId = attr
@@ -432,7 +445,7 @@ func (n *Node) readOwner() (*userpb.UserId, error) {
}
// lookup IDP in extended attributes
attr, err = n.SpaceRoot.XattrString(prefixes.OwnerIDPAttr)
attr, err = n.SpaceRoot.XattrString(ctx, prefixes.OwnerIDPAttr)
switch {
case err == nil:
owner.Idp = attr
@@ -443,7 +456,7 @@ func (n *Node) readOwner() (*userpb.UserId, error) {
}
// lookup type in extended attributes
attr, err = n.SpaceRoot.XattrString(prefixes.OwnerTypeAttr)
attr, err = n.SpaceRoot.XattrString(ctx, prefixes.OwnerTypeAttr)
switch {
case err == nil:
owner.Type = utils.UserTypeMap(attr)
@@ -538,7 +551,7 @@ func (n *Node) SetMtime(mtime time.Time) error {
func (n *Node) SetEtag(ctx context.Context, val string) (err error) {
sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger()
var tmTime time.Time
if tmTime, err = n.GetTMTime(); err != nil {
if tmTime, err = n.GetTMTime(ctx); err != nil {
return
}
var etag string
@@ -555,7 +568,7 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) {
return nil
}
// etag is only valid until the calculated etag changes, is part of propagation
return n.SetXattrString(prefixes.TmpEtagAttr, val)
return n.SetXattrString(ctx, prefixes.TmpEtagAttr, val)
}
// SetFavorite sets the favorite for the current user
@@ -575,15 +588,15 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) {
// 5. app? = a:<aid>: for apps?
// obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem
// public tags can be mapped to extended attributes
func (n *Node) SetFavorite(uid *userpb.UserId, val string) error {
func (n *Node) SetFavorite(ctx context.Context, uid *userpb.UserId, val string) error {
// the favorite flag is specific to the user, so we need to incorporate the userid
fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp())
return n.SetXattrString(fa, val)
return n.SetXattrString(ctx, fa, val)
}
// IsDir returns true if the node is a directory
func (n *Node) IsDir() bool {
attr, _ := n.XattrInt32(prefixes.TypeAttr)
func (n *Node) IsDir(ctx context.Context) bool {
attr, _ := n.XattrInt32(ctx, prefixes.TypeAttr)
return attr == int32(provider.ResourceType_RESOURCE_TYPE_CONTAINER)
}
@@ -592,17 +605,17 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
sublog := appctx.GetLogger(ctx).With().Interface("node", n.ID).Logger()
var fn string
nodeType := n.Type()
nodeType := n.Type(ctx)
var target string
if nodeType == provider.ResourceType_RESOURCE_TYPE_REFERENCE {
target, _ = n.XattrString(prefixes.ReferenceAttr)
target, _ = n.XattrString(ctx, prefixes.ReferenceAttr)
}
id := &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID}
switch {
case n.IsSpaceRoot():
case n.IsSpaceRoot(ctx):
fn = "." // space roots do not have a path as they are referencing themselves
case returnBasename:
fn = n.Name
@@ -629,12 +642,12 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
Name: n.Name,
}
if n.IsProcessing() {
if n.IsProcessing(ctx) {
ri.Opaque = utils.AppendPlainToOpaque(ri.Opaque, "status", "processing")
}
if nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
ts, err := n.GetTreeSize()
ts, err := n.GetTreeSize(ctx)
if err == nil {
ri.Size = ts
} else {
@@ -646,12 +659,12 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
// TODO make etag of files use fileid and checksum
var tmTime time.Time
if tmTime, err = n.GetTMTime(); err != nil {
if tmTime, err = n.GetTMTime(ctx); err != nil {
sublog.Debug().Err(err).Msg("could not get tmtime")
}
// use temporary etag if it is set
if b, err := n.XattrString(prefixes.TmpEtagAttr); err == nil && b != "" {
if b, err := n.XattrString(ctx, prefixes.TmpEtagAttr); err == nil && b != "" {
ri.Etag = fmt.Sprintf(`"%x"`, b)
} else if ri.Etag, err = calculateEtag(n.ID, tmTime); err != nil {
sublog.Debug().Err(err).Msg("could not calculate etag")
@@ -694,7 +707,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
// the favorite flag is specific to the user, so we need to incorporate the userid
if uid := u.GetId(); uid != nil {
fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp())
if val, err := n.XattrString(fa); err == nil {
if val, err := n.XattrString(ctx, fa); err == nil {
sublog.Debug().
Str("favorite", fa).
Msg("found favorite flag")
@@ -756,7 +769,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
}
// only read the requested metadata attributes
attrs, err := n.Xattrs()
attrs, err := n.Xattrs(ctx)
if err != nil {
sublog.Error().Err(err).Msg("error getting list of extended attributes")
} else {
@@ -778,7 +791,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
}
// add virusscan information
if scanned, _, date := n.ScanData(); scanned {
if scanned, _, date := n.ScanData(ctx); scanned {
ri.Opaque = utils.AppendPlainToOpaque(ri.Opaque, "scantime", date.Format(time.RFC3339Nano))
}
@@ -790,7 +803,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
}
func (n *Node) readChecksumIntoResourceChecksum(ctx context.Context, algo string, ri *provider.ResourceInfo) {
v, err := n.Xattr(prefixes.ChecksumPrefix + algo)
v, err := n.Xattr(ctx, prefixes.ChecksumPrefix+algo)
switch {
case err == nil:
ri.Checksum = &provider.ResourceChecksum{
@@ -805,7 +818,7 @@ func (n *Node) readChecksumIntoResourceChecksum(ctx context.Context, algo string
}
func (n *Node) readChecksumIntoOpaque(ctx context.Context, algo string, ri *provider.ResourceInfo) {
v, err := n.Xattr(prefixes.ChecksumPrefix + algo)
v, err := n.Xattr(ctx, prefixes.ChecksumPrefix+algo)
switch {
case err == nil:
if ri.Opaque == nil {
@@ -826,7 +839,7 @@ func (n *Node) readChecksumIntoOpaque(ctx context.Context, algo string, ri *prov
// quota is always stored on the root node
func (n *Node) readQuotaIntoOpaque(ctx context.Context, ri *provider.ResourceInfo) {
v, err := n.XattrString(prefixes.QuotaAttr)
v, err := n.XattrString(ctx, prefixes.QuotaAttr)
switch {
case err == nil:
// make sure we have a proper signed int
@@ -855,16 +868,16 @@ func (n *Node) readQuotaIntoOpaque(ctx context.Context, ri *provider.ResourceInf
}
// HasPropagation checks if the propagation attribute exists and is set to "1"
func (n *Node) HasPropagation() (propagation bool) {
if b, err := n.XattrString(prefixes.PropagationAttr); err == nil {
func (n *Node) HasPropagation(ctx context.Context) (propagation bool) {
if b, err := n.XattrString(ctx, prefixes.PropagationAttr); err == nil {
return b == "1"
}
return false
}
// GetTMTime reads the tmtime from the extended attributes, falling back to GetMTime()
func (n *Node) GetTMTime() (time.Time, error) {
b, err := n.XattrString(prefixes.TreeMTimeAttr)
func (n *Node) GetTMTime(ctx context.Context) (time.Time, error) {
b, err := n.XattrString(ctx, prefixes.TreeMTimeAttr)
if err == nil {
return time.Parse(time.RFC3339Nano, b)
}
@@ -883,16 +896,16 @@ func (n *Node) GetMTime() (time.Time, error) {
}
// SetTMTime writes the UTC tmtime to the extended attributes or removes the attribute if nil is passed
func (n *Node) SetTMTime(t *time.Time) (err error) {
func (n *Node) SetTMTime(ctx context.Context, t *time.Time) (err error) {
if t == nil {
return n.RemoveXattr(prefixes.TreeMTimeAttr)
return n.RemoveXattr(ctx, prefixes.TreeMTimeAttr)
}
return n.SetXattrString(prefixes.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano))
return n.SetXattrString(ctx, prefixes.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano))
}
// GetDTime reads the dtime from the extended attributes
func (n *Node) GetDTime() (tmTime time.Time, err error) {
b, err := n.XattrString(prefixes.DTimeAttr)
func (n *Node) GetDTime(ctx context.Context) (tmTime time.Time, err error) {
b, err := n.XattrString(ctx, prefixes.DTimeAttr)
if err != nil {
return time.Time{}, err
}
@@ -900,26 +913,28 @@ func (n *Node) GetDTime() (tmTime time.Time, err error) {
}
// SetDTime writes the UTC dtime to the extended attributes or removes the attribute if nil is passed
func (n *Node) SetDTime(t *time.Time) (err error) {
func (n *Node) SetDTime(ctx context.Context, t *time.Time) (err error) {
if t == nil {
return n.RemoveXattr(prefixes.DTimeAttr)
return n.RemoveXattr(ctx, prefixes.DTimeAttr)
}
return n.SetXattrString(prefixes.DTimeAttr, t.UTC().Format(time.RFC3339Nano))
return n.SetXattrString(ctx, prefixes.DTimeAttr, t.UTC().Format(time.RFC3339Nano))
}
// IsDisabled returns true when the node has a dmtime attribute set
// only used to check if a space is disabled
// FIXME confusing with the trash logic
func (n *Node) IsDisabled() bool {
if _, err := n.GetDTime(); err == nil {
func (n *Node) IsDisabled(ctx context.Context) bool {
if _, err := n.GetDTime(ctx); err == nil {
return true
}
return false
}
// GetTreeSize reads the treesize from the extended attributes
func (n *Node) GetTreeSize() (treesize uint64, err error) {
s, err := n.XattrUint64(prefixes.TreesizeAttr)
func (n *Node) GetTreeSize(ctx context.Context) (treesize uint64, err error) {
ctx, span := tracer.Start(ctx, "GetTreeSize")
defer span.End()
s, err := n.XattrUint64(ctx, prefixes.TreesizeAttr)
if err != nil {
return 0, err
}
@@ -927,13 +942,13 @@ func (n *Node) GetTreeSize() (treesize uint64, err error) {
}
// SetTreeSize writes the treesize to the extended attributes
func (n *Node) SetTreeSize(ts uint64) (err error) {
return n.SetXattrString(prefixes.TreesizeAttr, strconv.FormatUint(ts, 10))
func (n *Node) SetTreeSize(ctx context.Context, ts uint64) (err error) {
return n.SetXattrString(ctx, prefixes.TreesizeAttr, strconv.FormatUint(ts, 10))
}
// GetBlobSize reads the blobsize from the extended attributes
func (n *Node) GetBlobSize() (treesize uint64, err error) {
s, err := n.XattrInt64(prefixes.BlobsizeAttr)
func (n *Node) GetBlobSize(ctx context.Context) (treesize uint64, err error) {
s, err := n.XattrInt64(ctx, prefixes.BlobsizeAttr)
if err != nil {
return 0, err
}
@@ -941,13 +956,13 @@ func (n *Node) GetBlobSize() (treesize uint64, err error) {
}
// SetChecksum writes the checksum with the given checksum type to the extended attributes
func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) {
return n.SetXattr(prefixes.ChecksumPrefix+csType, h.Sum(nil))
func (n *Node) SetChecksum(ctx context.Context, csType string, h hash.Hash) (err error) {
return n.SetXattr(ctx, prefixes.ChecksumPrefix+csType, h.Sum(nil))
}
// UnsetTempEtag removes the temporary etag attribute
func (n *Node) UnsetTempEtag() (err error) {
return n.RemoveXattr(prefixes.TmpEtagAttr)
func (n *Node) UnsetTempEtag(ctx context.Context) (err error) {
return n.RemoveXattr(ctx, prefixes.TmpEtagAttr)
}
// ReadUserPermissions will assemble the permissions for the current user on the given node without parent nodes
@@ -1070,7 +1085,7 @@ func (n *Node) IsDenied(ctx context.Context) bool {
// We don't want to wast time and memory by creating grantee objects.
// The function will return a list of opaque strings that can be used to make a ReadGrant call
func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error) {
attrs, err := n.Xattrs()
attrs, err := n.Xattrs(ctx)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("node", n.ID).Msg("error listing attributes")
return nil, err
@@ -1085,7 +1100,7 @@ func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error)
// ReadGrant reads a CS3 grant
func (n *Node) ReadGrant(ctx context.Context, grantee string) (g *provider.Grant, err error) {
xattr, err := n.Xattr(grantee)
xattr, err := n.Xattr(ctx, grantee)
if err != nil {
return nil, err
}
@@ -1157,7 +1172,7 @@ func parseMTime(v string) (t time.Time, err error) {
// FindStorageSpaceRoot calls n.Parent() and climbs the tree
// until it finds the space root node and adds it to the node
func (n *Node) FindStorageSpaceRoot() error {
func (n *Node) FindStorageSpaceRoot(ctx context.Context) error {
if n.SpaceRoot != nil {
return nil
}
@@ -1165,11 +1180,11 @@ func (n *Node) FindStorageSpaceRoot() error {
// remember the node we ask for and use parent to climb the tree
parent := n
for {
if parent.IsSpaceRoot() {
if parent.IsSpaceRoot(ctx) {
n.SpaceRoot = parent
break
}
if parent, err = parent.Parent(); err != nil {
if parent, err = parent.Parent(ctx); err != nil {
return err
}
}
@@ -1177,38 +1192,38 @@ func (n *Node) FindStorageSpaceRoot() error {
}
// UnmarkProcessing removes the processing flag from the node
func (n *Node) UnmarkProcessing(uploadID string) error {
v, _ := n.XattrString(prefixes.StatusPrefix)
func (n *Node) UnmarkProcessing(ctx context.Context, uploadID string) error {
v, _ := n.XattrString(ctx, prefixes.StatusPrefix)
if v != ProcessingStatus+uploadID {
// file started another postprocessing later - do not remove
return nil
}
return n.RemoveXattr(prefixes.StatusPrefix)
return n.RemoveXattr(ctx, prefixes.StatusPrefix)
}
// IsProcessing returns true if the node is currently being processed
func (n *Node) IsProcessing() bool {
v, err := n.XattrString(prefixes.StatusPrefix)
func (n *Node) IsProcessing(ctx context.Context) bool {
v, err := n.XattrString(ctx, prefixes.StatusPrefix)
return err == nil && strings.HasPrefix(v, ProcessingStatus)
}
// IsSpaceRoot checks if the node is a space root
func (n *Node) IsSpaceRoot() bool {
_, err := n.Xattr(prefixes.SpaceNameAttr)
func (n *Node) IsSpaceRoot(ctx context.Context) bool {
_, err := n.Xattr(ctx, prefixes.SpaceNameAttr)
return err == nil
}
// SetScanData sets the virus scan info to the node
func (n *Node) SetScanData(info string, date time.Time) error {
func (n *Node) SetScanData(ctx context.Context, info string, date time.Time) error {
attribs := Attributes{}
attribs.SetString(prefixes.ScanStatusPrefix, info)
attribs.SetString(prefixes.ScanDatePrefix, date.Format(time.RFC3339Nano))
return n.SetXattrs(attribs, true)
return n.SetXattrsWithContext(ctx, attribs, true)
}
// ScanData returns scanning information of the node
func (n *Node) ScanData() (scanned bool, virus string, scantime time.Time) {
ti, _ := n.XattrString(prefixes.ScanDatePrefix)
func (n *Node) ScanData(ctx context.Context) (scanned bool, virus string, scantime time.Time) {
ti, _ := n.XattrString(ctx, prefixes.ScanDatePrefix)
if ti == "" {
return // not scanned yet
}
@@ -1218,7 +1233,7 @@ func (n *Node) ScanData() (scanned bool, virus string, scantime time.Time) {
return
}
i, err := n.XattrString(prefixes.ScanStatusPrefix)
i, err := n.XattrString(ctx, prefixes.ScanStatusPrefix)
if err != nil {
return
}
@@ -1231,12 +1246,12 @@ func (n *Node) ScanData() (scanned bool, virus string, scantime time.Time) {
// when creating a new file version. In such a case the function will
// reduce the used bytes by the old file size and then add the new size.
// If overwrite is false oldSize will be ignored.
var CheckQuota = func(spaceRoot *Node, overwrite bool, oldSize, newSize uint64) (quotaSufficient bool, err error) {
used, _ := spaceRoot.GetTreeSize()
var CheckQuota = func(ctx context.Context, spaceRoot *Node, overwrite bool, oldSize, newSize uint64) (quotaSufficient bool, err error) {
used, _ := spaceRoot.GetTreeSize(ctx)
if !enoughDiskSpace(spaceRoot.InternalPath(), newSize) {
return false, errtypes.InsufficientStorage("disk full")
}
quotaByteStr, _ := spaceRoot.XattrString(prefixes.QuotaAttr)
quotaByteStr, _ := spaceRoot.XattrString(ctx, prefixes.QuotaAttr)
switch quotaByteStr {
case "":
// if quota is not set, it means unlimited

View File

@@ -149,7 +149,7 @@ func (p *Permissions) assemblePermissions(ctx context.Context, n *Node, failOnTr
// continue with next segment
}
if cn, err = cn.Parent(); err != nil {
if cn, err = cn.Parent(ctx); err != nil {
// We get an error but get a parent, but can not read it from disk (eg. it has been deleted already)
if cn != nil {
return ap, errors.Wrap(err, "Decomposedfs: error getting parent for node "+cn.ID)

View File

@@ -19,6 +19,7 @@
package node
import (
"context"
"io"
"strconv"
@@ -49,45 +50,50 @@ func (md Attributes) SetInt64(key string, val int64) {
}
// SetXattrs sets multiple extended attributes on the write-through cache/node
func (n *Node) SetXattrs(attribs map[string][]byte, acquireLock bool) (err error) {
func (n *Node) SetXattrsWithContext(ctx context.Context, attribs map[string][]byte, acquireLock bool) (err error) {
if n.xattrsCache != nil {
for k, v := range attribs {
n.xattrsCache[k] = v
}
}
return n.lu.MetadataBackend().SetMultiple(n.InternalPath(), attribs, acquireLock)
return n.lu.MetadataBackend().SetMultiple(ctx, n.InternalPath(), attribs, acquireLock)
}
// SetXattrs sets multiple extended attributes on the write-through cache/node
func (n *Node) SetXattrs(attribs map[string][]byte, acquireLock bool) (err error) {
return n.SetXattrsWithContext(context.Background(), attribs, acquireLock)
}
// SetXattr sets an extended attribute on the write-through cache/node
func (n *Node) SetXattr(key string, val []byte) (err error) {
func (n *Node) SetXattr(ctx context.Context, key string, val []byte) (err error) {
if n.xattrsCache != nil {
n.xattrsCache[key] = val
}
return n.lu.MetadataBackend().Set(n.InternalPath(), key, val)
return n.lu.MetadataBackend().Set(ctx, n.InternalPath(), key, val)
}
// SetXattrString sets a string extended attribute on the write-through cache/node
func (n *Node) SetXattrString(key, val string) (err error) {
func (n *Node) SetXattrString(ctx context.Context, key, val string) (err error) {
if n.xattrsCache != nil {
n.xattrsCache[key] = []byte(val)
}
return n.lu.MetadataBackend().Set(n.InternalPath(), key, []byte(val))
return n.lu.MetadataBackend().Set(ctx, n.InternalPath(), key, []byte(val))
}
// RemoveXattr removes an extended attribute from the write-through cache/node
func (n *Node) RemoveXattr(key string) error {
func (n *Node) RemoveXattr(ctx context.Context, key string) error {
if n.xattrsCache != nil {
delete(n.xattrsCache, key)
}
return n.lu.MetadataBackend().Remove(n.InternalPath(), key)
return n.lu.MetadataBackend().Remove(ctx, n.InternalPath(), key)
}
// XattrsWithReader returns the extended attributes of the node. If the attributes have already
// been cached they are not read from disk again.
func (n *Node) XattrsWithReader(r io.Reader) (Attributes, error) {
func (n *Node) XattrsWithReader(ctx context.Context, r io.Reader) (Attributes, error) {
if n.ID == "" {
// Do not try to read the attribute of an empty node. The InternalPath points to the
// base nodes directory in this case.
@@ -101,9 +107,9 @@ func (n *Node) XattrsWithReader(r io.Reader) (Attributes, error) {
var attrs Attributes
var err error
if r != nil {
attrs, err = n.lu.MetadataBackend().AllWithLockedSource(n.InternalPath(), r)
attrs, err = n.lu.MetadataBackend().AllWithLockedSource(ctx, n.InternalPath(), r)
} else {
attrs, err = n.lu.MetadataBackend().All(n.InternalPath())
attrs, err = n.lu.MetadataBackend().All(ctx, n.InternalPath())
}
if err != nil {
return nil, err
@@ -115,13 +121,13 @@ func (n *Node) XattrsWithReader(r io.Reader) (Attributes, error) {
// Xattrs returns the extended attributes of the node. If the attributes have already
// been cached they are not read from disk again.
func (n *Node) Xattrs() (Attributes, error) {
return n.XattrsWithReader(nil)
func (n *Node) Xattrs(ctx context.Context) (Attributes, error) {
return n.XattrsWithReader(ctx, nil)
}
// Xattr returns an extended attribute of the node. If the attributes have already
// been cached it is not read from disk again.
func (n *Node) Xattr(key string) ([]byte, error) {
func (n *Node) Xattr(ctx context.Context, key string) ([]byte, error) {
if n.ID == "" {
// Do not try to read the attribute of an empty node. The InternalPath points to the
// base nodes directory in this case.
@@ -129,7 +135,7 @@ func (n *Node) Xattr(key string) ([]byte, error) {
}
if n.xattrsCache == nil {
attrs, err := n.lu.MetadataBackend().All(n.InternalPath())
attrs, err := n.lu.MetadataBackend().All(ctx, n.InternalPath())
if err != nil {
return []byte{}, err
}
@@ -144,8 +150,8 @@ func (n *Node) Xattr(key string) ([]byte, error) {
}
// XattrString returns the string representation of an attribute
func (n *Node) XattrString(key string) (string, error) {
b, err := n.Xattr(key)
func (n *Node) XattrString(ctx context.Context, key string) (string, error) {
b, err := n.Xattr(ctx, key)
if err != nil {
return "", err
}
@@ -153,8 +159,8 @@ func (n *Node) XattrString(key string) (string, error) {
}
// XattrInt32 returns the int32 representation of an attribute
func (n *Node) XattrInt32(key string) (int32, error) {
b, err := n.XattrString(key)
func (n *Node) XattrInt32(ctx context.Context, key string) (int32, error) {
b, err := n.XattrString(ctx, key)
if err != nil {
return 0, err
}
@@ -167,8 +173,8 @@ func (n *Node) XattrInt32(key string) (int32, error) {
}
// XattrInt64 returns the int64 representation of an attribute
func (n *Node) XattrInt64(key string) (int64, error) {
b, err := n.XattrString(key)
func (n *Node) XattrInt64(ctx context.Context, key string) (int64, error) {
b, err := n.XattrString(ctx, key)
if err != nil {
return 0, err
}
@@ -176,8 +182,8 @@ func (n *Node) XattrInt64(key string) (int64, error) {
}
// XattrUint64 returns the uint64 representation of an attribute
func (n *Node) XattrUint64(key string) (uint64, error) {
b, err := n.XattrString(key)
func (n *Node) XattrUint64(ctx context.Context, key string) (uint64, error) {
b, err := n.XattrString(ctx, key)
if err != nil {
return 0, err
}

View File

@@ -88,7 +88,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
}
origin := ""
attrs, err := fs.lu.MetadataBackend().All(originalPath)
attrs, err := fs.lu.MetadataBackend().All(ctx, originalPath)
if err != nil {
return items, err
}
@@ -111,7 +111,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
sublog.Error().Err(err).Msg("could not parse time format, ignoring")
}
nodeType := fs.lu.TypeFromPath(originalPath)
nodeType := fs.lu.TypeFromPath(ctx, originalPath)
if nodeType != provider.ResourceType_RESOURCE_TYPE_CONTAINER {
// this is the case when we want to directly list a file in the trashbin
blobsize, err := strconv.ParseInt(string(attrs[prefixes.BlobsizeAttr]), 10, 64)
@@ -154,16 +154,16 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
size := int64(0)
nodeType = fs.lu.TypeFromPath(resolvedChildPath)
nodeType = fs.lu.TypeFromPath(ctx, resolvedChildPath)
switch nodeType {
case provider.ResourceType_RESOURCE_TYPE_FILE:
size, err = fs.lu.ReadBlobSizeAttr(resolvedChildPath)
size, err = fs.lu.ReadBlobSizeAttr(ctx, resolvedChildPath)
if err != nil {
sublog.Error().Err(err).Str("name", name).Msg("invalid blob size, skipping")
continue
}
case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
attr, err := fs.lu.MetadataBackend().Get(resolvedChildPath, prefixes.TreesizeAttr)
attr, err := fs.lu.MetadataBackend().Get(ctx, resolvedChildPath, prefixes.TreesizeAttr)
if err != nil {
sublog.Error().Err(err).Str("name", name).Msg("invalid tree size, skipping")
continue
@@ -235,13 +235,13 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p
continue
}
attrs, err := fs.lu.MetadataBackend().All(nodePath)
attrs, err := fs.lu.MetadataBackend().All(ctx, nodePath)
if err != nil {
log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not get extended attributes, skipping")
continue
}
nodeType := fs.lu.TypeFromPath(nodePath)
nodeType := fs.lu.TypeFromPath(ctx, nodePath)
if nodeType == provider.ResourceType_RESOURCE_TYPE_INVALID {
log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("invalid node type, skipping")
continue

View File

@@ -85,7 +85,7 @@ func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Referen
Key: n.ID + node.RevisionIDDelimiter + parts[1],
Mtime: uint64(mtime.Unix()),
}
blobSize, err := fs.lu.ReadBlobSizeAttr(items[i])
blobSize, err := fs.lu.ReadBlobSizeAttr(ctx, items[i])
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0")
}
@@ -147,11 +147,11 @@ func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Refe
contentPath := fs.lu.InternalPath(spaceID, revisionKey)
blobid, err := fs.lu.ReadBlobIDAttr(contentPath)
blobid, err := fs.lu.ReadBlobIDAttr(ctx, contentPath)
if err != nil {
return nil, errors.Wrapf(err, "Decomposedfs: could not read blob id of revision '%s' for node '%s'", n.ID, revisionKey)
}
blobsize, err := fs.lu.ReadBlobSizeAttr(contentPath)
blobsize, err := fs.lu.ReadBlobSizeAttr(ctx, contentPath)
if err != nil {
return nil, errors.Wrapf(err, "Decomposedfs: could not read blob size of revision '%s' for node '%s'", n.ID, revisionKey)
}
@@ -230,7 +230,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
}()
// copy blob metadata from node to new revision node
err = fs.lu.CopyMetadata(nodePath, newRevisionPath, func(attributeName string) bool {
err = fs.lu.CopyMetadata(ctx, nodePath, newRevisionPath, func(attributeName string) bool {
return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || // for checksums
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
@@ -249,7 +249,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
// copy blob metadata from restored revision to node
restoredRevisionPath := fs.lu.InternalPath(spaceID, revisionKey)
err = fs.lu.CopyMetadata(restoredRevisionPath, nodePath, func(attributeName string) bool {
err = fs.lu.CopyMetadata(ctx, restoredRevisionPath, nodePath, func(attributeName string) bool {
return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
@@ -259,7 +259,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
return errtypes.InternalError("failed to copy blob xattrs to old revision to node")
}
revisionSize, err := fs.lu.MetadataBackend().GetInt64(restoredRevisionPath, prefixes.BlobsizeAttr)
revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, restoredRevisionPath, prefixes.BlobsizeAttr)
if err != nil {
return errtypes.InternalError("failed to read blob size xattr from old revision")
}

View File

@@ -38,6 +38,8 @@ func NewPermissions(item PermissionsChecker, permissionsSelector pool.Selectable
// AssemblePermissions is used to assemble file permissions
func (p Permissions) AssemblePermissions(ctx context.Context, n *node.Node) (provider.ResourcePermissions, error) {
ctx, span := tracer.Start(ctx, "AssemblePermissions")
defer span.End()
return p.item.AssemblePermissions(ctx, n)
}

View File

@@ -146,8 +146,7 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr
metadata.SetString(prefixes.SpaceAliasAttr, alias)
}
// Write node
if err := root.SetXattrs(metadata, true); err != nil {
if err := root.SetXattrsWithContext(ctx, metadata, true); err != nil {
return nil, err
}
@@ -693,7 +692,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up
}
if mapHasKey(metadata, prefixes.QuotaAttr) {
typ, err := spaceNode.SpaceRoot.Xattr(prefixes.SpaceTypeAttr)
typ, err := spaceNode.SpaceRoot.Xattr(ctx, prefixes.SpaceTypeAttr)
if err != nil {
return &provider.UpdateStorageSpaceResponse{
Status: &v1beta11.Status{
@@ -711,13 +710,13 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up
}
metadata[prefixes.TreeMTimeAttr] = []byte(time.Now().UTC().Format(time.RFC3339Nano))
err = spaceNode.SetXattrs(metadata, true)
err = spaceNode.SetXattrsWithContext(ctx, metadata, true)
if err != nil {
return nil, err
}
if restore {
if err := spaceNode.SetDTime(nil); err != nil {
if err := spaceNode.SetDTime(ctx, nil); err != nil {
return nil, err
}
}
@@ -752,7 +751,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
return err
}
st, err := n.SpaceRoot.XattrString(prefixes.SpaceTypeAttr)
st, err := n.SpaceRoot.XattrString(ctx, prefixes.SpaceTypeAttr)
if err != nil {
return errtypes.InternalError(fmt.Sprintf("space %s does not have a spacetype, possible corrupt decompsedfs", n.ID))
}
@@ -761,11 +760,11 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
return err
}
if purge {
if !n.IsDisabled() {
if !n.IsDisabled(ctx) {
return errtypes.NewErrtypeFromStatus(status.NewInvalid(ctx, "can't purge enabled space"))
}
spaceType, err := n.XattrString(prefixes.SpaceTypeAttr)
spaceType, err := n.XattrString(ctx, prefixes.SpaceTypeAttr)
if err != nil {
return err
}
@@ -792,7 +791,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
// mark as disabled by writing a dtime attribute
dtime := time.Now()
return n.SetDTime(&dtime)
return n.SetDTime(ctx, &dtime)
}
func (fs *Decomposedfs) updateIndexes(ctx context.Context, grantee *provider.Grantee, spaceType, spaceID string) error {
@@ -905,7 +904,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
return nil, errtypes.NotFound(fmt.Sprintf("space %s not found", n.ID))
}
if n.SpaceRoot.IsDisabled() {
if n.SpaceRoot.IsDisabled(ctx) {
rp, err := fs.p.AssemblePermissions(ctx, n)
if err != nil || !IsManager(rp) {
return nil, errtypes.PermissionDenied(fmt.Sprintf("user %s is not allowed to list deleted spaces %s", user.Username, n.ID))
@@ -916,7 +915,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
var err error
// TODO apply more filters
var sname string
if sname, err = n.SpaceRoot.XattrString(prefixes.SpaceNameAttr); err != nil {
if sname, err = n.SpaceRoot.XattrString(ctx, prefixes.SpaceNameAttr); err != nil {
// FIXME: Is that a severe problem?
appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a name attribute")
}
@@ -1021,12 +1020,12 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
// Mtime is set either as node.tmtime or as fi.mtime below
}
space.SpaceType, err = n.SpaceRoot.XattrString(prefixes.SpaceTypeAttr)
space.SpaceType, err = n.SpaceRoot.XattrString(ctx, prefixes.SpaceTypeAttr)
if err != nil {
appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a type attribute")
}
if n.SpaceRoot.IsDisabled() {
if n.SpaceRoot.IsDisabled(ctx) {
space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "trashed", "trashed")
}
@@ -1039,7 +1038,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
// we set the space mtime to the root item mtime
// override the stat mtime with a tmtime if it is present
var tmtime time.Time
if tmt, err := n.GetTMTime(); err == nil {
if tmt, err := n.GetTMTime(ctx); err == nil {
tmtime = tmt
un := tmt.UnixNano()
space.Mtime = &types.Timestamp{
@@ -1065,7 +1064,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
Value: []byte(etag),
}
spaceAttributes, err := n.SpaceRoot.Xattrs()
spaceAttributes, err := n.SpaceRoot.Xattrs(ctx)
if err != nil {
return nil, err
}

View File

@@ -47,9 +47,17 @@ import (
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/rs/zerolog/log"
"go-micro.dev/v4/store"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
}
//go:generate make --no-print-directory -C ../../../../.. mockery NAME=Blobstore
// Blobstore defines an interface for storing blobs in a blobstore
@@ -68,9 +76,9 @@ type PathLookup interface {
InternalPath(spaceID, nodeID string) string
Path(ctx context.Context, n *node.Node, hasPermission node.PermissionFunc) (path string, err error)
MetadataBackend() metadata.Backend
ReadBlobSizeAttr(path string) (int64, error)
ReadBlobIDAttr(path string) (string, error)
TypeFromPath(path string) provider.ResourceType
ReadBlobSizeAttr(ctx context.Context, path string) (int64, error)
ReadBlobIDAttr(ctx context.Context, path string) (string, error)
TypeFromPath(ctx context.Context, path string) provider.ResourceType
}
// Tree manages a hierarchical tree
@@ -128,10 +136,10 @@ func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) {
}
// TouchFile creates a new empty file
func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool) error {
func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool, mtime string) error {
if n.Exists {
if markprocessing {
return n.SetXattr(prefixes.StatusPrefix, []byte(node.ProcessingStatus))
return n.SetXattr(ctx, prefixes.StatusPrefix, []byte(node.ProcessingStatus))
}
return errtypes.AlreadyExists(n.ID)
@@ -151,11 +159,16 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool)
return errors.Wrap(err, "Decomposedfs: error creating node")
}
attributes := n.NodeMetadata()
attributes := n.NodeMetadata(ctx)
if markprocessing {
attributes[prefixes.StatusPrefix] = []byte(node.ProcessingStatus)
}
err = n.SetXattrs(attributes, true)
if mtime != "" {
if err := n.SetMtimeString(mtime); err != nil {
return errors.Wrap(err, "Decomposedfs: could not set mtime")
}
}
err = n.SetXattrsWithContext(ctx, attributes, true)
if err != nil {
return err
}
@@ -181,6 +194,8 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool)
// CreateDir creates a new directory entry in the tree
func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
ctx, span := tracer.Start(ctx, "CreateDir")
defer span.End()
if n.Exists {
return errtypes.AlreadyExists(n.ID) // path?
}
@@ -198,7 +213,9 @@ func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
// make child appear in listings
relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2))
ctx, subspan := tracer.Start(ctx, "os.Symlink")
err = os.Symlink(relativeNodePath, filepath.Join(n.ParentPath(), n.Name))
subspan.End()
if err != nil {
// no better way to check unfortunately
if !strings.Contains(err.Error(), "file exists") {
@@ -206,7 +223,9 @@ func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
}
// try to remove the node
ctx, subspan = tracer.Start(ctx, "os.RemoveAll")
e := os.RemoveAll(n.InternalPath())
subspan.End()
if e != nil {
appctx.GetLogger(ctx).Debug().Err(e).Msg("cannot delete node")
}
@@ -256,7 +275,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
}
// update name attribute
if err := oldNode.SetXattrString(prefixes.NameAttr, newNode.Name); err != nil {
if err := oldNode.SetXattrString(ctx, prefixes.NameAttr, newNode.Name); err != nil {
return errors.Wrap(err, "Decomposedfs: could not set name attribute")
}
@@ -279,14 +298,14 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
attribs := node.Attributes{}
attribs.SetString(prefixes.ParentidAttr, newNode.ParentID)
attribs.SetString(prefixes.NameAttr, newNode.Name)
if err := oldNode.SetXattrs(attribs, true); err != nil {
if err := oldNode.SetXattrsWithContext(ctx, attribs, true); err != nil {
return errors.Wrap(err, "Decomposedfs: could not update old node attributes")
}
// the size diff is the current treesize or blobsize of the old/source node
var sizeDiff int64
if oldNode.IsDir() {
treeSize, err := oldNode.GetTreeSize()
if oldNode.IsDir(ctx) {
treeSize, err := oldNode.GetTreeSize(ctx)
if err != nil {
return err
}
@@ -310,7 +329,9 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
return nil
}
func readChildNodeFromLink(path string) (string, error) {
func readChildNodeFromLink(ctx context.Context, path string) (string, error) {
_, span := tracer.Start(ctx, "readChildNodeFromLink")
defer span.End()
link, err := os.Readlink(path)
if err != nil {
return "", err
@@ -322,8 +343,13 @@ func readChildNodeFromLink(path string) (string, error) {
// ListFolder lists the content of a folder node
func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) {
ctx, span := tracer.Start(ctx, "ListFolder")
defer span.End()
dir := n.InternalPath()
_, subspan := tracer.Start(ctx, "os.Open")
f, err := os.Open(dir)
subspan.End()
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, errtypes.NotFound(dir)
@@ -332,7 +358,9 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
}
defer f.Close()
_, subspan = tracer.Start(ctx, "f.Readdirnames")
names, err := f.Readdirnames(0)
subspan.End()
if err != nil {
return nil, err
}
@@ -364,13 +392,13 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
g.Go(func() error {
for name := range work {
path := filepath.Join(dir, name)
nodeID := getNodeIDFromCache(path, t.idCache)
nodeID := getNodeIDFromCache(ctx, path, t.idCache)
if nodeID == "" {
nodeID, err = readChildNodeFromLink(path)
nodeID, err = readChildNodeFromLink(ctx, path)
if err != nil {
return err
}
err = storeNodeIDInCache(path, nodeID, t.idCache)
err = storeNodeIDInCache(ctx, path, nodeID, t.idCache)
if err != nil {
return err
}
@@ -435,13 +463,13 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
// set origin location in metadata
nodePath := n.InternalPath()
if err := n.SetXattrString(prefixes.TrashOriginAttr, origin); err != nil {
if err := n.SetXattrString(ctx, prefixes.TrashOriginAttr, origin); err != nil {
return err
}
var sizeDiff int64
if n.IsDir() {
treesize, err := n.GetTreeSize()
if n.IsDir(ctx) {
treesize, err := n.GetTreeSize(ctx)
if err != nil {
return err // TODO calculate treesize if it is not set
}
@@ -456,7 +484,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
trashLink := filepath.Join(t.options.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2))
if err := os.MkdirAll(filepath.Dir(trashLink), 0700); err != nil {
// Roll back changes
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
return err
}
@@ -469,7 +497,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
err = os.Symlink("../../../../../nodes/"+lookup.Pathify(n.ID, 4, 2)+node.TrashIDDelimiter+deletionTime, trashLink)
if err != nil {
// Roll back changes
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
return
}
@@ -482,12 +510,12 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
// To roll back changes
// TODO remove symlink
// Roll back changes
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
return
}
err = t.lookup.MetadataBackend().Rename(nodePath, trashPath)
if err != nil {
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
_ = os.Rename(trashPath, nodePath)
return
}
@@ -501,7 +529,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
// TODO revert the rename
// TODO remove symlink
// Roll back changes
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
return
}
@@ -531,7 +559,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
return nil, nil, nil, err
}
parent, err := targetNode.Parent()
parent, err := targetNode.Parent(ctx)
if err != nil {
return nil, nil, nil, err
}
@@ -571,7 +599,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)
}
if err = recycleNode.SetXattrs(attrs, true); err != nil {
if err = recycleNode.SetXattrsWithContext(ctx, attrs, true); err != nil {
return errors.Wrap(err, "Decomposedfs: could not update recycle node")
}
@@ -589,8 +617,8 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
}
var sizeDiff int64
if recycleNode.IsDir() {
treeSize, err := recycleNode.GetTreeSize()
if recycleNode.IsDir(ctx) {
treeSize, err := recycleNode.GetTreeSize(ctx)
if err != nil {
return err
}
@@ -611,7 +639,7 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa
}
fn := func() error {
if err := t.removeNode(deletedNodePath, rn); err != nil {
if err := t.removeNode(ctx, deletedNodePath, rn); err != nil {
return err
}
@@ -635,7 +663,7 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa
return rn, fn, nil
}
func (t *Tree) removeNode(path string, n *node.Node) error {
func (t *Tree) removeNode(ctx context.Context, path string, n *node.Node) error {
// delete the actual node
if err := utils.RemoveItem(path); err != nil {
log.Error().Err(err).Str("path", path).Msg("error purging node")
@@ -666,7 +694,7 @@ func (t *Tree) removeNode(path string, n *node.Node) error {
continue
}
bID, err := t.lookup.ReadBlobIDAttr(rev)
bID, err := t.lookup.ReadBlobIDAttr(ctx, rev)
if err != nil {
log.Error().Err(err).Str("revision", rev).Msg("error reading blobid attribute")
return err
@@ -691,12 +719,15 @@ func (t *Tree) removeNode(path string, n *node.Node) error {
// Propagate propagates changes to the root of the tree
func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err error) {
ctx, span := tracer.Start(ctx, "Propagate")
defer span.End()
sublog := appctx.GetLogger(ctx).With().
Str("method", "tree.Propagate").
Str("spaceid", n.SpaceID).
Str("nodeid", n.ID).
Int64("sizeDiff", sizeDiff).
Logger()
if !t.options.TreeTimeAccounting && (!t.options.TreeSizeAccounting || sizeDiff == 0) {
// no propagation enabled
sublog.Debug().Msg("propagation disabled or nothing to propagate")
@@ -718,6 +749,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
var f *lockedfile.File
// lock parent before reading treesize or tree time
_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
var parentFilename string
switch t.lookup.MetadataBackend().(type) {
case metadata.MessagePackBackend:
@@ -729,6 +761,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
parentFilename = n.ParentPath() + filelocks.LockFileSuffix
f, err = lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
}
subspan.End()
if err != nil {
sublog.Error().Err(err).
Str("parent filename", parentFilename).
@@ -744,14 +777,14 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
}
}()
if n, err = n.ParentWithReader(f); err != nil {
if n, err = n.ParentWithReader(ctx, f); err != nil {
sublog.Error().Err(err).
Msg("Propagation failed. Could not read parent node.")
return err
}
// TODO none, sync and async?
if !n.HasPropagation() {
if !n.HasPropagation(ctx) {
sublog.Debug().Str("attr", prefixes.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating")
// if the attribute is not set treat it as false / none / no propagation
return nil
@@ -764,7 +797,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
updateSyncTime := false
var tmTime time.Time
tmTime, err = n.GetTMTime()
tmTime, err = n.GetTMTime(ctx)
switch {
case err != nil:
// missing attribute, or invalid format, overwrite
@@ -798,7 +831,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
var newSize uint64
// read treesize
treeSize, err := n.GetTreeSize()
treeSize, err := n.GetTreeSize(ctx)
switch {
case metadata.IsAttrUnset(err):
// fallback to calculating the treesize
@@ -829,13 +862,15 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
sublog.Debug().Uint64("newSize", newSize).Msg("updated treesize of parent node")
}
if err = n.SetXattrs(attrs, false); err != nil {
if err = n.SetXattrsWithContext(ctx, attrs, false); err != nil {
sublog.Error().Err(err).Msg("Failed to update extend attributes of parent node")
return err
}
// Release node lock early, ignore already closed error
_, subspan = tracer.Start(ctx, "f.Close")
cerr := f.Close()
subspan.End()
if cerr != nil && !errors.Is(cerr, os.ErrClosed) {
sublog.Error().Err(cerr).Msg("Failed to close parent node and release lock")
return cerr
@@ -849,6 +884,8 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
}
func (t *Tree) calculateTreeSize(ctx context.Context, childrenPath string) (uint64, error) {
ctx, span := tracer.Start(ctx, "calculateTreeSize")
defer span.End()
var size uint64
f, err := os.Open(childrenPath)
@@ -872,7 +909,7 @@ func (t *Tree) calculateTreeSize(ctx context.Context, childrenPath string) (uint
}
// raw read of the attributes for performance reasons
attribs, err := t.lookup.MetadataBackend().All(resolvedPath)
attribs, err := t.lookup.MetadataBackend().All(ctx, resolvedPath)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read attributes of child entry")
continue // continue after an error
@@ -920,18 +957,20 @@ func (t *Tree) DeleteBlob(node *node.Node) error {
// TODO check if node exists?
func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
ctx, span := tracer.Start(ctx, "createDirNode")
defer span.End()
// create a directory node
nodePath := n.InternalPath()
if err := os.MkdirAll(nodePath, 0700); err != nil {
return errors.Wrap(err, "Decomposedfs: error creating node")
}
attributes := n.NodeMetadata()
attributes := n.NodeMetadata(ctx)
attributes[prefixes.TreesizeAttr] = []byte("0") // initialize as empty, TODO why bother? if it is not set we could treat it as 0?
if t.options.TreeTimeAccounting || t.options.TreeSizeAccounting {
attributes[prefixes.PropagationAttr] = []byte("1") // mark the node for propagation
}
return n.SetXattrs(attributes, true)
return n.SetXattrsWithContext(ctx, attributes, true)
}
var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`)
@@ -962,32 +1001,32 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
if err != nil {
return
}
recycleNode.SetType(t.lookup.TypeFromPath(deletedNodePath))
recycleNode.SetType(t.lookup.TypeFromPath(ctx, deletedNodePath))
var attrBytes []byte
if recycleNode.Type() == provider.ResourceType_RESOURCE_TYPE_FILE {
if recycleNode.Type(ctx) == provider.ResourceType_RESOURCE_TYPE_FILE {
// lookup blobID in extended attributes
if attrBytes, err = backend.Get(deletedNodePath, prefixes.BlobIDAttr); err == nil {
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.BlobIDAttr); err == nil {
recycleNode.BlobID = string(attrBytes)
} else {
return
}
// lookup blobSize in extended attributes
if recycleNode.Blobsize, err = backend.GetInt64(deletedNodePath, prefixes.BlobsizeAttr); err != nil {
if recycleNode.Blobsize, err = backend.GetInt64(ctx, deletedNodePath, prefixes.BlobsizeAttr); err != nil {
return
}
}
// lookup parent id in extended attributes
if attrBytes, err = backend.Get(deletedNodePath, prefixes.ParentidAttr); err == nil {
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.ParentidAttr); err == nil {
recycleNode.ParentID = string(attrBytes)
} else {
return
}
// lookup name in extended attributes
if attrBytes, err = backend.Get(deletedNodePath, prefixes.NameAttr); err == nil {
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.NameAttr); err == nil {
recycleNode.Name = string(attrBytes)
} else {
return
@@ -997,7 +1036,7 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
origin = "/"
// lookup origin path in extended attributes
if attrBytes, err = backend.Get(resolvedTrashItem, prefixes.TrashOriginAttr); err == nil {
if attrBytes, err = backend.Get(ctx, resolvedTrashItem, prefixes.TrashOriginAttr); err == nil {
origin = filepath.Join(string(attrBytes), path)
} else {
log.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /")
@@ -1006,7 +1045,9 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
return
}
func getNodeIDFromCache(path string, cache store.Store) string {
func getNodeIDFromCache(ctx context.Context, path string, cache store.Store) string {
_, span := tracer.Start(ctx, "getNodeIDFromCache")
defer span.End()
recs, err := cache.Read(path)
if err == nil && len(recs) > 0 {
return string(recs[0].Value)
@@ -1014,7 +1055,9 @@ func getNodeIDFromCache(path string, cache store.Store) string {
return ""
}
func storeNodeIDInCache(path string, nodeID string, cache store.Store) error {
func storeNodeIDInCache(ctx context.Context, path string, nodeID string, cache store.Store) error {
_, span := tracer.Start(ctx, "storeNodeIDInCache")
defer span.End()
return cache.Write(&store.Record{
Key: path,
Value: []byte(nodeID),

View File

@@ -196,7 +196,7 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere
log.Debug().Interface("info", info).Interface("node", n).Interface("metadata", metadata).Msg("Decomposedfs: resolved filename")
_, err = node.CheckQuota(n.SpaceRoot, n.Exists, uint64(n.Blobsize), uint64(info.Size))
_, err = node.CheckQuota(ctx, n.SpaceRoot, n.Exists, uint64(n.Blobsize), uint64(info.Size))
if err != nil {
return nil, err
}

View File

@@ -83,7 +83,7 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p
log.Debug().Interface("info", info).Interface("node", n).Msg("Decomposedfs: resolved filename")
// the parent owner will become the new owner
parent, perr := n.Parent()
parent, perr := n.Parent(ctx)
if perr != nil {
return nil, errors.Wrap(perr, "Decomposedfs: error getting parent "+n.ParentID)
}
@@ -117,7 +117,7 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p
}
// are we trying to overwriting a folder with a file?
if n.Exists && n.IsDir() {
if n.Exists && n.IsDir(ctx) {
return nil, errtypes.PreconditionFailed("resource is not a file")
}
@@ -294,7 +294,7 @@ func CreateNodeForUpload(upload *Upload, initAttrs node.Attributes) (*node.Node,
initAttrs.SetString(prefixes.StatusPrefix, node.ProcessingStatus+upload.Info.ID)
// update node metadata with new blobid etc
err = n.SetXattrs(initAttrs, false)
err = n.SetXattrsWithContext(context.TODO(), initAttrs, false)
if err != nil {
return nil, errors.Wrap(err, "Decomposedfs: could not write metadata")
}
@@ -347,7 +347,7 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*lockedfile.File,
// nothing to do
}
if _, err := node.CheckQuota(n.SpaceRoot, false, 0, fsize); err != nil {
if _, err := node.CheckQuota(upload.Ctx, n.SpaceRoot, false, 0, fsize); err != nil {
return f, err
}
@@ -374,11 +374,11 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*lockedfile.File,
func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint64) (*lockedfile.File, error) {
old, _ := node.ReadNode(upload.Ctx, upload.lu, spaceID, n.ID, false, nil, false)
if _, err := node.CheckQuota(n.SpaceRoot, true, uint64(old.Blobsize), fsize); err != nil {
if _, err := node.CheckQuota(upload.Ctx, n.SpaceRoot, true, uint64(old.Blobsize), fsize); err != nil {
return nil, err
}
tmtime, err := old.GetTMTime()
tmtime, err := old.GetTMTime(upload.Ctx)
if err != nil {
return nil, err
}
@@ -414,7 +414,7 @@ func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint
}
// copy blob metadata to version node
if err := upload.lu.CopyMetadataWithSourceLock(targetPath, upload.versionsPath, func(attributeName string) bool {
if err := upload.lu.CopyMetadataWithSourceLock(upload.Ctx, targetPath, upload.versionsPath, func(attributeName string) bool {
return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||

View File

@@ -129,7 +129,7 @@ func Cleanup(upload *Upload, failure bool, keepUpload bool) {
// unset processing status
if upload.Node != nil { // node can be nil when there was an error before it was created (eg. checksum-mismatch)
if err := upload.Node.UnmarkProcessing(upload.Info.ID); err != nil {
if err := upload.Node.UnmarkProcessing(upload.Ctx, upload.Info.ID); err != nil {
upload.log.Info().Str("path", upload.Node.InternalPath()).Err(err).Msg("unmarking processing failed")
}
}
@@ -370,7 +370,7 @@ func (upload *Upload) cleanup(cleanNode, cleanBin, cleanInfo bool) {
upload.Node = nil
default:
if err := upload.lu.CopyMetadata(p, upload.Node.InternalPath(), func(attributeName string) bool {
if err := upload.lu.CopyMetadata(upload.Ctx, p, upload.Node.InternalPath(), func(attributeName string) bool {
return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||

View File

@@ -21,6 +21,7 @@ package eosfs
import (
"context"
"database/sql"
b64 "encoding/base64"
"encoding/json"
"fmt"
"io"
@@ -33,8 +34,6 @@ import (
"strings"
"time"
b64 "encoding/base64"
"github.com/bluele/gcache"
grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
@@ -1673,7 +1672,7 @@ func (fs *eosfs) CreateDir(ctx context.Context, ref *provider.Reference) error {
}
// TouchFile as defined in the storage.FS interface
func (fs *eosfs) TouchFile(ctx context.Context, ref *provider.Reference, _ bool) error {
func (fs *eosfs) TouchFile(ctx context.Context, ref *provider.Reference, _ bool, _ string) error {
log := appctx.GetLogger(ctx)
fn, auth, err := fs.resolveRefAndGetAuth(ctx, ref)

View File

@@ -806,7 +806,7 @@ func (fs *localfs) CreateDir(ctx context.Context, ref *provider.Reference) error
}
// TouchFile as defined in the storage.FS interface
func (fs *localfs) TouchFile(ctx context.Context, ref *provider.Reference, _ bool) error {
func (fs *localfs) TouchFile(ctx context.Context, ref *provider.Reference, _ bool, _ string) error {
return fmt.Errorf("unimplemented: TouchFile")
}

View File

@@ -33,26 +33,24 @@ import (
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
var (
// Propagator is the default Reva propagator.
Propagator = propagation.NewCompositeTextMapPropagator(propagation.Baggage{}, propagation.TraceContext{})
defaultProvider = revaDefaultTracerProvider{
provider: trace.NewNoopTracerProvider(),
}
defaultProvider = revaDefaultTracerProvider{}
)
type revaDefaultTracerProvider struct {
mutex sync.RWMutex
initialized bool
provider trace.TracerProvider
}
// NewTracerProvider returns a new TracerProvider, configure for the specified service
@@ -86,9 +84,7 @@ func NewTracerProvider(opts ...Option) trace.TracerProvider {
// SetDefaultTracerProvider sets the default trace provider
func SetDefaultTracerProvider(tp trace.TracerProvider) {
defaultProvider.mutex.Lock()
defer defaultProvider.mutex.Unlock()
defaultProvider.provider = tp
otel.SetTracerProvider(tp)
defaultProvider.initialized = true
}
@@ -99,14 +95,13 @@ func InitDefaultTracerProvider(collector, endpoint string) {
defaultProvider.mutex.Lock()
defer defaultProvider.mutex.Unlock()
if !defaultProvider.initialized {
defaultProvider.provider = getJaegerTracerProvider(Options{
SetDefaultTracerProvider(getJaegerTracerProvider(Options{
Enabled: true,
Collector: collector,
Endpoint: endpoint,
ServiceName: "reva default jaeger provider",
})
}))
}
defaultProvider.initialized = true
}
// DefaultProvider returns the "global" default TracerProvider
@@ -114,7 +109,7 @@ func InitDefaultTracerProvider(collector, endpoint string) {
func DefaultProvider() trace.TracerProvider {
defaultProvider.mutex.RLock()
defer defaultProvider.mutex.RUnlock()
return defaultProvider.provider
return otel.GetTracerProvider()
}
// getJaegerTracerProvider returns a new TracerProvider, configure for the specified service

View File

File diff suppressed because it is too large Load Diff

20
vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
// patterns for OpenTelemetry things. This package represents the conventions
// as of the v1.20.0 version of the OpenTelemetry specification.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"

View File

@@ -0,0 +1,199 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated from semantic convention specification. DO NOT EDIT.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
import "go.opentelemetry.io/otel/attribute"
// This semantic convention defines the attributes used to represent a feature
// flag evaluation as an event.
const (
// FeatureFlagKeyKey is the attribute Key conforming to the
// "feature_flag.key" semantic conventions. It represents the unique
// identifier of the feature flag.
//
// Type: string
// RequirementLevel: Required
// Stability: stable
// Examples: 'logo-color'
FeatureFlagKeyKey = attribute.Key("feature_flag.key")
// FeatureFlagProviderNameKey is the attribute Key conforming to the
// "feature_flag.provider_name" semantic conventions. It represents the
// name of the service provider that performs the flag evaluation.
//
// Type: string
// RequirementLevel: Recommended
// Stability: stable
// Examples: 'Flag Manager'
FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
// FeatureFlagVariantKey is the attribute Key conforming to the
// "feature_flag.variant" semantic conventions. It represents the sHOULD be
// a semantic identifier for a value. If one is unavailable, a stringified
// version of the value can be used.
//
// Type: string
// RequirementLevel: Recommended
// Stability: stable
// Examples: 'red', 'true', 'on'
// Note: A semantic identifier, commonly referred to as a variant, provides
// a means
// for referring to a value without including the value itself. This can
// provide additional context for understanding the meaning behind a value.
// For example, the variant `red` maybe be used for the value `#c05543`.
//
// A stringified version of the value can be used in situations where a
// semantic identifier is unavailable. String representation of the value
// should be determined by the implementer.
FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
)
// FeatureFlagKey returns an attribute KeyValue conforming to the
// "feature_flag.key" semantic conventions. It represents the unique identifier
// of the feature flag.
func FeatureFlagKey(val string) attribute.KeyValue {
return FeatureFlagKeyKey.String(val)
}
// FeatureFlagProviderName returns an attribute KeyValue conforming to the
// "feature_flag.provider_name" semantic conventions. It represents the name of
// the service provider that performs the flag evaluation.
func FeatureFlagProviderName(val string) attribute.KeyValue {
return FeatureFlagProviderNameKey.String(val)
}
// FeatureFlagVariant returns an attribute KeyValue conforming to the
// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
// semantic identifier for a value. If one is unavailable, a stringified
// version of the value can be used.
func FeatureFlagVariant(val string) attribute.KeyValue {
return FeatureFlagVariantKey.String(val)
}
// RPC received/sent message.
const (
// MessageTypeKey is the attribute Key conforming to the "message.type"
// semantic conventions. It represents the whether this is a received or
// sent message.
//
// Type: Enum
// RequirementLevel: Optional
// Stability: stable
MessageTypeKey = attribute.Key("message.type")
// MessageIDKey is the attribute Key conforming to the "message.id"
// semantic conventions. It represents the mUST be calculated as two
// different counters starting from `1` one for sent messages and one for
// received message.
//
// Type: int
// RequirementLevel: Optional
// Stability: stable
// Note: This way we guarantee that the values will be consistent between
// different implementations.
MessageIDKey = attribute.Key("message.id")
// MessageCompressedSizeKey is the attribute Key conforming to the
// "message.compressed_size" semantic conventions. It represents the
// compressed size of the message in bytes.
//
// Type: int
// RequirementLevel: Optional
// Stability: stable
MessageCompressedSizeKey = attribute.Key("message.compressed_size")
// MessageUncompressedSizeKey is the attribute Key conforming to the
// "message.uncompressed_size" semantic conventions. It represents the
// uncompressed size of the message in bytes.
//
// Type: int
// RequirementLevel: Optional
// Stability: stable
MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
)
var (
// sent
MessageTypeSent = MessageTypeKey.String("SENT")
// received
MessageTypeReceived = MessageTypeKey.String("RECEIVED")
)
// MessageID returns an attribute KeyValue conforming to the "message.id"
// semantic conventions. It represents the mUST be calculated as two different
// counters starting from `1` one for sent messages and one for received
// message.
func MessageID(val int) attribute.KeyValue {
return MessageIDKey.Int(val)
}
// MessageCompressedSize returns an attribute KeyValue conforming to the
// "message.compressed_size" semantic conventions. It represents the compressed
// size of the message in bytes.
func MessageCompressedSize(val int) attribute.KeyValue {
return MessageCompressedSizeKey.Int(val)
}
// MessageUncompressedSize returns an attribute KeyValue conforming to the
// "message.uncompressed_size" semantic conventions. It represents the
// uncompressed size of the message in bytes.
func MessageUncompressedSize(val int) attribute.KeyValue {
return MessageUncompressedSizeKey.Int(val)
}
// The attributes used to report a single exception associated with a span.
const (
// ExceptionEscapedKey is the attribute Key conforming to the
// "exception.escaped" semantic conventions. It represents the sHOULD be
// set to true if the exception event is recorded at a point where it is
// known that the exception is escaping the scope of the span.
//
// Type: boolean
// RequirementLevel: Optional
// Stability: stable
// Note: An exception is considered to have escaped (or left) the scope of
// a span,
// if that span is ended while the exception is still logically "in
// flight".
// This may be actually "in flight" in some languages (e.g. if the
// exception
// is passed to a Context manager's `__exit__` method in Python) but will
// usually be caught at the point of recording the exception in most
// languages.
//
// It is usually not possible to determine at the point where an exception
// is thrown
// whether it will escape the scope of a span.
// However, it is trivial to know that an exception
// will escape, if one checks for an active exception just before ending
// the span,
// as done in the [example above](#recording-an-exception).
//
// It follows that an exception may still escape the scope of the span
// even if the `exception.escaped` attribute was not set or set to false,
// since the event might have been recorded at a time where it was not
// clear whether the exception will escape.
ExceptionEscapedKey = attribute.Key("exception.escaped")
)
// ExceptionEscaped returns an attribute KeyValue conforming to the
// "exception.escaped" semantic conventions. It represents the sHOULD be set to
// true if the exception event is recorded at a point where it is known that
// the exception is escaping the scope of the span.
func ExceptionEscaped(val bool) attribute.KeyValue {
return ExceptionEscapedKey.Bool(val)
}

View File

@@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
ExceptionEventName = "exception"
)

View File

@@ -0,0 +1,21 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
// HTTP scheme attributes.
var (
HTTPSchemeHTTP = HTTPSchemeKey.String("http")
HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
)

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"

2610
vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go generated vendored Normal file
View File

File diff suppressed because it is too large Load Diff

4
vendor/modules.txt vendored
View File

@@ -352,7 +352,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
github.com/cs3org/go-cs3apis/cs3/types/v1beta1
# github.com/cs3org/reva/v2 v2.14.1-0.20230623085734-919a9585f147 => github.com/micbar/reva/v2 v2.0.0-20230626125956-c381fe19a108
# github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806
## explicit; go 1.20
github.com/cs3org/reva/v2/cmd/revad/internal/grace
github.com/cs3org/reva/v2/cmd/revad/runtime
@@ -1841,6 +1841,7 @@ go.opentelemetry.io/otel/semconv/internal
go.opentelemetry.io/otel/semconv/v1.10.0
go.opentelemetry.io/otel/semconv/v1.12.0
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.4.0
# go.opentelemetry.io/otel/exporters/jaeger v1.15.1
## explicit; go 1.19
@@ -2195,4 +2196,3 @@ stash.kopano.io/kgol/oidc-go
## explicit; go 1.13
stash.kopano.io/kgol/rndm
# github.com/cs3org/go-cs3apis => github.com/2403905/go-cs3apis v0.0.0-20230517122726-727045414fd1
# github.com/cs3org/reva/v2 => github.com/micbar/reva/v2 v2.0.0-20230626125956-c381fe19a108