Merge pull request #1478 from opencloud-eu/fix-graceful-shutdown

Fix graceful shutdown
This commit is contained in:
Jörn Friedrich Dreyer
2025-09-12 13:00:34 +02:00
committed by GitHub
123 changed files with 5841 additions and 2904 deletions

11
go.mod
View File

@@ -56,7 +56,7 @@ require (
github.com/mitchellh/mapstructure v1.5.0
github.com/mna/pigeon v1.3.0
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
github.com/nats-io/nats-server/v2 v2.11.8
github.com/nats-io/nats-server/v2 v2.11.9
github.com/nats-io/nats.go v1.45.0
github.com/oklog/run v1.2.0
github.com/olekukonko/tablewriter v1.0.9
@@ -65,7 +65,7 @@ require (
github.com/onsi/gomega v1.38.2
github.com/open-policy-agent/opa v1.6.0
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76
github.com/opencloud-eu/reva/v2 v2.37.1-0.20250909074412-f272eeaa2674
github.com/opencloud-eu/reva/v2 v2.37.1-0.20250911072739-9e673e021dca
github.com/opensearch-project/opensearch-go/v4 v4.5.0
github.com/orcaman/concurrent-map v1.0.0
github.com/pkg/errors v0.9.1
@@ -134,6 +134,7 @@ require (
github.com/ajg/form v1.5.1 // indirect
github.com/alexedwards/argon2id v1.0.0 // indirect
github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 // indirect
github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -323,7 +324,7 @@ require (
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/sethvargo/go-diceware v0.5.0 // indirect
github.com/sethvargo/go-password v0.3.1 // indirect
github.com/shamaton/msgpack/v2 v2.2.3 // indirect
github.com/shamaton/msgpack/v2 v2.3.1 // indirect
github.com/shirou/gopsutil/v4 v4.25.5 // indirect
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
@@ -363,8 +364,8 @@ require (
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/mod v0.27.0 // indirect
golang.org/x/sys v0.35.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/time v0.13.0 // indirect
golang.org/x/tools v0.36.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect

20
go.sum
View File

@@ -869,8 +869,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8=
github.com/nats-io/jwt/v2 v2.7.4 h1:jXFuDDxs/GQjGDZGhNgH4tXzSUK6WQi2rsj4xmsNOtI=
github.com/nats-io/jwt/v2 v2.7.4/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA=
github.com/nats-io/nats-server/v2 v2.11.8 h1:7T1wwwd/SKTDWW47KGguENE7Wa8CpHxLD1imet1iW7c=
github.com/nats-io/nats-server/v2 v2.11.8/go.mod h1:C2zlzMA8PpiMMxeXSz7FkU3V+J+H15kiqrkvgtn2kS8=
github.com/nats-io/nats-server/v2 v2.11.9 h1:k7nzHZjUf51W1b08xiQih63Rdxh0yr5O4K892Mx5gQA=
github.com/nats-io/nats-server/v2 v2.11.9/go.mod h1:1MQgsAQX1tVjpf3Yzrk3x2pzdsZiNL/TVP3Amhp3CR8=
github.com/nats-io/nats.go v1.45.0 h1:/wGPbnYXDM0pLKFjZTX+2JOw9TQPoIgTFrUaH97giwA=
github.com/nats-io/nats.go v1.45.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
@@ -916,8 +916,8 @@ github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-202505121527
github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a/go.mod h1:pjcozWijkNPbEtX5SIQaxEW/h8VAVZYTLx+70bmB3LY=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76 h1:vD/EdfDUrv4omSFjrinT8Mvf+8D7f9g4vgQ2oiDrVUI=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76/go.mod h1:pzatilMEHZFT3qV7C/X3MqOa3NlRQuYhlRhZTL+hN6Q=
github.com/opencloud-eu/reva/v2 v2.37.1-0.20250909074412-f272eeaa2674 h1:35OSsH9o4GAL9LX+BckhYlpp+EqJ0Bz69MXz+5gpaV4=
github.com/opencloud-eu/reva/v2 v2.37.1-0.20250909074412-f272eeaa2674/go.mod h1:eu0fTK68n+drdu7eT0u1QODDH3VHMugAdrS6G5VhMwA=
github.com/opencloud-eu/reva/v2 v2.37.1-0.20250911072739-9e673e021dca h1:0RrrnhJAlyVmzKfwoG8UvNCNkLa2gxDh2qriv09IfW0=
github.com/opencloud-eu/reva/v2 v2.37.1-0.20250911072739-9e673e021dca/go.mod h1:BFtLyq/7VXuWJhsZKxFocZTsYIVTRoqY4oNI3mXCRu0=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -1065,8 +1065,8 @@ github.com/sethvargo/go-diceware v0.5.0 h1:exrQ7GpaBo00GqRVM1N8ChXSsi3oS7tjQiIeh
github.com/sethvargo/go-diceware v0.5.0/go.mod h1:Lg1SyPS7yQO6BBgTN5r4f2MUDkqGfLWsOjHPY0kA8iw=
github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU=
github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs=
github.com/shamaton/msgpack/v2 v2.2.3 h1:uDOHmxQySlvlUYfQwdjxyybAOzjlQsD1Vjy+4jmO9NM=
github.com/shamaton/msgpack/v2 v2.2.3/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI=
github.com/shamaton/msgpack/v2 v2.3.1 h1:R3QNLIGA/tbdczNMZ5PCRxrXvy+fnzsIaHG4kKMgWYo=
github.com/shamaton/msgpack/v2 v2.3.1/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil/v4 v4.25.5 h1:rtd9piuSMGeU8g1RMXjZs9y9luK5BwtnG7dZaQUJAsc=
@@ -1519,8 +1519,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1556,8 +1556,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@@ -2,13 +2,15 @@ package service
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"net/rpc"
"os"
"os/signal"
"sort"
"strings"
"sync"
"time"
"github.com/cenkalti/backoff"
@@ -16,6 +18,7 @@ import (
"github.com/olekukonko/tablewriter"
occfg "github.com/opencloud-eu/opencloud/pkg/config"
"github.com/opencloud-eu/opencloud/pkg/log"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/shared"
activitylog "github.com/opencloud-eu/opencloud/services/activitylog/pkg/command"
@@ -70,6 +73,11 @@ var (
// wait funcs run after the service group has been started.
_waitFuncs = []func(*occfg.Config) error{pingNats, pingGateway, nil, wait(time.Second), nil}
// Use the runner.DefaultInterruptDuration as defaults for the individual service shutdown timeouts.
_defaultShutdownTimeoutDuration = runner.DefaultInterruptDuration
// Use the runner.DefaultGroupInterruptDuration as defaults for the server interruption timeout.
_defaultInterruptTimeoutDuration = runner.DefaultGroupInterruptDuration
)
type serviceFuncMap map[string]func(*occfg.Config) suture.Service
@@ -82,8 +90,6 @@ type Service struct {
Log log.Logger
serviceToken map[string][]suture.ServiceToken
context context.Context
cancel context.CancelFunc
cfg *occfg.Config
}
@@ -105,16 +111,12 @@ func NewService(ctx context.Context, options ...Option) (*Service, error) {
log.Level(opts.Config.Log.Level),
)
globalCtx, cancelGlobal := context.WithCancel(ctx)
s := &Service{
Services: make([]serviceFuncMap, len(_waitFuncs)),
Additional: make(serviceFuncMap),
Log: l,
serviceToken: make(map[string][]suture.ServiceToken),
context: globalCtx,
cancel: cancelGlobal,
cfg: opts.Config,
}
@@ -358,8 +360,12 @@ func Start(ctx context.Context, o ...Option) error {
return err
}
// get a cancel function to stop the service
ctx, cancel := context.WithCancel(ctx)
// cancel the context when a signal is received.
var cancel context.CancelFunc
if ctx == nil {
ctx, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
// tolerance controls backoff cycles from the supervisor.
tolerance := 5
@@ -397,30 +403,17 @@ func Start(ctx context.Context, o ...Option) error {
if err != nil {
s.Log.Fatal().Err(err).Msg("could not start listener")
}
defer func() {
if r := recover(); r != nil {
reason := strings.Builder{}
if _, err = net.Dial("tcp", net.JoinHostPort(s.cfg.Runtime.Host, s.cfg.Runtime.Port)); err != nil {
reason.WriteString("runtime address already in use")
}
fmt.Println(reason.String())
}
}()
srv := new(http.Server)
// prepare the set of services to run
s.generateRunSet(s.cfg)
// there are reasons not to do this, but we have race conditions ourselves. Until we resolve them, mind the following disclaimer:
// There are reasons not to do this, but we have race conditions ourselves. Until we resolve them, mind the following disclaimer:
// Calling ServeBackground will CORRECTLY start the supervisor running in a new goroutine. It is risky to directly run
// go supervisor.Serve()
// because that will briefly create a race condition as it starts up, if you try to .Add() services immediately afterward.
// https://pkg.go.dev/github.com/thejerf/suture/v4@v4.0.0#Supervisor
go s.Supervisor.ServeBackground(s.context)
// trap will block on context done channel for interruptions.
go trap(s, ctx)
go s.Supervisor.ServeBackground(ctx)
for i, service := range s.Services {
scheduleServiceTokens(s, service)
@@ -434,7 +427,14 @@ func Start(ctx context.Context, o ...Option) error {
// schedule services that are optional
scheduleServiceTokens(s, s.Additional)
return http.Serve(l, nil)
go func() {
if err = srv.Serve(l); err != nil && !errors.Is(err, http.ErrServerClosed) {
s.Log.Fatal().Err(err).Msg("could not start rpc server")
}
}()
// trapShutdownCtx will block on the context-done channel for interruptions.
return trapShutdownCtx(s, srv, ctx)
}
// scheduleServiceTokens adds service tokens to the service supervisor.
@@ -501,20 +501,54 @@ func (s *Service) List(_ struct{}, reply *string) error {
return nil
}
// trap blocks on halt channel. When the runtime is interrupted it
// signals the controller to stop any supervised process.
func trap(s *Service, ctx context.Context) {
func trapShutdownCtx(s *Service, srv *http.Server, ctx context.Context) error {
<-ctx.Done()
s.Log.Info().Msg("starting graceful shutdown")
start := time.Now()
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), _defaultShutdownTimeoutDuration)
defer cancel()
s.Log.Debug().Msg("starting runtime listener shutdown")
if err := srv.Shutdown(ctx); err != nil {
s.Log.Error().Err(err).Msg("could not shutdown runtime listener")
return
}
s.Log.Debug().Msg("runtime listener shutdown done")
}()
for sName := range s.serviceToken {
for i := range s.serviceToken[sName] {
if err := s.Supervisor.Remove(s.serviceToken[sName][i]); err != nil {
s.Log.Error().Err(err).Str("service", "runtime service").Msgf("terminating with signal: %v", s)
}
wg.Add(1)
go func() {
s.Log.Debug().Str("service", sName).Msg("starting graceful shutdown for service")
defer wg.Done()
if err := s.Supervisor.RemoveAndWait(s.serviceToken[sName][i], _defaultShutdownTimeoutDuration); err != nil && !errors.Is(err, suture.ErrSupervisorNotRunning) {
s.Log.Error().Err(err).Str("service", sName).Msg("could not shutdown service")
return
}
s.Log.Debug().Str("service", sName).Msg("graceful shutdown for service done")
}()
}
}
s.Log.Debug().Str("service", "runtime service").Msgf("terminating with signal: %v", s)
time.Sleep(3 * time.Second) // give the services time to deregister
os.Exit(0) // FIXME this causes an early exit that prevents services from shutting down properly
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-time.After(_defaultInterruptTimeoutDuration):
s.Log.Error().Dur("timeoutDuration", _defaultInterruptTimeoutDuration).Msg("graceful shutdown timeout reached, terminating")
return errors.New("graceful shutdown timeout reached, terminating")
case <-done:
duration := time.Since(start)
s.Log.Info().Dur("duration", duration).Msg("graceful shutdown done")
return nil
}
}
// pingNats will attempt to connect to nats, blocking until a connection is established
@@ -545,7 +579,7 @@ func pingGateway(cfg *occfg.Config) error {
n := b.NextBackOff()
_, err := pool.GetGatewayServiceClient(cfg.Reva.Address)
if err != nil && n > time.Second {
logger.New().Error().Err(err).Msgf("can't connect to gateway service, retrying in %s", n)
logger.New().Error().Err(err).Dur("backoff", n).Msg("can't connect to gateway service, retrying")
}
return err
}

162
pkg/runner/factory.go Normal file
View File

@@ -0,0 +1,162 @@
package runner
import (
"context"
"errors"
"net"
"net/http"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
ohttp "github.com/opencloud-eu/opencloud/pkg/service/http"
"github.com/opencloud-eu/reva/v2/cmd/revad/runtime"
"google.golang.org/grpc"
)
// NewGoMicroGrpcServerRunner creates a new runner based on the provided go-micro's
// GRPC service. The service is expected to be created via
// "github.com/opencloud-eu/opencloud/pkg/service/grpc".NewService(...) function
//
// The runner will behave as described:
// * The task is to start a server and listen for connections. If the server
// can't start, the task will finish with that error.
// * The stopper will call the server's stop method and send the result to
// the task.
// * The stopper will run asynchronously because the stop method could take a
// while and we don't want to block
func NewGoMicroGrpcServerRunner(name string, server ogrpc.Service, opts ...Option) *Runner {
httpCh := make(chan error, 1)
r := New(name, func() error {
// start the server and return if it fails
if err := server.Server().Start(); err != nil {
return err
}
return <-httpCh // wait for the result
}, func() {
// stop implies deregistering and waiting for request to finish,
// so don't block
go func() {
httpCh <- server.Server().Stop() // stop and send result through channel
close(httpCh)
}()
}, opts...)
return r
}
// NewGoMicroHttpServerRunner creates a new runner based on the provided go-micro's
// HTTP service. The service is expected to be created via
// "github.com/opencloud-eu/opencloud/pkg/service/http".NewService(...) function
//
// The runner will behave as described:
// * The task is to start a server and listen for connections. If the server
// can't start, the task will finish with that error.
// * The stopper will call the server's stop method and send the result to
// the task.
// * The stopper will run asynchronously because the stop method could take a
// while and we don't want to block
func NewGoMicroHttpServerRunner(name string, server ohttp.Service, opts ...Option) *Runner {
httpCh := make(chan error, 1)
r := New(name, func() error {
// start the server and return if it fails
if err := server.Server().Start(); err != nil {
return err
}
return <-httpCh // wait for the result
}, func() {
// stop implies deregistering and waiting for request to finish,
// so don't block
go func() {
httpCh <- server.Server().Stop() // stop and send result through channel
close(httpCh)
}()
}, opts...)
return r
}
// NewGolangHttpServerRunner creates a new runner based on the provided HTTP server.
// The HTTP server is expected to be created via
// "github.com/opencloud-eu/opencloud/pkg/service/debug".NewService(...) function
// and it's expected to be a regular golang HTTP server
//
// The runner will behave as described:
// * The task starts a server and listens for connections. If the server
// can't start, the task will finish with that error. If the server is shut down
// the task will wait for the shutdown to return that result (task won't finish
// immediately, but waits until shutdown returns)
// * The stopper will call the server's shutdown method and send the result to
// the task. The stopper will wait up to 5 secs for the shutdown.
// * The stopper will run asynchronously because the shutdown could take a
// while and we don't want to block
func NewGolangHttpServerRunner(name string, server *http.Server, opts ...Option) *Runner {
debugCh := make(chan error, 1)
r := New(name, func() error {
// start listening and return if the error is NOT ErrServerClosed.
// ListenAndServe will always return a non-nil error.
// We need to wait and get the result of the Shutdown call.
// App shouldn't exit until Shutdown has returned.
if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
return err
}
// wait for the shutdown and return the result
return <-debugCh
}, func() {
// Since Shutdown might take some time, don't block
go func() {
// Use the DefaultInterruptDuration or InterruptDuration as the shutdown timeout.
shutdownCtx, cancel := context.WithTimeout(context.Background(), DefaultInterruptDuration)
defer cancel()
debugCh <- server.Shutdown(shutdownCtx)
close(debugCh)
}()
}, opts...)
return r
}
// NewGolangGrpcServerRunner creates a new runner based on the provided GRPC
// server. The GRPC server is expected to be a regular golang GRPC server,
// created via "google.golang.org/grpc".NewServer(...)
// A listener also needs to be provided for the server to listen there.
//
// The runner will just start the GRPC server in the listener, and the server
// will be gracefully stopped when interrupted
func NewGolangGrpcServerRunner(name string, server *grpc.Server, listener net.Listener, opts ...Option) *Runner {
r := New(name, func() error {
return server.Serve(listener)
}, func() {
// Since GracefulStop might take some time, don't block
go func() {
server.GracefulStop()
}()
}, opts...)
return r
}
// NewRevaServiceRunner creates a new runner based on the provided reva RevaDrivenServer
// The runner will behave as described:
// * The task is to start a server and listen for connections. If the server
// can't start, the task will finish with that error.
// * The stopper will call the server's stop method and send the result to
// the task.
// * The stopper will run asynchronously because the stop method could take a
// while and we don't want to block
func NewRevaServiceRunner(name string, server runtime.RevaDrivenServer, opts ...Option) *Runner {
httpCh := make(chan error, 1)
r := New(name, func() error {
// start the server and return if it fails
if err := server.Start(); err != nil {
return err
}
return <-httpCh // wait for the result
}, func() {
// stop implies deregistering and waiting for the request to finish,
// so don't block
go func() {
httpCh <- server.Stop() // stop and send a result through a channel
close(httpCh)
}()
}, opts...)
return r
}

View File

@@ -21,7 +21,7 @@ import (
//
// The interrupt duration for the group can be set through the
// `WithInterruptDuration` option. If the option isn't supplied, the default
// value (15 secs) will be used.
// value `DefaultGroupInterruptDuration` will be used.
//
// It's recommended that the timeouts are handled by each runner individually,
// meaning that each runner's timeout should be less than the group runner's

View File

@@ -7,10 +7,10 @@ import (
var (
// DefaultInterruptDuration is the default value for the `WithInterruptDuration`
// for the "regular" runners. This global value can be adjusted if needed.
DefaultInterruptDuration = 10 * time.Second
DefaultInterruptDuration = 20 * time.Second
// DefaultGroupInterruptDuration is the default value for the `WithInterruptDuration`
// for the group runners. This global value can be adjusted if needed.
DefaultGroupInterruptDuration = 15 * time.Second
DefaultGroupInterruptDuration = 25 * time.Second
)
// Option defines a single option function.

View File

@@ -32,7 +32,7 @@ type Runner struct {
//
// The interrupt duration, which can be set through the `WithInterruptDuration`
// option, will be used to ensure the runner doesn't block forever. If the
// option isn't supplied, the default value (10 secs) will be used.
// option isn't supplied, the default value `DefaultInterruptDuration` will be used.
// The interrupt duration will be used to start a timeout when the
// runner gets interrupted (either the context of the `Run` method is done
// or this runner's `Interrupt` method is called). If the timeout is reached,

View File

@@ -1,10 +1,16 @@
package runner
import (
"os"
"strings"
"syscall"
"time"
)
var (
StopSignals = []os.Signal{syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT}
)
// Runable represents a task that can be executed by the Runner.
// It is expected to be a long-running task with an indefinite execution time,
// so it's suitable for servers or services.

View File

@@ -3,12 +3,13 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/log"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/services/antivirus/pkg/config"
"github.com/opencloud-eu/opencloud/services/antivirus/pkg/config/parser"
@@ -26,31 +27,38 @@ func Server(cfg *config.Config) *cli.Command {
return configlog.ReturnFatal(parser.ParseConfig(cfg))
},
Action: func(c *cli.Context) error {
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
logger = log.NewLogger(
log.Name(cfg.Service.Name),
log.Level(cfg.Log.Level),
log.Pretty(cfg.Log.Pretty),
log.Color(cfg.Log.Color),
log.File(cfg.Log.File),
)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
logger := log.NewLogger(
log.Name(cfg.Service.Name),
log.Level(cfg.Log.Level),
log.Pretty(cfg.Log.Pretty),
log.Color(cfg.Log.Color),
log.File(cfg.Log.File),
)
defer cancel()
traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name)
if err != nil {
return err
}
gr := runner.NewGroup()
{
svc, err := service.NewAntivirus(cfg, logger, traceProvider)
if err != nil {
return cli.Exit(err.Error(), 1)
}
gr.Add(svc.Run, func(_ error) {
cancel()
})
gr.Add(runner.New(cfg.Service.Name+".svc", func() error {
return svc.Run()
}, func() {
svc.Close()
}))
}
{
@@ -64,13 +72,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -11,6 +11,7 @@ import (
"os"
"slices"
"sync"
"sync/atomic"
"time"
"github.com/opencloud-eu/reva/v2/pkg/bytesize"
@@ -54,7 +55,15 @@ func NewAntivirus(cfg *config.Config, logger log.Logger, tracerProvider trace.Tr
return Antivirus{}, err
}
av := Antivirus{config: cfg, log: logger, tracerProvider: tracerProvider, scanner: scanner, client: rhttp.GetHTTPClient(rhttp.Insecure(true))}
av := Antivirus{
config: cfg,
log: logger,
tracerProvider: tracerProvider,
scanner: scanner,
client: rhttp.GetHTTPClient(rhttp.Insecure(true)),
stopCh: make(chan struct{}, 1),
stopped: new(atomic.Bool),
}
switch mode := cfg.MaxScanSizeMode; mode {
case config.MaxScanSizeModeSkip, config.MaxScanSizeModePartial:
@@ -91,7 +100,9 @@ type Antivirus struct {
maxScanSize uint64
tracerProvider trace.TracerProvider
client *http.Client
client *http.Client
stopCh chan struct{}
stopped *atomic.Bool
}
// Run runs the service
@@ -127,30 +138,52 @@ func (av Antivirus) Run() error {
}
wg := sync.WaitGroup{}
for i := 0; i < av.config.Workers; i++ {
for range av.config.Workers {
wg.Add(1)
go func() {
defer wg.Done()
for e := range ch {
err := av.processEvent(e, natsStream)
if err != nil {
switch {
case errors.Is(err, ErrFatal):
av.log.Fatal().Err(err).Msg("fatal error - exiting")
case errors.Is(err, ErrEvent):
av.log.Error().Err(err).Msg("continuing")
default:
av.log.Fatal().Err(err).Msg("unknown error - exiting")
EventLoop:
for {
select {
case e, ok := <-ch:
if !ok {
break EventLoop
}
err := av.processEvent(e, natsStream)
if err != nil {
switch {
case errors.Is(err, ErrFatal):
av.log.Fatal().Err(err).Msg("fatal error - exiting")
case errors.Is(err, ErrEvent):
av.log.Error().Err(err).Msg("continuing")
default:
av.log.Fatal().Err(err).Msg("unknown error - exiting")
}
}
if av.stopped.Load() {
break EventLoop
}
case <-av.stopCh:
break EventLoop
}
}
}()
}
wg.Wait()
return nil
}
func (av Antivirus) Close() {
if av.stopped.CompareAndSwap(false, true) {
close(av.stopCh)
}
}
func (av Antivirus) processEvent(e events.Event, s events.Publisher) error {
ctx, span := av.tracerProvider.Tracer("antivirus").Start(e.GetTraceContext(context.Background()), "processEvent")
defer span.End()

View File

@@ -3,16 +3,14 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/cmd/revad/runtime"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/app-provider/pkg/config"
@@ -37,66 +35,63 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.AppProviderConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/app-registry/pkg/config"
@@ -36,67 +34,63 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.AppRegistryConfigFromStruct(cfg, logger)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,14 +3,15 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/events/stream"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/generators"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/services/audit/pkg/config"
"github.com/opencloud-eu/opencloud/services/audit/pkg/config/parser"
"github.com/opencloud-eu/opencloud/services/audit/pkg/logging"
@@ -29,13 +30,15 @@ func Server(cfg *config.Config) *cli.Command {
return configlog.ReturnFatal(parser.ParseConfig(cfg))
},
Action: func(c *cli.Context) error {
var (
gr = run.Group{}
logger = logging.Configure(cfg.Service.Name, cfg.Log)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
ctx, cancel = context.WithCancel(c.Context)
)
defer cancel()
logger := logging.Configure(cfg.Service.Name, cfg.Log)
gr := runner.NewGroup()
connName := generators.GenerateConnectionName(cfg.Service.Name, generators.NTypeBus)
client, err := stream.NatsFromConfig(connName, false, stream.NatsConfig(cfg.Events))
@@ -47,24 +50,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
svc.AuditLoggerFromConfig(ctx, cfg.Auditlog, evts, logger)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "stream").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "stream").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
// we need an additional context for the audit server in order to
// cancel it anytime
svcCtx, svcCancel := context.WithCancel(ctx)
defer svcCancel()
cancel()
})
gr.Add(runner.New(cfg.Service.Name+".svc", func() error {
svc.AuditLoggerFromConfig(svcCtx, cfg.Auditlog, evts, logger)
return nil
}, func() {
svcCancel()
}))
{
debugServer, err := debug.Server(
@@ -77,12 +73,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -42,7 +42,11 @@ func StartAuditLogger(ctx context.Context, ch <-chan events.Event, log log.Logge
select {
case <-ctx.Done():
return
case i := <-ch:
case i, ok := <-ch:
if !ok {
return
}
var auditEvent interface{}
switch ev := i.Event.(type) {
case events.ShareCreated:
@@ -113,6 +117,10 @@ func StartAuditLogger(ctx context.Context, ch <-chan events.Event, log log.Logge
auditEvent = types.ScienceMeshInviteTokenGenerated(ev)
default:
log.Error().Interface("event", ev).Msg(fmt.Sprintf("can't handle event of type '%T'", ev))
if ctx.Err() != nil {
// if context is done, do not process more events
return
}
continue
}
@@ -120,12 +128,19 @@ func StartAuditLogger(ctx context.Context, ch <-chan events.Event, log log.Logge
b, err := marshaller(auditEvent)
if err != nil {
log.Error().Err(err).Msg("error marshaling the event")
if ctx.Err() != nil {
return
}
continue
}
for _, l := range logto {
l(b)
}
if ctx.Err() != nil {
return
}
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -44,60 +42,47 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
gr := runner.NewGroup()
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.AuthAppConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner("auth-app_debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
@@ -128,24 +113,32 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
rClient := settingssvc.NewRoleService("eu.opencloud.api.settings", grpcClient)
server, err := http.Server(
http.Logger(logger),
http.Context(ctx),
http.Config(cfg),
http.GatewaySelector(gatewaySelector),
http.RoleClient(rClient),
http.TracerProvider(traceProvider),
)
if err != nil {
logger.Fatal().Err(err).Msg("failed to initialize http server")
{
rClient := settingssvc.NewRoleService("eu.opencloud.api.settings", grpcClient)
server, err := http.Server(
http.Logger(logger),
http.Context(ctx),
http.Config(cfg),
http.GatewaySelector(gatewaySelector),
http.RoleClient(rClient),
http.TracerProvider(traceProvider),
)
if err != nil {
logger.Fatal().Err(err).Msg("failed to initialize http server")
}
gr.Add(runner.NewGoMicroHttpServerRunner("auth-app_http", server))
}
gr.Add(server.Run, func(err error) {
logger.Error().Err(err).Str("server", "http").Msg("shutting down server")
})
grResults := gr.Run(ctx)
return gr.Run()
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,14 +3,12 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/ldap"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/auth-basic/pkg/config"
@@ -37,10 +35,15 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
gr := runner.NewGroup()
// the reva runtime calls `os.Exit` in the case of a failure and there is no way for the OpenCloud
// runtime to catch it and restart a reva service. Therefore, we need to ensure the service has
@@ -54,62 +57,53 @@ func Server(cfg *config.Config) *cli.Command {
}
}
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.AuthBasicConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/auth-bearer/pkg/config"
@@ -36,67 +34,63 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.AuthBearerConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/auth-machine/pkg/config"
@@ -36,67 +34,63 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.AuthMachineConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/auth-service/pkg/config"
@@ -36,68 +34,63 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
reg := registry.GetRegistry()
gr := runner.NewGroup()
rcfg := revaconfig.AuthMachineConfigFromStruct(cfg)
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr.Add(func() error {
runtime.RunWithOptions(rcfg, pidFile,
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.AuthMachineConfigFromStruct(cfg)
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,8 +3,8 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/events/stream"
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
@@ -13,6 +13,7 @@ import (
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/generators"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/clientlog/pkg/config"
@@ -61,14 +62,16 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
mtrcs := metrics.New()
mtrcs.BuildInfo.WithLabelValues(version.GetString()).Set(1)
defer cancel()
connName := generators.GenerateConnectionName(cfg.Service.Name, generators.NTypeBus)
s, err := stream.NatsFromConfig(connName, false, stream.NatsConfig(cfg.Events))
if err != nil {
@@ -90,6 +93,7 @@ func Server(cfg *config.Config) *cli.Command {
return fmt.Errorf("could not get reva client selector: %s", err)
}
gr := runner.NewGroup()
{
svc, err := service.NewClientlogService(
service.Logger(logger),
@@ -105,23 +109,11 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
gr.Add(runner.New(cfg.Service.Name+".svc", func() error {
return svc.Run()
}, func(err error) {
if err != nil {
logger.Info().
Str("transport", "stream").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "stream").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
}, func() {
svc.Close()
}))
}
{
@@ -135,13 +127,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -7,6 +7,7 @@ import (
"fmt"
"path/filepath"
"reflect"
"sync/atomic"
gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
group "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1"
@@ -33,6 +34,8 @@ type ClientlogService struct {
tracer trace.Tracer
publisher events.Publisher
ch <-chan events.Event
stopCh chan struct{}
stopped atomic.Bool
}
// NewClientlogService returns a clientlog service
@@ -60,6 +63,7 @@ func NewClientlogService(opts ...Option) (*ClientlogService, error) {
tracer: o.TraceProvider.Tracer("github.com/opencloud-eu/opencloud/services/clientlog/pkg/service"),
publisher: o.Stream,
ch: ch,
stopCh: make(chan struct{}, 1),
}
for _, e := range o.RegisteredEvents {
@@ -72,13 +76,32 @@ func NewClientlogService(opts ...Option) (*ClientlogService, error) {
// Run runs the service
func (cl *ClientlogService) Run() error {
for event := range cl.ch {
cl.processEvent(event)
EventLoop:
for {
select {
case event, ok := <-cl.ch:
if !ok {
break EventLoop
}
cl.processEvent(event)
if cl.stopped.Load() {
break EventLoop
}
case <-cl.stopCh:
break EventLoop
}
}
return nil
}
func (cl *ClientlogService) Close() {
if cl.stopped.CompareAndSwap(false, true) {
close(cl.stopCh)
}
}
func (cl *ClientlogService) processEvent(event events.Event) {
gwc, err := cl.gatewaySelector.Next()
if err != nil {

View File

@@ -4,14 +4,15 @@ import (
"context"
"fmt"
"net"
"os/signal"
"time"
"github.com/oklog/run"
"github.com/urfave/cli/v2"
microstore "go-micro.dev/v4/store"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/services/collaboration/pkg/config"
"github.com/opencloud-eu/opencloud/services/collaboration/pkg/config/parser"
@@ -41,9 +42,12 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// prepare components
if err := helpers.RegisterOpenCloudService(ctx, cfg, logger); err != nil {
@@ -89,6 +93,8 @@ func Server(cfg *config.Config) *cli.Command {
store.Authentication(cfg.Store.AuthUsername, cfg.Store.AuthPassword),
)
gr := runner.NewGroup()
// start GRPC server
grpcServer, teardown, err := grpc.Server(
grpc.AppURLs(appUrls),
@@ -103,28 +109,11 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
l, err := net.Listen("tcp", cfg.GRPC.Addr)
if err != nil {
return err
}
return grpcServer.Serve(l)
},
func(err error) {
if err != nil {
logger.Info().
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
l, err := net.Listen("tcp", cfg.GRPC.Addr)
if err != nil {
return err
}
gr.Add(runner.NewGolangGrpcServerRunner(cfg.Service.Name+".grpc", grpcServer, l))
// start debug server
debugServer, err := debug.Server(
@@ -136,11 +125,7 @@ func Server(cfg *config.Config) *cli.Command {
logger.Error().Err(err).Str("transport", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
// start HTTP server
httpServer, err := http.Server(
@@ -152,14 +137,20 @@ func Server(cfg *config.Config) *cli.Command {
http.Store(st),
)
if err != nil {
logger.Error().Err(err).Str("transport", "http").Msg("Failed to initialize server")
logger.Info().Err(err).Str("transport", "http").Msg("Failed to initialize server")
return err
}
gr.Add(httpServer.Run, func(_ error) {
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner("collaboration_http", httpServer))
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,8 +3,8 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/pkg/events/stream"
"github.com/opencloud-eu/reva/v2/pkg/store"
"github.com/urfave/cli/v2"
@@ -12,6 +12,7 @@ import (
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/generators"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -46,16 +47,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
m = metrics.New()
)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
m := metrics.New()
m.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
connName := generators.GenerateConnectionName(cfg.Service.Name, generators.NTypeBus)
consumer, err := stream.NatsFromConfig(connName, false, stream.NatsConfig(cfg.Events))
if err != nil {
@@ -84,21 +87,7 @@ func Server(cfg *config.Config) *cli.Command {
grpc.TraceProvider(traceProvider),
)
gr.Add(service.Run, func(err error) {
if err == nil {
logger.Info().
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroGrpcServerRunner(cfg.Service.Name+".grpc", service))
{
debugServer, err := debug.Server(
@@ -111,13 +100,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,16 +3,14 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/cmd/revad/runtime"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/frontend/pkg/config"
@@ -37,64 +35,52 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
rCfg, err := revaconfig.FrontendConfigFromStruct(cfg, logger)
if err != nil {
return err
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
{
rCfg, err := revaconfig.FrontendConfigFromStruct(cfg, logger)
if err != nil {
return err
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
// run the appropriate reva servers based on the config
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
httpSvc := registry.BuildHTTPService(cfg.HTTP.Namespace+"."+cfg.Service.Name, cfg.HTTP.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, httpSvc, cfg.Debug.Addr); err != nil {
@@ -102,13 +88,23 @@ func Server(cfg *config.Config) *cli.Command {
}
// add event handler
gr.Add(func() error {
return ListenForEvents(ctx, cfg, logger)
}, func(_ error) {
cancel()
})
gr.Add(runner.New(cfg.Service.Name+".event",
func() error {
return ListenForEvents(ctx, cfg, logger)
}, func() {
logger.Info().Msg("stopping event handler")
},
))
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/gateway/pkg/config"
@@ -36,69 +34,63 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.GatewayConfigFromStruct(cfg, logger)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
logger.Info().
Str("server", cfg.Service.Name).
Msg("reva runtime exited")
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,9 +3,10 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/graph/pkg/config"
@@ -33,14 +34,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
mtrcs := metrics.New()
defer cancel()
mtrcs.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
{
server, err := http.Server(
http.Logger(logger),
@@ -54,23 +58,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
return server.Run()
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
@@ -84,13 +72,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(server.ListenAndServe, func(_ error) {
_ = server.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", server))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,14 +3,12 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/ldap"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/groups/pkg/config"
@@ -37,10 +35,6 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
// the reva runtime calls os.Exit in the case of a failure and there is no way for the OpenCloud
// runtime to catch it and restart a reva service. Therefore we need to ensure the service has
@@ -54,62 +48,62 @@ func Server(cfg *config.Config) *cli.Command {
}
}
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
gr := runner.NewGroup()
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.GroupsConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -7,18 +7,19 @@ import (
"fmt"
"html/template"
"os"
"os/signal"
"strings"
"github.com/go-ldap/ldif"
"github.com/libregraph/idm/pkg/ldappassword"
"github.com/libregraph/idm/pkg/ldbbolt"
"github.com/libregraph/idm/server"
"github.com/oklog/run"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
pkgcrypto "github.com/opencloud-eu/opencloud/pkg/crypto"
"github.com/opencloud-eu/opencloud/pkg/log"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/services/idm"
"github.com/opencloud-eu/opencloud/services/idm/pkg/config"
"github.com/opencloud-eu/opencloud/services/idm/pkg/config/parser"
@@ -36,14 +37,16 @@ func Server(cfg *config.Config) *cli.Command {
return configlog.ReturnFatal(parser.ParseConfig(cfg))
},
Action: func(c *cli.Context) error {
var (
gr = run.Group{}
logger = logging.Configure(cfg.Service.Name, cfg.Log)
ctx, cancel = context.WithCancel(c.Context)
)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
defer cancel()
logger := logging.Configure(cfg.Service.Name, cfg.Log)
gr := runner.NewGroup()
{
servercfg := server.Config{
Logger: log.LogrusWrap(logger.Logger),
@@ -75,30 +78,16 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
err := make(chan error, 1)
select {
case <-ctx.Done():
return nil
// we need an additional context for the idm server in order to
// cancel it anytime
svcCtx, svcCancel := context.WithCancel(ctx)
defer svcCancel()
case err <- svc.Serve(ctx):
return <-err
}
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.New(cfg.Service.Name+".svc", func() error {
return svc.Serve(svcCtx)
}, func() {
svcCancel()
}))
}
{
@@ -112,14 +101,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
//return start(ctx, logger, cfg)
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -12,10 +12,11 @@ import (
"io"
"io/fs"
"os"
"os/signal"
"path/filepath"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/idp/pkg/config"
@@ -57,16 +58,18 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
metrics = metrics.New()
)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
metrics := metrics.New()
metrics.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
{
server, err := http.Server(
http.Logger(logger),
@@ -84,23 +87,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
return server.Run()
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
@@ -114,13 +101,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,9 +3,10 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/invitations/pkg/config"
@@ -34,16 +35,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
metrics = metrics.New(metrics.Logger(logger))
)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
metrics := metrics.New(metrics.Logger(logger))
metrics.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
{
svc, err := service.New(
@@ -74,23 +76,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
return server.Run()
}, func(err error) {
if err != nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
@@ -104,13 +90,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -4,14 +4,13 @@ import (
"context"
"crypto/tls"
"fmt"
"time"
"github.com/oklog/run"
"os/signal"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
pkgcrypto "github.com/opencloud-eu/opencloud/pkg/crypto"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/services/nats/pkg/config"
"github.com/opencloud-eu/opencloud/services/nats/pkg/config/parser"
"github.com/opencloud-eu/opencloud/services/nats/pkg/logging"
@@ -31,11 +30,14 @@ func Server(cfg *config.Config) *cli.Command {
Action: func(c *cli.Context) error {
logger := logging.Configure(cfg.Service.Name, cfg.Log)
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
gr := runner.NewGroup()
{
debugServer, err := debug.Server(
debug.Logger(logger),
@@ -47,10 +49,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
var tlsConf *tls.Config
@@ -77,8 +76,7 @@ func Server(cfg *config.Config) *cli.Command {
}
}
natsServer, err := nats.NewNATSServer(
ctx,
logger,
logging.NewLogWrapper(logger),
nats.Host(cfg.Nats.Host),
nats.Port(cfg.Nats.Port),
nats.ClusterID(cfg.Nats.ClusterID),
@@ -90,40 +88,21 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
err := make(chan error, 1)
select {
case <-ctx.Done():
return nil
case err <- natsServer.ListenAndServe():
return <-err
}
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "nats").
Str("server", cfg.Service.Name).
Msg("letting other services deregister")
time.Sleep(3 * time.Second)
logger.Info().
Str("transport", "nats").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "nats").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
gr.Add(runner.New(cfg.Service.Name+".svc", func() error {
return natsServer.ListenAndServe()
}, func() {
natsServer.Shutdown()
cancel()
})
}))
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -1,24 +1,19 @@
package nats
import (
"context"
"time"
nserver "github.com/nats-io/nats-server/v2/server"
"github.com/opencloud-eu/opencloud/pkg/log"
"github.com/opencloud-eu/opencloud/services/nats/pkg/logging"
"github.com/rs/zerolog"
)
var NATSListenAndServeLoopTimer = 1 * time.Second
type NATSServer struct {
ctx context.Context
server *nserver.Server
}
// NatsOption configures the new NATSServer instance
func NewNATSServer(ctx context.Context, logger log.Logger, opts ...NatsOption) (*NATSServer, error) {
func NewNATSServer(logger nserver.Logger, opts ...NatsOption) (*NATSServer, error) {
natsOpts := &nserver.Options{}
for _, o := range opts {
@@ -35,19 +30,17 @@ func NewNATSServer(ctx context.Context, logger log.Logger, opts ...NatsOption) (
return nil, err
}
nLogger := logging.NewLogWrapper(logger)
server.SetLoggerV2(nLogger, logger.GetLevel() <= zerolog.DebugLevel, logger.GetLevel() <= zerolog.TraceLevel, false)
server.SetLoggerV2(logger, true, true, false)
return &NATSServer{
ctx: ctx,
server: server,
}, nil
}
// ListenAndServe runs the NATSServer in a blocking way until the server is shutdown or an error occurs
func (n *NATSServer) ListenAndServe() (err error) {
go n.server.Start()
<-n.ctx.Done()
n.server.Start() // it won't block
n.server.WaitForShutdown() // block until the server is fully shutdown
return nil
}

View File

@@ -3,13 +3,13 @@ package command
import (
"context"
"fmt"
"os/signal"
"reflect"
ehsvc "github.com/opencloud-eu/opencloud/protogen/gen/opencloud/services/eventhistory/v0"
"github.com/opencloud-eu/reva/v2/pkg/store"
microstore "go-micro.dev/v4/store"
"github.com/oklog/run"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/reva/v2/pkg/events"
@@ -19,6 +19,7 @@ import (
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/generators"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
settingssvc "github.com/opencloud-eu/opencloud/protogen/gen/opencloud/services/settings/v0"
@@ -57,11 +58,14 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
gr := runner.NewGroup()
{
debugServer, err := debug.Server(
debug.Logger(logger),
@@ -73,10 +77,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
// evs defines a list of events to subscribe to
@@ -139,11 +140,21 @@ func Server(cfg *config.Config) *cli.Command {
cfg.Notifications.EmailTemplatePath, cfg.Notifications.DefaultLanguage, cfg.WebUIURL,
cfg.Notifications.TranslationPath, cfg.Notifications.SMTP.Sender, notificationStore, historyClient, registeredEvents)
gr.Add(svc.Run, func(error) {
cancel()
})
gr.Add(runner.New(cfg.Service.Name+".svc", func() error {
return svc.Run()
}, func() {
svc.Close()
}))
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -5,11 +5,10 @@ import (
"errors"
"fmt"
"net/url"
"os"
"os/signal"
"path"
"strings"
"syscall"
"sync"
"sync/atomic"
ehsvc "github.com/opencloud-eu/opencloud/protogen/gen/opencloud/services/eventhistory/v0"
"go-micro.dev/v4/store"
@@ -44,6 +43,7 @@ func init() {
// Service should be named `Runner`
type Service interface {
Run() error
Close()
}
// NewEventsNotifier provides a new eventsNotifier
@@ -62,7 +62,6 @@ func NewEventsNotifier(
logger: logger,
channel: channel,
events: events,
signals: make(chan os.Signal, 1),
gatewaySelector: gatewaySelector,
valueService: valueService,
serviceAccountID: serviceAccountID,
@@ -76,6 +75,8 @@ func NewEventsNotifier(
splitter: newIntervalSplitter(logger, valueService),
userEventStore: newUserEventStore(logger, store, historyClient),
registeredEvents: registeredEvents,
stopCh: make(chan struct{}, 1),
stopped: new(atomic.Bool),
}
}
@@ -83,7 +84,6 @@ type eventsNotifier struct {
logger log.Logger
channel channels.Channel
events <-chan events.Event
signals chan os.Signal
gatewaySelector pool.Selectable[gateway.GatewayAPIClient]
valueService settingssvc.ValueService
emailTemplatePath string
@@ -97,16 +97,27 @@ type eventsNotifier struct {
splitter *intervalSplitter
userEventStore *userEventStore
registeredEvents map[string]events.Unmarshaller
stopCh chan struct{}
stopped *atomic.Bool
}
func (s eventsNotifier) Run() error {
signal.Notify(s.signals, syscall.SIGINT, syscall.SIGTERM)
var wg sync.WaitGroup
s.logger.Debug().
Msg("eventsNotifier started")
EventLoop:
for {
select {
case evt := <-s.events:
case evt, ok := <-s.events:
if !ok {
break EventLoop
}
// TODO: needs to be replaced with a worker pool
wg.Add(1)
go func() {
defer wg.Done()
switch e := evt.Event.(type) {
case events.SpaceShared:
s.handleSpaceShared(e, evt.ID)
@@ -124,12 +135,25 @@ func (s eventsNotifier) Run() error {
s.sendGroupedEmailsJob(e, evt.ID)
}
}()
case <-s.signals:
if s.stopped.Load() {
break EventLoop
}
case <-s.stopCh:
s.logger.Debug().
Msg("eventsNotifier stopped")
return nil
break EventLoop
}
}
// wait until all the goroutines processing events have finished
wg.Wait()
return nil
}
func (s eventsNotifier) Close() {
if s.stopped.CompareAndSwap(false, true) {
close(s.stopCh)
}
}
func (s eventsNotifier) render(ctx context.Context, template email.MessageTemplate,

View File

@@ -3,11 +3,13 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/broker"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
ohttp "github.com/opencloud-eu/opencloud/pkg/service/http"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/ocdav/pkg/config"
@@ -34,85 +36,77 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
gr.Add(func() error {
// init reva shared config explicitly as the go-micro based ocdav does not use
// the reva runtime. But we need e.g. the shared client settings to be initialized
sc := map[string]interface{}{
"jwt_secret": cfg.TokenManager.JWTSecret,
"gatewaysvc": cfg.Reva.Address,
"skip_user_groups_in_token": cfg.SkipUserGroupsInToken,
"grpc_client_options": cfg.Reva.GetGRPCClientConfig(),
}
if err := sharedconf.Decode(sc); err != nil {
logger.Error().Err(err).Msg("error decoding shared config for ocdav")
}
opts := []ocdav.Option{
ocdav.Name(cfg.HTTP.Namespace + "." + cfg.Service.Name),
ocdav.Version(version.GetString()),
ocdav.Context(ctx),
ocdav.Logger(logger.Logger),
ocdav.Address(cfg.HTTP.Addr),
ocdav.AllowCredentials(cfg.HTTP.CORS.AllowCredentials),
ocdav.AllowedMethods(cfg.HTTP.CORS.AllowedMethods),
ocdav.AllowedHeaders(cfg.HTTP.CORS.AllowedHeaders),
ocdav.AllowedOrigins(cfg.HTTP.CORS.AllowedOrigins),
ocdav.FilesNamespace(cfg.FilesNamespace),
ocdav.WebdavNamespace(cfg.WebdavNamespace),
ocdav.OCMNamespace(cfg.OCMNamespace),
ocdav.AllowDepthInfinity(cfg.AllowPropfindDepthInfinity),
ocdav.SharesNamespace(cfg.SharesNamespace),
ocdav.Timeout(cfg.Timeout),
ocdav.Insecure(cfg.Insecure),
ocdav.PublicURL(cfg.PublicURL),
ocdav.Prefix(cfg.HTTP.Prefix),
ocdav.GatewaySvc(cfg.Reva.Address),
ocdav.JWTSecret(cfg.TokenManager.JWTSecret),
ocdav.ProductName(cfg.Status.ProductName),
ocdav.ProductVersion(cfg.Status.ProductVersion),
ocdav.Product(cfg.Status.Product),
ocdav.Version(cfg.Status.Version),
ocdav.VersionString(cfg.Status.VersionString),
ocdav.Edition(cfg.Status.Edition),
ocdav.MachineAuthAPIKey(cfg.MachineAuthAPIKey),
ocdav.Broker(broker.NoOp{}),
// ocdav.FavoriteManager() // FIXME needs a proper persistence implementation https://github.com/owncloud/ocis/issues/1228
// ocdav.LockSystem(), // will default to the CS3 lock system
// ocdav.TLSConfig() // tls config for the http server
ocdav.MetricsEnabled(true),
ocdav.MetricsNamespace("opencloud"),
ocdav.Tracing("Adding these strings is a workaround for ->", "https://github.com/cs3org/reva/issues/4131"),
ocdav.WithTraceProvider(traceProvider),
ocdav.RegisterTTL(registry.GetRegisterTTL()),
ocdav.RegisterInterval(registry.GetRegisterInterval()),
ocdav.URLSigningSharedSecret(cfg.URLSigningSharedSecret),
}
gr := runner.NewGroup()
s, err := ocdav.Service(opts...)
if err != nil {
return err
}
// init reva shared config explicitly as the go-micro based ocdav does not use
// the reva runtime. But we need e.g. the shared client settings to be initialized
sc := map[string]interface{}{
"jwt_secret": cfg.TokenManager.JWTSecret,
"gatewaysvc": cfg.Reva.Address,
"skip_user_groups_in_token": cfg.SkipUserGroupsInToken,
"grpc_client_options": cfg.Reva.GetGRPCClientConfig(),
}
if err := sharedconf.Decode(sc); err != nil {
logger.Error().Err(err).Msg("error decoding shared config for ocdav")
}
opts := []ocdav.Option{
ocdav.Name(cfg.HTTP.Namespace + "." + cfg.Service.Name),
ocdav.Version(version.GetString()),
ocdav.Context(ctx),
ocdav.Logger(logger.Logger),
ocdav.Address(cfg.HTTP.Addr),
ocdav.AllowCredentials(cfg.HTTP.CORS.AllowCredentials),
ocdav.AllowedMethods(cfg.HTTP.CORS.AllowedMethods),
ocdav.AllowedHeaders(cfg.HTTP.CORS.AllowedHeaders),
ocdav.AllowedOrigins(cfg.HTTP.CORS.AllowedOrigins),
ocdav.FilesNamespace(cfg.FilesNamespace),
ocdav.WebdavNamespace(cfg.WebdavNamespace),
ocdav.OCMNamespace(cfg.OCMNamespace),
ocdav.AllowDepthInfinity(cfg.AllowPropfindDepthInfinity),
ocdav.SharesNamespace(cfg.SharesNamespace),
ocdav.Timeout(cfg.Timeout),
ocdav.Insecure(cfg.Insecure),
ocdav.PublicURL(cfg.PublicURL),
ocdav.Prefix(cfg.HTTP.Prefix),
ocdav.GatewaySvc(cfg.Reva.Address),
ocdav.JWTSecret(cfg.TokenManager.JWTSecret),
ocdav.ProductName(cfg.Status.ProductName),
ocdav.ProductVersion(cfg.Status.ProductVersion),
ocdav.Product(cfg.Status.Product),
ocdav.Version(cfg.Status.Version),
ocdav.VersionString(cfg.Status.VersionString),
ocdav.Edition(cfg.Status.Edition),
ocdav.MachineAuthAPIKey(cfg.MachineAuthAPIKey),
ocdav.Broker(broker.NoOp{}),
// ocdav.FavoriteManager() // FIXME needs a proper persistence implementation https://github.com/owncloud/ocis/issues/1228
// ocdav.LockSystem(), // will default to the CS3 lock system
// ocdav.TLSConfig() // tls config for the http server
ocdav.MetricsEnabled(true),
ocdav.MetricsNamespace("ocis"),
ocdav.Tracing("Adding these strings is a workaround for ->", "https://github.com/cs3org/reva/issues/4131"),
ocdav.WithTraceProvider(traceProvider),
ocdav.RegisterTTL(registry.GetRegisterTTL()),
ocdav.RegisterInterval(registry.GetRegisterInterval()),
ocdav.URLSigningSharedSecret(cfg.URLSigningSharedSecret),
}
return s.Run()
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
s, err := ocdav.Service(opts...)
if err != nil {
return err
}
cancel()
})
// creating a runner for a go-micro service is a bit complex, so we'll
// wrap the go-micro service with an ocis service the same way as
// ocis-pkg/service/http is doing in order to reuse the factory.
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", ohttp.Service{Service: s}))
debugServer, err := debug.Server(
debug.Logger(logger),
@@ -125,12 +119,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/ocm/pkg/config"
@@ -36,61 +34,48 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
rCfg := revaconfig.OCMConfigFromStruct(cfg, logger)
gr := runner.NewGroup()
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.OCMConfigFromStruct(cfg, logger)
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
@@ -102,7 +87,15 @@ func Server(cfg *config.Config) *cli.Command {
logger.Fatal().Err(err).Msg("failed to register the http service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,9 +3,10 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -39,16 +40,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
metrics = metrics.New()
)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
metrics := metrics.New()
metrics.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
{
server, err := http.Server(
http.Logger(logger),
@@ -67,27 +69,11 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
return server.Run()
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
server, err := debug.Server(
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
@@ -98,13 +84,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(server.ListenAndServe, func(_ error) {
_ = server.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,14 +3,15 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/pkg/events/stream"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/generators"
"github.com/opencloud-eu/opencloud/pkg/log"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -33,18 +34,20 @@ func Server(cfg *config.Config) *cli.Command {
return configlog.ReturnFatal(parser.ParseConfig(cfg))
},
Action: func(c *cli.Context) error {
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
logger = log.NewLogger(
log.Name(cfg.Service.Name),
log.Level(cfg.Log.Level),
log.Pretty(cfg.Log.Pretty),
log.Color(cfg.Log.Color),
log.File(cfg.Log.File),
).SubloggerWithRequestID(ctx)
)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
logger := log.NewLogger(
log.Name(cfg.Service.Name),
log.Level(cfg.Log.Level),
log.Pretty(cfg.Log.Pretty),
log.Color(cfg.Log.Color),
log.File(cfg.Log.File),
).SubloggerWithRequestID(ctx)
traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name)
if err != nil {
@@ -56,6 +59,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr := runner.NewGroup()
{
grpcClient, err := grpc.NewClient(
append(
@@ -98,9 +102,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(svc.Run, func(_ error) {
cancel()
})
gr.Add(runner.NewGoMicroGrpcServerRunner(cfg.Service.Name+".grpc", svc))
}
{
@@ -116,9 +118,11 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(eventSvc.Run, func(_ error) {
cancel()
})
gr.Add(runner.New(cfg.Service.Name+".svc", func() error {
return eventSvc.Run()
}, func() {
eventSvc.Close()
}))
}
{
@@ -132,13 +136,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -2,6 +2,7 @@ package eventSVC
import (
"context"
"sync/atomic"
"github.com/opencloud-eu/opencloud/pkg/log"
"github.com/opencloud-eu/opencloud/services/policies/pkg/engine"
@@ -11,23 +12,27 @@ import (
// Service defines the service handlers.
type Service struct {
ctx context.Context
query string
log log.Logger
stream events.Stream
engine engine.Engine
tp trace.TracerProvider
ctx context.Context
query string
log log.Logger
stream events.Stream
engine engine.Engine
tp trace.TracerProvider
stopCh chan struct{}
stopped *atomic.Bool
}
// New returns a service implementation for Service.
func New(ctx context.Context, stream events.Stream, logger log.Logger, tp trace.TracerProvider, engine engine.Engine, query string) (Service, error) {
svc := Service{
ctx: ctx,
log: logger,
query: query,
tp: tp,
engine: engine,
stream: stream,
ctx: ctx,
log: logger,
query: query,
tp: tp,
engine: engine,
stream: stream,
stopCh: make(chan struct{}, 1),
stopped: new(atomic.Bool),
}
return svc, nil
@@ -40,16 +45,42 @@ func (s Service) Run() error {
return err
}
for e := range ch {
err := s.processEvent(e)
if err != nil {
return err
EventLoop:
for {
select {
case <-s.stopCh:
break EventLoop
case e, ok := <-ch:
if !ok {
break EventLoop
}
err := s.processEvent(e)
if err != nil {
return err
}
if s.stopped.Load() {
break EventLoop
}
}
}
return nil
}
// Close will make the policies service to stop processing, so the `Run`
// method can finish.
// TODO: Underlying services can't be stopped. This means that some goroutines
// will get stuck trying to push events through a channel nobody is reading
// from, so resources won't be freed and there will be memory leaks. For now,
// if the service is stopped, you should close the app soon after.
func (s Service) Close() {
if s.stopped.CompareAndSwap(false, true) {
close(s.stopCh)
}
}
func (s Service) processEvent(e events.Event) error {
ctx := e.GetTraceContext(s.ctx)
ctx, span := s.tp.Tracer("policies").Start(ctx, "processEvent")

View File

@@ -4,12 +4,13 @@ import (
"context"
"fmt"
"os"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/pkg/store"
"github.com/urfave/cli/v2"
microstore "go-micro.dev/v4/store"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/services/postprocessing/pkg/config"
"github.com/opencloud-eu/opencloud/services/postprocessing/pkg/config/parser"
@@ -33,18 +34,21 @@ func Server(cfg *config.Config) *cli.Command {
return err
},
Action: func(c *cli.Context) error {
var (
gr = run.Group{}
logger = logging.Configure(cfg.Service.Name, cfg.Log)
ctx, cancel = context.WithCancel(c.Context)
)
defer cancel()
logger := logging.Configure(cfg.Service.Name, cfg.Log)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
traceProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name)
if err != nil {
return err
}
gr := runner.NewGroup()
{
st := store.Create(
store.Store(cfg.Store.Store),
@@ -59,30 +63,12 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr.Add(func() error {
err := make(chan error, 1)
select {
case <-ctx.Done():
return nil
case err <- svc.Run():
return <-err
}
}, func(err error) {
if err != nil {
logger.Info().
Str("transport", "stream").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "stream").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.New(cfg.Service.Name+".svc", func() error {
return svc.Run()
}, func() {
svc.Close()
}))
}
{
@@ -96,12 +82,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner("postprocessing_debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/opencloud-eu/opencloud/pkg/generators"
@@ -34,6 +35,8 @@ type PostprocessingService struct {
c config.Postprocessing
tp trace.TracerProvider
metrics *metrics.Metrics
stopCh chan struct{}
stopped atomic.Bool
}
var (
@@ -97,6 +100,7 @@ func NewPostprocessingService(ctx context.Context, logger log.Logger, sto store.
c: cfg.Postprocessing,
tp: tp,
metrics: m,
stopCh: make(chan struct{}, 1),
}, nil
}
@@ -104,30 +108,70 @@ func NewPostprocessingService(ctx context.Context, logger log.Logger, sto store.
func (pps *PostprocessingService) Run() error {
wg := sync.WaitGroup{}
for i := 0; i < pps.c.Workers; i++ {
for range pps.c.Workers {
wg.Add(1)
go func() {
defer wg.Done()
for e := range pps.events {
if err := pps.processEvent(e); err != nil {
switch {
case errors.Is(err, ErrFatal):
pps.log.Fatal().Err(err).Msg("fatal error - exiting")
case errors.Is(err, ErrEvent):
pps.log.Error().Err(err).Msg("continuing")
default:
pps.log.Fatal().Err(err).Msg("unknown error - exiting")
EventLoop:
for {
select {
case <-pps.stopCh:
// stop requested
// TODO: we might need a way to unsubscribe from the event channel, otherwise
// we'll be leaking a goroutine in reva that will be stuck waiting for
// someone to read from the event channel.
// Note: redis implementation seems to have a timeout, so the goroutine
// will exit if there is nobody processing the events and the timeout
// is reached. The behavior is unclear with natsjs
break EventLoop
case e, ok := <-pps.events:
if !ok {
// event channel is closed, so nothing more to do
break EventLoop
}
err := pps.processEvent(e)
if err != nil {
switch {
case errors.Is(err, ErrFatal):
pps.log.Fatal().Err(err).Msg("fatal error - exiting")
case errors.Is(err, ErrEvent):
pps.log.Error().Err(err).Msg("continuing")
default:
pps.log.Fatal().Err(err).Msg("unknown error - exiting")
}
}
if pps.stopped.Load() {
// if stopped, don't process any more events
break EventLoop
}
}
}
}()
}
wg.Wait()
return nil
}
// Close will make the postprocessing service to stop processing, so the `Run`
// method can finish.
// TODO: Underlying services can't be stopped. This means that some goroutines
// will get stuck trying to push events through a channel nobody is reading
// from, so resources won't be freed and there will be memory leaks. For now,
// if the service is stopped, you should close the app soon after.
func (pps *PostprocessingService) Close() {
if pps.stopped.CompareAndSwap(false, true) {
close(pps.stopCh)
}
}
func (pps *PostprocessingService) processEvent(e raw.Event) error {
pps.log.Debug().Str("Type", e.Type).Str("ID", e.ID).Msg("processing event received")
var (
next interface{}
pp *postprocessing.Postprocessing

View File

@@ -5,18 +5,19 @@ import (
"crypto/tls"
"fmt"
"net/http"
"os/signal"
"time"
gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
chimiddleware "github.com/go-chi/chi/v5/middleware"
"github.com/justinas/alice"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/generators"
"github.com/opencloud-eu/opencloud/pkg/log"
pkgmiddleware "github.com/opencloud-eu/opencloud/pkg/middleware"
"github.com/opencloud-eu/opencloud/pkg/oidc"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -107,13 +108,13 @@ func Server(cfg *config.Config) *cli.Command {
oidc.WithJWKSOptions(cfg.OIDC.JWKS),
)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
m := metrics.New()
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
m.BuildInfo.WithLabelValues(version.GetString()).Set(1)
rp, err := proxy.NewMultiHostReverseProxy(
@@ -183,13 +184,14 @@ func Server(cfg *config.Config) *cli.Command {
return fmt.Errorf("failed to initialize reverse proxy: %w", err)
}
gr := runner.NewGroup()
{
middlewares := loadMiddlewares(logger, cfg, userInfoCache, signingKeyStore, traceProvider, *m, userProvider, publisher, gatewaySelector, serviceSelector)
server, err := proxyHTTP.Server(
proxyHTTP.Handler(lh.Handler()),
proxyHTTP.Logger(logger),
proxyHTTP.Context(ctx),
proxyHTTP.Context(cfg.Context),
proxyHTTP.Config(cfg),
proxyHTTP.Metrics(metrics.New()),
proxyHTTP.Middlewares(middlewares),
@@ -203,29 +205,13 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
return server.Run()
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Context(cfg.Context),
debug.Config(cfg),
)
if err != nil {
@@ -233,13 +219,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(cfg.Context)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,9 +3,10 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -40,13 +41,19 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
mtrcs := metrics.New()
mtrcs.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
grpcServer, teardown, err := grpc.Server(
grpc.Config(cfg),
grpc.Logger(logger),
@@ -62,21 +69,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(grpcServer.Run, func(_ error) {
if err == nil {
logger.Info().
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroGrpcServerRunner(cfg.Service.Name+".grpc", grpcServer))
debugServer, err := debug.Server(
debug.Logger(logger),
@@ -88,12 +81,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,9 +3,10 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -42,15 +43,20 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
servers := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
mtrcs := metrics.New()
mtrcs.BuildInfo.WithLabelValues(version.GetString()).Set(1)
handle := svc.NewDefaultLanguageService(cfg, svc.NewService(cfg, logger))
gr := runner.NewGroup()
// prepare an HTTP server and add it to the group run.
httpServer, err := http.Server(
http.Name(cfg.Service.Name),
@@ -67,21 +73,7 @@ func Server(cfg *config.Config) *cli.Command {
Msg("Error initializing http service")
return fmt.Errorf("could not initialize http service: %w", err)
}
servers.Add(httpServer.Run, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", httpServer))
// prepare a gRPC server and add it to the group run.
grpcServer := grpc.Server(
@@ -93,21 +85,7 @@ func Server(cfg *config.Config) *cli.Command {
grpc.ServiceHandler(handle),
grpc.TraceProvider(traceProvider),
)
servers.Add(grpcServer.Run, func(_ error) {
if err == nil {
logger.Info().
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroGrpcServerRunner(cfg.Service.Name+".grpc", grpcServer))
// prepare a debug server and add it to the group run.
debugServer, err := debug.Server(
@@ -120,12 +98,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
servers.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
return servers.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -4,13 +4,12 @@ import (
"context"
"fmt"
"os"
"path"
"os/signal"
"path/filepath"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/sharing/pkg/config"
@@ -37,10 +36,6 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
// precreate folders
if cfg.UserSharingDriver == "json" && cfg.UserSharingDrivers.JSON.File != "" {
@@ -54,65 +49,66 @@ func Server(cfg *config.Config) *cli.Command {
}
}
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
gr := runner.NewGroup()
{
rCfg, err := revaconfig.SharingConfigFromStruct(cfg, logger)
if err != nil {
return err
}
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
// run the appropriate reva servers based on the config
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,8 +3,8 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/events/stream"
"github.com/urfave/cli/v2"
@@ -12,6 +12,7 @@ import (
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/generators"
"github.com/opencloud-eu/opencloud/pkg/log"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/services/sse/pkg/config"
"github.com/opencloud-eu/opencloud/services/sse/pkg/config/parser"
@@ -34,24 +35,27 @@ func Server(cfg *config.Config) *cli.Command {
return configlog.ReturnFatal(parser.ParseConfig(cfg))
},
Action: func(c *cli.Context) error {
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
logger = log.NewLogger(
log.Name(cfg.Service.Name),
log.Level(cfg.Log.Level),
log.Pretty(cfg.Log.Pretty),
log.Color(cfg.Log.Color),
log.File(cfg.Log.File),
)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
logger := log.NewLogger(
log.Name(cfg.Service.Name),
log.Level(cfg.Log.Level),
log.Pretty(cfg.Log.Pretty),
log.Color(cfg.Log.Color),
log.File(cfg.Log.File),
)
defer cancel()
tracerProvider, err := tracing.GetServiceTraceProvider(cfg.Tracing, cfg.Service.Name)
if err != nil {
return err
}
gr := runner.NewGroup()
{
connName := generators.GenerateConnectionName(cfg.Service.Name, generators.NTypeBus)
natsStream, err := stream.NatsFromConfig(connName, true, stream.NatsConfig(cfg.Events))
@@ -71,9 +75,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(server.Run, func(_ error) {
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
@@ -87,13 +89,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/storage-publiclink/pkg/config"
@@ -36,67 +34,63 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.StoragePublicLinkConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner("storage-publiclink_debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/storage-shares/pkg/config"
@@ -36,67 +34,63 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.StorageSharesConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner("storage-shares_debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,16 +3,14 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/cmd/revad/runtime"
"github.com/urfave/cli/v2"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/storage-system/pkg/config"
@@ -37,60 +35,48 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.StorageSystemFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner("storage-system_debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
@@ -102,7 +88,15 @@ func Server(cfg *config.Config) *cli.Command {
logger.Fatal().Err(err).Msg("failed to register the http service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,13 +3,11 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/storage-users/pkg/config"
@@ -38,60 +36,48 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
gr := runner.NewGroup()
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.StorageUsersConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(err error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner("storage-users_debug", debugServer))
}
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
if err := registry.RegisterService(ctx, logger, grpcSvc, cfg.Debug.Addr); err != nil {
@@ -113,25 +99,24 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
logger.Fatal().Err(err).Msg("can't create event handler")
}
gr.Add(eventSVC.Run, func(err error) {
if err == nil {
logger.Info().
Str("transport", "stream").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "stream").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
// The event service Run() function handles the stop signal itself
go func() {
err := eventSVC.Run()
if err != nil {
logger.Fatal().Err(err).Msg("can't run event server")
}
cancel()
})
}()
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,9 +3,10 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -40,16 +41,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
m = metrics.New()
)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
m := metrics.New()
m.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
service := grpc.NewService(
grpc.Logger(logger),
grpc.Context(ctx),
@@ -61,22 +64,7 @@ func Server(cfg *config.Config) *cli.Command {
grpc.TraceProvider(traceProvider),
grpc.MaxConcurrentRequests(cfg.GRPC.MaxConcurrentRequests),
)
gr.Add(service.Run, func(_ error) {
if err == nil {
logger.Info().
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "grpc").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroGrpcServerRunner(cfg.Service.Name+".grpc", service))
server, err := debug.Server(
debug.Logger(logger),
@@ -87,11 +75,7 @@ func Server(cfg *config.Config) *cli.Command {
logger.Info().Err(err).Str("transport", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(server.ListenAndServe, func(_ error) {
_ = server.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", server))
httpServer, err := http.Server(
http.Logger(logger),
@@ -109,24 +93,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", httpServer))
gr.Add(httpServer.Run, func(_ error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
cancel()
})
return gr.Run()
}
return nil
},
}
}

View File

@@ -3,8 +3,8 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/events/stream"
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
@@ -15,6 +15,7 @@ import (
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/generators"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -69,14 +70,16 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
mtrcs := metrics.New()
mtrcs.BuildInfo.WithLabelValues(version.GetString()).Set(1)
defer cancel()
connName := generators.GenerateConnectionName(cfg.Service.Name, generators.NTypeBus)
stream, err := stream.NatsFromConfig(connName, false, stream.NatsConfig(cfg.Events))
if err != nil {
@@ -111,6 +114,7 @@ func Server(cfg *config.Config) *cli.Command {
vClient := settingssvc.NewValueService("eu.opencloud.api.settings", grpcClient)
rClient := settingssvc.NewRoleService("eu.opencloud.api.settings", grpcClient)
gr := runner.NewGroup()
{
server, err := http.Server(
http.Logger(logger),
@@ -132,23 +136,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
return server.Run()
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
@@ -162,13 +150,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,14 +3,12 @@ package command
import (
"context"
"fmt"
"os"
"path"
"os/signal"
"github.com/gofrs/uuid"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/ldap"
"github.com/opencloud-eu/opencloud/pkg/registry"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/users/pkg/config"
@@ -37,10 +35,6 @@ func Server(cfg *config.Config) *cli.Command {
if err != nil {
return err
}
gr := run.Group{}
ctx, cancel := context.WithCancel(c.Context)
defer cancel()
// the reva runtime calls os.Exit in the case of a failure and there is no way for the OpenCloud
// runtime to catch it and restart a reva service. Therefore we need to ensure the service has
@@ -54,55 +48,47 @@ func Server(cfg *config.Config) *cli.Command {
}
}
// make sure the run group executes all interrupt handlers when the context is canceled
gr.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
})
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
gr.Add(func() error {
pidFile := path.Join(os.TempDir(), "revad-"+cfg.Service.Name+"-"+uuid.Must(uuid.NewV4()).String()+".pid")
gr := runner.NewGroup()
{
// run the appropriate reva servers based on the config
rCfg := revaconfig.UsersConfigFromStruct(cfg)
reg := registry.GetRegistry()
runtime.RunWithOptions(rCfg, pidFile,
if rServer := runtime.NewDrivenHTTPServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(reg),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
)
return nil
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "reva").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rhttp", rServer))
}
if rServer := runtime.NewDrivenGRPCServerWithOptions(rCfg,
runtime.WithLogger(&logger.Logger),
runtime.WithRegistry(registry.GetRegistry()),
runtime.WithTraceProvider(traceProvider),
); rServer != nil {
gr.Add(runner.NewRevaServiceRunner(cfg.Service.Name+".rgrpc", rServer))
}
cancel()
})
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
{
debugServer, err := debug.Server(
debug.Logger(logger),
debug.Context(ctx),
debug.Config(cfg),
)
if err != nil {
logger.Info().Err(err).Str("server", "debug").Msg("Failed to initialize server")
return err
}
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
// FIXME we should defer registering the service until we are sure that reva is running
grpcSvc := registry.BuildGRPCService(cfg.GRPC.Namespace+"."+cfg.Service.Name, cfg.GRPC.Protocol, cfg.GRPC.Addr, version.GetString())
@@ -110,7 +96,15 @@ func Server(cfg *config.Config) *cli.Command {
logger.Fatal().Err(err).Msg("failed to register the grpc service")
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -5,9 +5,10 @@ import (
"encoding/json"
"fmt"
"os"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/services/web/pkg/config"
"github.com/opencloud-eu/opencloud/services/web/pkg/config/parser"
@@ -47,14 +48,16 @@ func Server(cfg *config.Config) *cli.Command {
}
}
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
m = metrics.New()
)
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
defer cancel()
m := metrics.New()
gr := runner.NewGroup()
{
server, err := http.Server(
http.Logger(logger),
@@ -73,30 +76,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
err := server.Run()
if err != nil {
logger.Error().
Err(err).
Str("transport", "http").
Msg("Failed to start server")
}
return err
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
@@ -110,13 +90,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(_ error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,9 +3,10 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
@@ -41,16 +42,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
metrics = metrics.New()
)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
metrics := metrics.New()
metrics.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
{
server, err := http.Server(
http.Logger(logger),
@@ -69,23 +71,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
return server.Run()
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
@@ -100,13 +86,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(err error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -3,9 +3,10 @@ package command
import (
"context"
"fmt"
"os/signal"
"github.com/oklog/run"
"github.com/opencloud-eu/opencloud/pkg/config/configlog"
"github.com/opencloud-eu/opencloud/pkg/runner"
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
"github.com/opencloud-eu/opencloud/services/webfinger/pkg/config"
@@ -35,16 +36,17 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
var (
gr = run.Group{}
ctx, cancel = context.WithCancel(c.Context)
m = metrics.New(metrics.Logger(logger))
)
defer cancel()
var cancel context.CancelFunc
if cfg.Context == nil {
cfg.Context, cancel = signal.NotifyContext(context.Background(), runner.StopSignals...)
defer cancel()
}
ctx := cfg.Context
m := metrics.New(metrics.Logger(logger))
m.BuildInfo.WithLabelValues(version.GetString()).Set(1)
gr := runner.NewGroup()
{
relationProviders, err := getRelationProviders(cfg)
if err != nil {
@@ -82,23 +84,7 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(func() error {
return server.Run()
}, func(err error) {
if err == nil {
logger.Info().
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
} else {
logger.Error().Err(err).
Str("transport", "http").
Str("server", cfg.Service.Name).
Msg("Shutting down server")
}
cancel()
})
gr.Add(runner.NewGoMicroHttpServerRunner(cfg.Service.Name+".http", server))
}
{
@@ -113,13 +99,18 @@ func Server(cfg *config.Config) *cli.Command {
return err
}
gr.Add(debugServer.ListenAndServe, func(err error) {
_ = debugServer.Shutdown(ctx)
cancel()
})
gr.Add(runner.NewGolangHttpServerRunner(cfg.Service.Name+".debug", debugServer))
}
return gr.Run()
grResults := gr.Run(ctx)
// return the first non-nil error found in the results
for _, grResult := range grResults {
if grResult.RunnerError != nil {
return grResult.RunnerError
}
}
return nil
},
}
}

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 Antithesis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,151 @@
//go:build enable_antithesis_sdk
// Package assert enables defining [test properties] about your program or [workload]. It is part of the [Antithesis Go SDK], which enables Go applications to integrate with the [Antithesis platform].
//
// Code that uses this package should be instrumented with the [antithesis-go-generator] utility. This step is required for the Always, Sometime, and Reachable methods. It is not required for the Unreachable and AlwaysOrUnreachable methods, but it will improve the experience of using them.
//
// These functions are no-ops with minimal performance overhead when called outside of the Antithesis environment. However, if the environment variable ANTITHESIS_SDK_LOCAL_OUTPUT is set, these functions will log to the file pointed to by that variable using a structured JSON format defined [here]. This allows you to make use of the Antithesis assertions package in your regular testing, or even in production. In particular, very few assertions frameworks offer a convenient way to define [Sometimes assertions], but they can be quite useful even outside Antithesis.
//
// Each function in this package takes a parameter called message, which is a human readable identifier used to aggregate assertions. Antithesis generates one test property per unique message and this test property will be named "<message>" in the [triage report].
//
// This test property either passes or fails, which depends upon the evaluation of every assertion that shares its message. Different assertions in different parts of the code should have different message, but the same assertion should always have the same message even if it is moved to a different file.
//
// Each function also takes a parameter called details, which is a key-value map of optional additional information provided by the user to add context for assertion failures. The information that is logged will appear in the [triage report], under the details section of the corresponding property. Normally the values passed to details are evaluated at runtime.
//
// [Antithesis Go SDK]: https://antithesis.com/docs/using_antithesis/sdk/go/
// [Antithesis platform]: https://antithesis.com
// [test properties]: https://antithesis.com/docs/using_antithesis/properties/
// [workload]: https://antithesis.com/docs/getting_started/first_test/
// [antithesis-go-generator]: https://antithesis.com/docs/using_antithesis/sdk/go/instrumentor/
// [triage report]: https://antithesis.com/docs/reports/triage/
// [here]: https://antithesis.com/docs/using_antithesis/sdk/fallback/
// [Sometimes assertions]: https://antithesis.com/docs/best_practices/sometimes_assertions/
//
// [details]: https://antithesis.com/docs/reports/triage/#details
package assert
type assertInfo struct {
Location *locationInfo `json:"location"`
Details map[string]any `json:"details"`
AssertType string `json:"assert_type"`
DisplayType string `json:"display_type"`
Message string `json:"message"`
Id string `json:"id"`
Hit bool `json:"hit"`
MustHit bool `json:"must_hit"`
Condition bool `json:"condition"`
}
type wrappedAssertInfo struct {
A *assertInfo `json:"antithesis_assert"`
}
// --------------------------------------------------------------------------------
// Assertions
// --------------------------------------------------------------------------------
const (
wasHit = true
mustBeHit = true
optionallyHit = false
expectingTrue = true
)
const (
universalTest = "always"
existentialTest = "sometimes"
reachabilityTest = "reachability"
)
const (
alwaysDisplay = "Always"
alwaysOrUnreachableDisplay = "AlwaysOrUnreachable"
sometimesDisplay = "Sometimes"
reachableDisplay = "Reachable"
unreachableDisplay = "Unreachable"
)
// Always asserts that condition is true every time this function is called, and that it is called at least once. The corresponding test property will be viewable in the Antithesis SDK: Always group of your triage report.
func Always(condition bool, message string, details map[string]any) {
locationInfo := newLocationInfo(offsetAPICaller)
id := makeKey(message, locationInfo)
assertImpl(condition, message, details, locationInfo, wasHit, mustBeHit, universalTest, alwaysDisplay, id)
}
// AlwaysOrUnreachable asserts that condition is true every time this function is called. The corresponding test property will pass if the assertion is never encountered (unlike Always assertion types). This test property will be viewable in the “Antithesis SDK: Always” group of your triage report.
func AlwaysOrUnreachable(condition bool, message string, details map[string]any) {
locationInfo := newLocationInfo(offsetAPICaller)
id := makeKey(message, locationInfo)
assertImpl(condition, message, details, locationInfo, wasHit, optionallyHit, universalTest, alwaysOrUnreachableDisplay, id)
}
// Sometimes asserts that condition is true at least one time that this function was called. (If the assertion is never encountered, the test property will therefore fail.) This test property will be viewable in the “Antithesis SDK: Sometimes” group.
func Sometimes(condition bool, message string, details map[string]any) {
locationInfo := newLocationInfo(offsetAPICaller)
id := makeKey(message, locationInfo)
assertImpl(condition, message, details, locationInfo, wasHit, mustBeHit, existentialTest, sometimesDisplay, id)
}
// Unreachable asserts that a line of code is never reached. The corresponding test property will fail if this function is ever called. (If it is never called the test property will therefore pass.) This test property will be viewable in the “Antithesis SDK: Reachablity assertions” group.
func Unreachable(message string, details map[string]any) {
locationInfo := newLocationInfo(offsetAPICaller)
id := makeKey(message, locationInfo)
assertImpl(false, message, details, locationInfo, wasHit, optionallyHit, reachabilityTest, unreachableDisplay, id)
}
// Reachable asserts that a line of code is reached at least once. The corresponding test property will pass if this function is ever called. (If it is never called the test property will therefore fail.) This test property will be viewable in the “Antithesis SDK: Reachability assertions” group.
func Reachable(message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	// Condition is hard-wired true: being hit at all is the success.
	assertImpl(true, message, details, loc, wasHit, mustBeHit, reachabilityTest, reachableDisplay, makeKey(message, loc))
}
// AssertRaw is a low-level method designed to be used by third-party frameworks. Regular users of the assert package should not call it.
func AssertRaw(cond bool, message string, details map[string]any,
classname, funcname, filename string, line int,
hit bool, mustHit bool,
assertType string, displayType string,
id string,
) {
assertImpl(cond, message, details,
&locationInfo{classname, funcname, filename, line, columnUnknown},
hit, mustHit,
assertType, displayType,
id)
}
// assertImpl is the shared implementation behind every assertion entry
// point: it fetches (or creates) the dedup tracker entry for id, pins the
// location to the values captured when the entry was first seen, and
// forwards the evaluation to the tracker for emission.
func assertImpl(cond bool, message string, details map[string]any,
	loc *locationInfo,
	hit bool, mustHit bool,
	assertType string, displayType string,
	id string,
) {
	entry := assertTracker.getTrackerEntry(id, loc.Filename, loc.Classname)
	// Always prefer the Filename and Classname recorded when the tracker
	// entry was established; this keeps instrumentation-time and runtime
	// views consistent for the lifetime of the process.
	if loc.Filename != entry.Filename {
		loc.Filename = entry.Filename
	}
	if loc.Classname != entry.Classname {
		loc.Classname = entry.Classname
	}
	entry.emit(&assertInfo{
		Hit:         hit,
		MustHit:     mustHit,
		AssertType:  assertType,
		DisplayType: displayType,
		Message:     message,
		Condition:   cond,
		Id:          id,
		Location:    loc,
		Details:     details,
	})
}
// makeKey derives the tracker/dedup key for an assertion. The location is
// deliberately ignored: assertions that share a message collapse into a
// single test property regardless of call site.
func makeKey(message string, _ *locationInfo) string {
	return message
}

View File

@@ -0,0 +1,16 @@
//go:build !enable_antithesis_sdk
package assert
// No-op implementations compiled when the enable_antithesis_sdk build tag
// is absent: every assertion becomes an empty call that production builds
// pay essentially nothing for.
func Always(condition bool, message string, details map[string]any) {}
func AlwaysOrUnreachable(condition bool, message string, details map[string]any) {}
func Sometimes(condition bool, message string, details map[string]any) {}
func Unreachable(message string, details map[string]any) {}
func Reachable(message string, details map[string]any) {}

// AssertRaw mirrors the instrumented signature so generated callers link
// unchanged; it does nothing in this build.
func AssertRaw(cond bool, message string, details map[string]any,
	classname, funcname, filename string, line int,
	hit bool, mustHit bool,
	assertType string, displayType string,
	id string,
) {
}

View File

@@ -0,0 +1,30 @@
package assert
// Allowable numeric types of comparison parameters
type Number interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint8 | ~uint16 | ~uint32 | ~float32 | ~float64 | ~uint64 | ~uint | ~uintptr
}

// Internally, numeric guidanceFn Operands only use these
// (every Number is widened to one of them by newOperands).
type operandConstraint interface {
	int32 | int64 | uint64 | float64
}

// numConstraint covers the two gap-magnitude representations used by
// gapValue: integral gaps are held as uint64, floating gaps as float64.
type numConstraint interface {
	uint64 | float64
}
// NamedBool is a (name, value) pair used by the boolean assertion
// helpers (AlwaysSome, SometimesAll) and their guidance payloads.
type NamedBool struct {
	First  string `json:"first"`
	Second bool   `json:"second"`
}

// NewNamedBool is a convenience constructor for a NamedBool pair.
func NewNamedBool(first string, second bool) *NamedBool {
	return &NamedBool{First: first, Second: second}
}

View File

@@ -0,0 +1,66 @@
//go:build enable_antithesis_sdk
package assert
import (
"sync"
"github.com/antithesishq/antithesis-sdk-go/internal"
)
// TODO: Tracker is intended to prevent sending the same guidance
// more than once. In this case, we always send, so the tracker
// is not presently used.

// booleanGuidance is the (currently unused) per-assertion dedup state for
// boolean guidance; n is a placeholder counter.
type booleanGuidance struct {
	n int
}

// booleanGuidanceTracker maps an assertion key to its guidance state.
type booleanGuidanceTracker map[string]*booleanGuidance

var (
	boolean_guidance_tracker       booleanGuidanceTracker = make(booleanGuidanceTracker)
	boolean_guidance_tracker_mutex sync.Mutex // guards the tracker map
	boolean_guidance_info_mutex    sync.Mutex // serializes guidance emission
)
// getTrackerEntry returns the guidance state for messageKey, creating it
// on first use. The lookup now goes through the receiver rather than the
// package-level boolean_guidance_tracker: the original read the global
// but wrote the receiver, which diverges if the method is ever invoked
// on a non-global tracker, and was inconsistent with
// emitTracker.getTrackerEntry.
func (tracker booleanGuidanceTracker) getTrackerEntry(messageKey string) *booleanGuidance {
	if tracker == nil {
		return nil
	}
	boolean_guidance_tracker_mutex.Lock()
	defer boolean_guidance_tracker_mutex.Unlock()
	trackerEntry, ok := tracker[messageKey]
	if !ok {
		trackerEntry = newBooleanGuidance()
		tracker[messageKey] = trackerEntry
	}
	return trackerEntry
}
// newBooleanGuidance allocates a fresh, zero-valued boolean guidance
// tracker entry.
func newBooleanGuidance() *booleanGuidance {
	return &booleanGuidance{}
}
// send_value forwards a boolean guidance event to the emitter while
// holding the info mutex. Unlike the numeric path, every event is sent.
func (tI *booleanGuidance) send_value(bgI *booleanGuidanceInfo) {
	if tI == nil {
		return
	}
	boolean_guidance_info_mutex.Lock()
	defer boolean_guidance_info_mutex.Unlock()
	// The tracker entry should be consulted to determine
	// if this Guidance info has already been sent, or not.
	// NOTE(review): the emit error is discarded — presumably best-effort; confirm.
	emitBooleanGuidance(bgI)
}
// emitBooleanGuidance serializes the event under the "antithesis_guidance"
// key and hands it to the SDK output channel.
func emitBooleanGuidance(bgI *booleanGuidanceInfo) error {
	return internal.Json_data(map[string]any{"antithesis_guidance": bgI})
}

View File

@@ -0,0 +1,58 @@
//go:build enable_antithesis_sdk
package assert
import (
"path"
"runtime"
"strings"
)
// stackFrameOffset indicates how many frames to go up in the
// call stack to find the filename/location/line info. As
// this work is always done in NewLocationInfo(), the offset is
// specified from the perspective of NewLocationInfo
type stackFrameOffset int

// Order is important here since iota is being used
// (the value is passed straight to runtime.Caller as its skip count).
const (
	offsetNewLocationInfo stackFrameOffset = iota // newLocationInfo itself
	offsetHere                                    // newLocationInfo's direct caller
	offsetAPICaller                               // the user code calling an assert API
	offsetAPICallersCaller                        // one frame above that
)
// locationInfo represents the attributes known at instrumentation time
// for each Antithesis assertion discovered. The json tags define the
// wire format consumed by the Antithesis platform.
type locationInfo struct {
	Classname string `json:"class"`
	Funcname  string `json:"function"`
	Filename  string `json:"file"`
	Line      int    `json:"begin_line"`
	Column    int    `json:"begin_column"`
}

// columnUnknown is used when the column associated with
// a locationInfo is not available
const columnUnknown = 0
// newLocationInfo creates a locationInfo directly from the current
// execution context, walking nframes frames up the call stack.
// Placeholder strings ("*function*", "*class*", "*file*") are used for
// anything the runtime cannot resolve.
func newLocationInfo(nframes stackFrameOffset) *locationInfo {
	funcname := "*function*"
	classname := "*class*"
	pc, filename, line, ok := runtime.Caller(int(nframes))
	if !ok {
		filename = "*file*"
		line = 0
	} else if this_func := runtime.FuncForPC(pc); this_func != nil {
		// Fully-qualified Go names look like "pkg/path.Func"; the last
		// dot-separated component is the bare function name and the rest
		// is treated as the "class". Guard the no-dot case: the original
		// sliced funcname[1:] on an empty string, which panics.
		fullname := this_func.Name()
		if ext := path.Ext(fullname); ext != "" {
			classname, _ = strings.CutSuffix(fullname, ext)
			funcname = ext[1:]
		}
	}
	return &locationInfo{classname, funcname, filename, line, columnUnknown}
}

View File

@@ -0,0 +1,323 @@
//go:build enable_antithesis_sdk
package assert
import (
"math"
"sync"
"github.com/antithesishq/antithesis-sdk-go/internal"
)
// --------------------------------------------------------------------------------
// IntegerGap is used for:
// - int, int8, int16, int32, int64:
// - uint, uint8, uint16, uint32, uint64, uintptr:
//
// FloatGap is used for:
// - float32, float64
// --------------------------------------------------------------------------------
// numericGapType discriminates the two gap representations: integral
// gaps are held as gapValue[uint64], floating gaps as gapValue[float64].
type numericGapType int

const (
	integerGapType numericGapType = iota
	floatGapType
)
// gapTypeForOperand classifies a Number operand: float32/float64 use the
// floating-point gap representation, everything else the integral one.
func gapTypeForOperand[T Number](num T) numericGapType {
	switch any(num).(type) {
	case float32, float64:
		return floatGapType
	default:
		return integerGapType
	}
}
// --------------------------------------------------------------------------------
// numericGuidanceTracker - Tracking Info for Numeric Guidance
//
// For GuidanceFnMaximize:
// - gap is the largest value sent so far
//
// For GuidanceFnMinimize:
// - gap is the most negative value sent so far
//
// --------------------------------------------------------------------------------
// numericGuidanceInfo holds, per assertion key, the best (left - right)
// gap observed so far. gap is a gapValue[uint64] or gapValue[float64]
// depending on descriminator.
type numericGuidanceInfo struct {
	gap           any
	descriminator numericGapType
	maximize      bool
}

// numericGuidanceTracker maps an assertion key to its gap state.
type numericGuidanceTracker map[string]*numericGuidanceInfo

var (
	numeric_guidance_tracker       numericGuidanceTracker = make(numericGuidanceTracker)
	numeric_guidance_tracker_mutex sync.Mutex // guards the tracker map
	numeric_guidance_info_mutex    sync.Mutex // guards gap comparison/emission
)
// getTrackerEntry returns the gap state for messageKey, creating it on
// first use. The lookup now goes through the receiver rather than the
// package-level numeric_guidance_tracker: the original read the global
// but wrote the receiver, which diverges if the method is ever invoked
// on a non-global tracker, and was inconsistent with
// emitTracker.getTrackerEntry.
func (tracker numericGuidanceTracker) getTrackerEntry(messageKey string, trackerType numericGapType, maximize bool) *numericGuidanceInfo {
	if tracker == nil {
		return nil
	}
	numeric_guidance_tracker_mutex.Lock()
	defer numeric_guidance_tracker_mutex.Unlock()
	trackerEntry, ok := tracker[messageKey]
	if !ok {
		trackerEntry = newNumericGuidanceInfo(trackerType, maximize)
		tracker[messageKey] = trackerEntry
	}
	return trackerEntry
}
// newNumericGuidanceInfo creates a numeric guidance entry, seeding the
// reference gap at the largest representable magnitude. The sign flag is
// seeded from maximize so the first real observation always wins the
// comparison in send_value_if_needed.
func newNumericGuidanceInfo(trackerType numericGapType, maximize bool) *numericGuidanceInfo {
	var gap any
	switch trackerType {
	case integerGapType:
		gap = newGapValue(uint64(math.MaxUint64), maximize)
	default:
		gap = newGapValue(float64(math.MaxFloat64), maximize)
	}
	return &numericGuidanceInfo{
		gap:           gap,
		descriminator: trackerType,
		maximize:      maximize,
	}
}
// should_maximize reports whether this entry wants the (left - right)
// gap driven upward (true) or downward (false).
func (tI *numericGuidanceInfo) should_maximize() bool {
	return tI.maximize
}

// is_integer_gap reports whether the tracked gap uses the integral
// (uint64) representation rather than float64.
func (tI *numericGuidanceInfo) is_integer_gap() bool {
	return tI.descriminator == integerGapType
}
// --------------------------------------------------------------------------------
// gapValue is a sign-and-magnitude representation of a (left - right)
// difference: gap_size holds the magnitude, gap_is_negative the sign.
// Keeping sign and magnitude apart avoids overflow when the difference
// exceeds the signed range (see makeGap).
// --------------------------------------------------------------------------------
type gapValue[T numConstraint] struct {
	gap_size        T
	gap_is_negative bool
}
// newGapValue boxes a magnitude/sign pair into the matching gapValue
// instantiation (uint64 or float64). Any other type parameter yields
// nil, which callers treat as "no gap".
func newGapValue[T numConstraint](sz T, is_neg bool) any {
	switch any(sz).(type) {
	case uint64:
		return gapValue[uint64]{gap_size: uint64(sz), gap_is_negative: is_neg}
	case float64:
		return gapValue[float64]{gap_size: float64(sz), gap_is_negative: is_neg}
	}
	return nil
}
// is_same_sign reports whether left_val and right_val are on the same
// side of zero, with zero counted as non-negative. The original 13-line
// nested conditional reduces to comparing the two sign predicates.
func is_same_sign(left_val int64, right_val int64) bool {
	return (left_val < 0) == (right_val < 0)
}
// abs_int64 returns the absolute value of val as a uint64. The negation
// of math.MinInt64 wraps in int64 arithmetic, but converting the wrapped
// value to uint64 still yields the correct magnitude 2^63.
func abs_int64(val int64) uint64 {
	if val < 0 {
		return uint64(-val)
	}
	return uint64(val)
}
// is_greater_than compares two sign/magnitude gap values, returning true
// when left represents the strictly larger signed quantity.
func is_greater_than[T numConstraint](left gapValue[T], right gapValue[T]) bool {
	switch {
	case left.gap_is_negative == right.gap_is_negative:
		if left.gap_is_negative {
			// both negative: the smaller magnitude is the greater value
			return right.gap_size > left.gap_size
		}
		return left.gap_size > right.gap_size
	case right.gap_is_negative:
		return true // any positive is greater than a negative
	default:
		return false // any negative is less than a positive
	}
}
// is_less_than compares two sign/magnitude gap values, returning true
// when left represents the strictly smaller signed quantity.
func is_less_than[T numConstraint](left gapValue[T], right gapValue[T]) bool {
	switch {
	case left.gap_is_negative == right.gap_is_negative:
		if left.gap_is_negative {
			// both negative: the larger magnitude is the smaller value
			return right.gap_size < left.gap_size
		}
		return left.gap_size < right.gap_size
	case left.gap_is_negative:
		return true // any negative is less than a positive
	default:
		return false // any positive is greater than a negative
	}
}
// send_value_if_needed emits the numeric guidance event gI only when it
// is "interesting": either it is a catalog/registration event (Hit is
// false), or its (left - right) gap improves on the best gap recorded in
// tI (larger when maximizing, smaller when minimizing). The reference
// gap in the tracker is updated whenever the event is emitted.
func send_value_if_needed(tI *numericGuidanceInfo, gI *guidanceInfo) {
	if tI == nil {
		return
	}
	numeric_guidance_info_mutex.Lock()
	defer numeric_guidance_info_mutex.Unlock()
	// if this is a catalog entry (gI.hit is false)
	// do not update the reference gap in the tracker (tI *numericGuidanceInfo)
	if !gI.Hit {
		emitGuidance(gI)
		return
	}
	should_send := false
	maximize := tI.should_maximize()
	var gap gapValue[uint64]
	var float_gap gapValue[float64]
	// Needs to have individual case statements to assist
	// the compiler to infer the actual type of the var named 'operands'
	switch operands := (gI.Data).(type) {
	case numericOperands[int32]:
		gap = makeGap(operands)
	case numericOperands[int64]:
		gap = makeGap(operands)
	case numericOperands[uint64]:
		gap = makeGap(operands)
	case numericOperands[float64]:
		float_gap = makeFloatGap(operands)
	}
	// The reference gap is either an integer or a float gap; try the
	// integer representation first, then fall back to float.
	var prev_gap gapValue[uint64]
	var prev_float_gap gapValue[float64]
	has_prev_gap := false
	has_prev_float_gap := false
	prev_gap, has_prev_gap = tI.gap.(gapValue[uint64])
	if !has_prev_gap {
		prev_float_gap, has_prev_float_gap = tI.gap.(gapValue[float64])
	}
	if has_prev_gap {
		if maximize {
			should_send = is_greater_than(gap, prev_gap)
		} else {
			should_send = is_less_than(gap, prev_gap)
		}
	}
	if has_prev_float_gap {
		if maximize {
			should_send = is_greater_than(float_gap, prev_float_gap)
		} else {
			should_send = is_less_than(float_gap, prev_float_gap)
		}
	}
	if should_send {
		// Record the new best gap before emitting. NOTE(review): the
		// emitGuidance error is discarded here — presumably best-effort.
		if tI.is_integer_gap() {
			tI.gap = gap
		} else {
			tI.gap = float_gap
		}
		emitGuidance(gI)
	}
}
// emitGuidance serializes the event under the "antithesis_guidance" key
// and hands it to the SDK output channel.
func emitGuidance(gI *guidanceInfo) error {
	return internal.Json_data(map[string]any{"antithesis_guidance": gI})
}
// When left and right are the same sign (both negative, or both non-negative)
// Calculate: <result> = (left - right). The gap_size is abs(<result>) and
// gap_is_negative is (right > left)
//
// makeGap converts a pair of integral operands into the sign/magnitude
// gapValue[uint64] form of (left - right), avoiding int64 overflow when
// the int64 operands have opposite signs.
func makeGap[Op operandConstraint](operand numericOperands[Op]) gapValue[uint64] {
	var gap_size uint64
	var gap_is_negative bool
	switch this_op := any(operand).(type) {
	case numericOperands[int32]:
		// An int32 difference always fits in int64; subtract directly.
		result := int64(this_op.Left) - int64(this_op.Right)
		gap_size = abs_int64(result)
		gap_is_negative = result < 0
	case numericOperands[int64]:
		if is_same_sign(this_op.Left, this_op.Right) {
			result := int64(this_op.Left) - int64(this_op.Right)
			gap_size = abs_int64(result)
			gap_is_negative = result < 0
			break
		}
		// Otherwise left and right are opposite signs
		// gap = abs(left) + abs(right)
		// gap_is_negative = left < right
		left_gap_size := abs_int64(this_op.Left)
		right_gap_size := abs_int64(this_op.Right)
		gap_size = left_gap_size + right_gap_size
		gap_is_negative = this_op.Left < this_op.Right
	case numericOperands[uint64]:
		// Unsigned operands: subtract the smaller from the larger and
		// record the direction separately.
		left_val := this_op.Left
		right_val := this_op.Right
		gap_is_negative = false
		if left_val < right_val {
			gap_is_negative = true
			gap_size = right_val - left_val
		} else {
			gap_size = left_val - right_val
		}
	default:
		// float64 operands do not belong here; report a zero gap.
		zero_gap, _ := newGapValue(uint64(0), false).(gapValue[uint64])
		return zero_gap
	}
	this_gap, _ := newGapValue(gap_size, gap_is_negative).(gapValue[uint64])
	return this_gap
} // MakeGap
// makeFloatGap converts a pair of float64 operands into the
// sign/magnitude gapValue[float64] form of (left - right). Non-float64
// operands report a zero gap.
func makeFloatGap[Op operandConstraint](operand numericOperands[Op]) gapValue[float64] {
	switch this_op := any(operand).(type) {
	case numericOperands[float64]:
		left_val := this_op.Left
		right_val := this_op.Right
		gap_is_negative := false
		var gap_size float64
		if left_val < right_val {
			gap_is_negative = true
			gap_size = right_val - left_val
		} else {
			gap_size = left_val - right_val
		}
		this_gap, _ := newGapValue(gap_size, gap_is_negative).(gapValue[float64])
		return this_gap
	default:
		zero_gap, _ := newGapValue(float64(0.0), false).(gapValue[float64])
		return zero_gap
	}
} // MakeFloatGap

View File

@@ -0,0 +1,330 @@
//go:build enable_antithesis_sdk
package assert
// guidanceFnType selects the guidance strategy attached to a raw
// assertion so the Antithesis platform can steer exploration.
// Regular users of the assert package should not use it.
type guidanceFnType int

const (
	guidanceFnMaximize guidanceFnType = iota // Maximize (left - right) values
	guidanceFnMinimize                       // Minimize (left - right) values
	guidanceFnWantAll                        // Encourages fuzzing explorations where boolean values are true
	guidanceFnWantNone                       // Encourages fuzzing explorations where boolean values are false
	guidanceFnExplore                        // Free-form (json) guidance
)

// get_guidance_type_string maps a guidance strategy to the wire-format
// type tag; unknown strategies map to the empty string.
func get_guidance_type_string(gt guidanceFnType) string {
	switch gt {
	case guidanceFnMaximize, guidanceFnMinimize:
		return "numeric"
	case guidanceFnWantAll, guidanceFnWantNone:
		return "boolean"
	case guidanceFnExplore:
		return "json"
	default:
		return ""
	}
}
// numericOperands is the (left, right) pair attached to a numeric
// guidance event, already widened to one of the four wire types.
type numericOperands[T operandConstraint] struct {
	Left  T `json:"left"`
	Right T `json:"right"`
}
// guidanceInfo is the wire payload for a numeric/json guidance event.
// Hit distinguishes real evaluations from catalog (registration) events,
// which carry no Data.
type guidanceInfo struct {
	Data         any           `json:"guidance_data,omitempty"`
	Location     *locationInfo `json:"location"`
	GuidanceType string        `json:"guidance_type"`
	Message      string        `json:"message"`
	Id           string        `json:"id"`
	Maximize     bool          `json:"maximize"`
	Hit          bool          `json:"hit"`
}

// booleanGuidanceInfo is the wire payload for a boolean guidance event;
// it mirrors guidanceInfo field-for-field but carries a name->bool map
// as Data.
type booleanGuidanceInfo struct {
	Data         any           `json:"guidance_data,omitempty"`
	Location     *locationInfo `json:"location"`
	GuidanceType string        `json:"guidance_type"`
	Message      string        `json:"message"`
	Id           string        `json:"id"`
	Maximize     bool          `json:"maximize"`
	Hit          bool          `json:"hit"`
}
// uses_maximize reports whether the strategy drives its metric upward
// (maximize / want-all) rather than downward.
func uses_maximize(gt guidanceFnType) bool {
	return gt == guidanceFnMaximize || gt == guidanceFnWantAll
}
// newOperands widens a (left, right) pair of any Number type to one of
// the four operandConstraint instantiations: int is widened to int64,
// uintptr to uint64, float32 to float64 (losslessly). The final return
// nil is unreachable given the Number constraint.
func newOperands[T Number](left, right T) any {
	switch any(left).(type) {
	case int8, int16, int32:
		return numericOperands[int32]{int32(left), int32(right)}
	case int, int64:
		return numericOperands[int64]{int64(left), int64(right)}
	case uint8, uint16, uint32, uint, uint64, uintptr:
		return numericOperands[uint64]{uint64(left), uint64(right)}
	case float32, float64:
		return numericOperands[float64]{float64(left), float64(right)}
	}
	return nil
}
// build_numeric_guidance assembles the guidanceInfo event for a numeric
// guidance assertion. Catalog events (hit == false) carry no operand
// payload.
func build_numeric_guidance[T Number](gt guidanceFnType, message string, left, right T, loc *locationInfo, id string, hit bool) *guidanceInfo {
	var data any
	if hit {
		data = newOperands(left, right)
	}
	return &guidanceInfo{
		GuidanceType: get_guidance_type_string(gt),
		Message:      message,
		Id:           id,
		Location:     loc,
		Maximize:     uses_maximize(gt),
		Data:         data,
		Hit:          hit,
	}
}
// namedBoolDictionary carries the name -> value mapping of a NamedBool
// list in the emitted guidance payload.
type namedBoolDictionary map[string]bool

// build_boolean_guidance assembles the booleanGuidanceInfo event for a
// boolean guidance assertion; catalog events (hit == false) carry no
// payload.
func build_boolean_guidance(gt guidanceFnType, message string, named_bools []NamedBool,
	loc *locationInfo,
	id string, hit bool) *booleanGuidanceInfo {
	var payload any
	// To ensure the sequence and naming for the named_bool values
	if hit {
		dict := namedBoolDictionary{}
		for _, nb := range named_bools {
			dict[nb.First] = nb.Second
		}
		payload = dict
	}
	return &booleanGuidanceInfo{
		GuidanceType: get_guidance_type_string(gt),
		Message:      message,
		Id:           id,
		Location:     loc,
		Maximize:     uses_maximize(gt),
		Data:         payload,
		Hit:          hit,
	}
}
// behavior_to_guidance decodes the textual behavior tag used by the raw
// guidance entry points; anything unrecognized falls back to explore.
func behavior_to_guidance(behavior string) guidanceFnType {
	switch behavior {
	case "maximize":
		return guidanceFnMaximize
	case "minimize":
		return guidanceFnMinimize
	case "all":
		return guidanceFnWantAll
	case "none":
		return guidanceFnWantNone
	default:
		return guidanceFnExplore
	}
}
// numericGuidanceImpl routes a numeric comparison through the gap
// tracker, which decides whether the event is novel enough to emit.
func numericGuidanceImpl[T Number](left, right T, message, id string, loc *locationInfo, guidanceFn guidanceFnType, hit bool) {
	tI := numeric_guidance_tracker.getTrackerEntry(id, gapTypeForOperand(left), uses_maximize(guidanceFn))
	gI := build_numeric_guidance(guidanceFn, message, left, right, loc, id, hit)
	send_value_if_needed(tI, gI)
}

// booleanGuidanceImpl routes a named-bool vector to the emitter; the
// boolean tracker currently forwards every event unconditionally.
func booleanGuidanceImpl(named_bools []NamedBool, message, id string, loc *locationInfo, guidanceFn guidanceFnType, hit bool) {
	tI := boolean_guidance_tracker.getTrackerEntry(id)
	bgI := build_boolean_guidance(guidanceFn, message, named_bools, loc, id, hit)
	tI.send_value(bgI)
}
// NumericGuidanceRaw is a low-level method designed to be used by third-party frameworks. Regular users of the assert package should not call it.
// The caller supplies an explicit source location and a textual behavior
// tag ("maximize", "minimize", "all", "none"); hit distinguishes a real
// evaluation from a catalog registration.
func NumericGuidanceRaw[T Number](
	left, right T,
	message, id string,
	classname, funcname, filename string,
	line int,
	behavior string,
	hit bool,
) {
	loc := &locationInfo{classname, funcname, filename, line, columnUnknown}
	guidanceFn := behavior_to_guidance(behavior)
	numericGuidanceImpl(left, right, message, id, loc, guidanceFn, hit)
}
// BooleanGuidanceRaw is a low-level method designed to be used by third-party frameworks. Regular users of the assert package should not call it.
// The caller supplies an explicit source location and a textual behavior
// tag; hit distinguishes a real evaluation from a catalog registration.
func BooleanGuidanceRaw(
	named_bools []NamedBool,
	message, id string,
	classname, funcname, filename string,
	line int,
	behavior string,
	hit bool,
) {
	loc := &locationInfo{classname, funcname, filename, line, columnUnknown}
	guidanceFn := behavior_to_guidance(behavior)
	booleanGuidanceImpl(named_bools, message, id, loc, guidanceFn, hit)
}
// add_numeric_details returns a copy of details extended with the "left"
// and "right" operand values. The caller's map is never mutated.
// (A manual copy is kept here rather than maps.Clone to preserve the
// original pre-1.21 compatibility.)
func add_numeric_details[T Number](details map[string]any, left, right T) map[string]any {
	merged := make(map[string]any, len(details)+2)
	for key, value := range details {
		merged[key] = value
	}
	merged["left"] = left
	merged["right"] = right
	return merged
}
// add_boolean_details returns a copy of details extended with one entry
// per NamedBool, keyed by its name. The caller's map is never mutated.
// (A manual copy is kept here rather than maps.Clone to preserve the
// original pre-1.21 compatibility.)
func add_boolean_details(details map[string]any, named_bools []NamedBool) map[string]any {
	merged := make(map[string]any, len(details)+len(named_bools))
	for key, value := range details {
		merged[key] = value
	}
	for _, nb := range named_bools {
		merged[nb.First] = nb.Second
	}
	return merged
}
// AlwaysGreaterThan is equivalent to asserting Always(left > right, message, details). Information about left and right will automatically be added to the details parameter, with keys left and right. If you use this function for assertions that compare numeric quantities, you may help Antithesis find more bugs.
func AlwaysGreaterThan[T Number](left, right T, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	enhanced := add_numeric_details(details, left, right)
	assertImpl(left > right, message, enhanced, loc, wasHit, mustBeHit, universalTest, alwaysDisplay, key)
	// Guidance minimizes (left - right), steering toward a violation.
	numericGuidanceImpl(left, right, message, key, loc, guidanceFnMinimize, wasHit)
}
// AlwaysGreaterThanOrEqualTo is equivalent to asserting Always(left >= right, message, details). Information about left and right will automatically be added to the details parameter, with keys left and right. If you use this function for assertions that compare numeric quantities, you may help Antithesis find more bugs.
func AlwaysGreaterThanOrEqualTo[T Number](left, right T, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	enhanced := add_numeric_details(details, left, right)
	assertImpl(left >= right, message, enhanced, loc, wasHit, mustBeHit, universalTest, alwaysDisplay, key)
	// Guidance minimizes (left - right), steering toward a violation.
	numericGuidanceImpl(left, right, message, key, loc, guidanceFnMinimize, wasHit)
}
// SometimesGreaterThan is equivalent to asserting Sometimes(left > right, message, details). Information about left and right will automatically be added to the details parameter, with keys left and right. If you use this function for assertions that compare numeric quantities, you may help Antithesis find more bugs.
func SometimesGreaterThan[T Number](left, right T, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	enhanced := add_numeric_details(details, left, right)
	assertImpl(left > right, message, enhanced, loc, wasHit, mustBeHit, existentialTest, sometimesDisplay, key)
	// Guidance maximizes (left - right), steering toward satisfaction.
	numericGuidanceImpl(left, right, message, key, loc, guidanceFnMaximize, wasHit)
}
// SometimesGreaterThanOrEqualTo is equivalent to asserting Sometimes(left >= right, message, details). Information about left and right will automatically be added to the details parameter, with keys left and right. If you use this function for assertions that compare numeric quantities, you may help Antithesis find more bugs.
func SometimesGreaterThanOrEqualTo[T Number](left, right T, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	enhanced := add_numeric_details(details, left, right)
	assertImpl(left >= right, message, enhanced, loc, wasHit, mustBeHit, existentialTest, sometimesDisplay, key)
	// Guidance maximizes (left - right), steering toward satisfaction.
	numericGuidanceImpl(left, right, message, key, loc, guidanceFnMaximize, wasHit)
}
// AlwaysLessThan is equivalent to asserting Always(left < right, message, details). Information about left and right will automatically be added to the details parameter, with keys left and right. If you use this function for assertions that compare numeric quantities, you may help Antithesis find more bugs.
func AlwaysLessThan[T Number](left, right T, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	enhanced := add_numeric_details(details, left, right)
	assertImpl(left < right, message, enhanced, loc, wasHit, mustBeHit, universalTest, alwaysDisplay, key)
	// Guidance maximizes (left - right), steering toward a violation.
	numericGuidanceImpl(left, right, message, key, loc, guidanceFnMaximize, wasHit)
}
// AlwaysLessThanOrEqualTo is equivalent to asserting Always(left <= right, message, details). Information about left and right will automatically be added to the details parameter, with keys left and right. If you use this function for assertions that compare numeric quantities, you may help Antithesis find more bugs.
func AlwaysLessThanOrEqualTo[T Number](left, right T, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	enhanced := add_numeric_details(details, left, right)
	assertImpl(left <= right, message, enhanced, loc, wasHit, mustBeHit, universalTest, alwaysDisplay, key)
	// Guidance maximizes (left - right), steering toward a violation.
	numericGuidanceImpl(left, right, message, key, loc, guidanceFnMaximize, wasHit)
}
// SometimesLessThan is equivalent to asserting Sometimes(left < right, message, details). Information about left and right will automatically be added to the details parameter, with keys left and right. If you use this function for assertions that compare numeric quantities, you may help Antithesis find more bugs.
func SometimesLessThan[T Number](left, right T, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	enhanced := add_numeric_details(details, left, right)
	assertImpl(left < right, message, enhanced, loc, wasHit, mustBeHit, existentialTest, sometimesDisplay, key)
	// Guidance minimizes (left - right), steering toward satisfaction.
	numericGuidanceImpl(left, right, message, key, loc, guidanceFnMinimize, wasHit)
}
// SometimesLessThanOrEqualTo is equivalent to asserting Sometimes(left <= right, message, details). Information about left and right will automatically be added to the details parameter, with keys left and right. If you use this function for assertions that compare numeric quantities, you may help Antithesis find more bugs.
func SometimesLessThanOrEqualTo[T Number](left, right T, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	enhanced := add_numeric_details(details, left, right)
	assertImpl(left <= right, message, enhanced, loc, wasHit, mustBeHit, existentialTest, sometimesDisplay, key)
	// Guidance minimizes (left - right), steering toward satisfaction.
	numericGuidanceImpl(left, right, message, key, loc, guidanceFnMinimize, wasHit)
}
// AlwaysSome asserts that every time this is called, at least one bool in named_bools is true. Equivalent to Always(named_bools[0].second || named_bools[1].second || ..., message, details). If you use this for assertions about the behavior of booleans, you may help Antithesis find more bugs. Information about named_bools will automatically be added to the details parameter, and the keys will be the names of the bools.
func AlwaysSome(named_bools []NamedBool, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	anyTrue := false
	for _, nb := range named_bools {
		if nb.Second {
			anyTrue = true
			break
		}
	}
	enhanced := add_boolean_details(details, named_bools)
	assertImpl(anyTrue, message, enhanced, loc, wasHit, mustBeHit, universalTest, alwaysDisplay, key)
	// Guidance nudges exploration toward all-false vectors (the violation).
	booleanGuidanceImpl(named_bools, message, key, loc, guidanceFnWantNone, wasHit)
}
// SometimesAll asserts that at least one time this is called, every bool in named_bools is true. Equivalent to Sometimes(named_bools[0].second && named_bools[1].second && ..., message, details). If you use this for assertions about the behavior of booleans, you may help Antithesis find more bugs. Information about named_bools will automatically be added to the details parameter, and the keys will be the names of the bools.
func SometimesAll(named_bools []NamedBool, message string, details map[string]any) {
	loc := newLocationInfo(offsetAPICaller)
	key := makeKey(message, loc)
	allTrue := true
	for _, nb := range named_bools {
		if !nb.Second {
			allTrue = false
			break
		}
	}
	enhanced := add_boolean_details(details, named_bools)
	assertImpl(allTrue, message, enhanced, loc, wasHit, mustBeHit, existentialTest, sometimesDisplay, key)
	// Guidance nudges exploration toward all-true vectors (satisfaction).
	booleanGuidanceImpl(named_bools, message, key, loc, guidanceFnWantAll, wasHit)
}

View File

@@ -0,0 +1,34 @@
//go:build !enable_antithesis_sdk
package assert
// No-op implementations compiled when the enable_antithesis_sdk build
// tag is absent; the comparison and boolean assertion helpers compile
// away to empty calls.
func AlwaysGreaterThan[T Number](left, right T, message string, details map[string]any) {}
func AlwaysGreaterThanOrEqualTo[T Number](left, right T, message string, details map[string]any) {}
func SometimesGreaterThan[T Number](left, right T, message string, details map[string]any) {}
func SometimesGreaterThanOrEqualTo[T Number](left, right T, message string, details map[string]any) {}
func AlwaysLessThan[T Number](left, right T, message string, details map[string]any) {}
func AlwaysLessThanOrEqualTo[T Number](left, right T, message string, details map[string]any) {}
func SometimesLessThan[T Number](left, right T, message string, details map[string]any) {}
func SometimesLessThanOrEqualTo[T Number](left, right T, message string, details map[string]any) {}
func AlwaysSome(named_bool []NamedBool, message string, details map[string]any) {}
func SometimesAll(named_bool []NamedBool, message string, details map[string]any) {}
// NumericGuidanceRaw mirrors the instrumented signature so generated
// callers link unchanged; it does nothing in this build.
func NumericGuidanceRaw[T Number](left, right T,
	message, id string,
	classname, funcname, filename string,
	line int,
	behavior string,
	hit bool,
) {
}

// BooleanGuidanceRaw mirrors the instrumented signature so generated
// callers link unchanged; it does nothing in this build.
func BooleanGuidanceRaw(
	named_bools []NamedBool,
	message, id string,
	classname, funcname, filename string,
	line int,
	behavior string,
	hit bool,
) {
}

View File

@@ -0,0 +1,111 @@
//go:build enable_antithesis_sdk
package assert
import (
"runtime"
"sync"
"sync/atomic"
"github.com/antithesishq/antithesis-sdk-go/internal"
)
// trackerInfo records, per unique assertion id, the location captured on
// first sight plus how many passing/failing evaluations have been seen.
type trackerInfo struct {
	Filename  string
	Classname string
	PassCount int
	FailCount int
}

// emitTracker maps assertion ids to their dedup state.
type emitTracker map[string]*trackerInfo

// assert_tracker (global) keeps track of the unique asserts evaluated
var (
	assertTracker    emitTracker = make(emitTracker)
	trackerMutex     sync.Mutex // guards the assertTracker map itself
	trackerInfoMutex sync.Mutex // guards the counters inside entries
)
// getTrackerEntry returns the dedup state for messageKey, creating an
// entry (pinned to filename/classname) on first use. Safe for concurrent
// callers via trackerMutex; nil trackers yield nil.
func (tracker emitTracker) getTrackerEntry(messageKey string, filename, classname string) *trackerInfo {
	if tracker == nil {
		return nil
	}
	trackerMutex.Lock()
	defer trackerMutex.Unlock()
	entry, present := tracker[messageKey]
	if !present {
		entry = newTrackerInfo(filename, classname)
		tracker[messageKey] = entry
	}
	return entry
}
// newTrackerInfo creates a tracker entry pinned to the given location;
// both counters start at their zero value.
func newTrackerInfo(filename, classname string) *trackerInfo {
	return &trackerInfo{
		Filename:  filename,
		Classname: classname,
	}
}
// emit forwards an assertion evaluation to the output channel, but only
// the first passing and the first failing occurrence of each assertion
// are actually sent; later ones just bump the counters under the mutex.
// A counter advances only when its first emit succeeded (or was skipped),
// so a failed emit is retried on the next matching evaluation.
func (ti *trackerInfo) emit(ai *assertInfo) {
	if ti == nil || ai == nil {
		return
	}
	// Registrations are just sent to voidstar
	hit := ai.Hit
	if !hit {
		emitAssert(ai)
		return
	}
	var err error
	cond := ai.Condition
	trackerInfoMutex.Lock()
	defer trackerInfoMutex.Unlock()
	if cond {
		if ti.PassCount == 0 {
			err = emitAssert(ai)
		}
		if err == nil {
			ti.PassCount++
		}
		return
	}
	if ti.FailCount == 0 {
		err = emitAssert(ai)
	}
	if err == nil {
		ti.FailCount++
	}
}
// versionMessage emits the SDK/language version block so the platform
// can record which SDK produced the subsequent events. The emission
// error is discarded: the banner is best effort.
func versionMessage() {
	languageBlock := map[string]any{
		"name":    "Go",
		"version": runtime.Version(),
	}
	versionBlock := map[string]any{
		"language":         languageBlock,
		"sdk_version":      internal.SDK_Version,
		"protocol_version": internal.Protocol_Version,
	}
	internal.Json_data(map[string]any{"antithesis_sdk": versionBlock})
}
// package-level flag
// hasEmitted ensures the version banner precedes the first assertion
// event exactly once per process (initialized to false).
var hasEmitted atomic.Bool

// emitAssert sends the version banner on first use, then the wrapped
// assertion payload.
func emitAssert(ai *assertInfo) error {
	if hasEmitted.CompareAndSwap(false, true) {
		versionMessage()
	}
	return internal.Json_data(wrappedAssertInfo{ai})
}

View File

@@ -0,0 +1,242 @@
//go:build enable_antithesis_sdk
package internal
import (
"encoding/json"
"fmt"
"log"
"math/rand"
"os"
"unsafe"
)
// --------------------------------------------------------------------------------
// To build and run an executable with this package
//
// CC=clang CGO_ENABLED=1 go run ./main.go
// --------------------------------------------------------------------------------
// \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
//
// The commented lines below, and the `import "C"` line which must directly follow
// the commented lines are used by CGO. They are load-bearing, and should not be
// changed without first understanding how CGO uses them.
//
// \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
// #cgo LDFLAGS: -ldl
//
// #include <dlfcn.h>
// #include <stdbool.h>
// #include <stdint.h>
// #include <stdlib.h>
//
// typedef void (*go_fuzz_json_data_fn)(const char *data, size_t size);
// void
// go_fuzz_json_data(void *f, const char *data, size_t size) {
// ((go_fuzz_json_data_fn)f)(data, size);
// }
//
// typedef void (*go_fuzz_flush_fn)(void);
// void
// go_fuzz_flush(void *f) {
// ((go_fuzz_flush_fn)f)();
// }
//
// typedef uint64_t (*go_fuzz_get_random_fn)(void);
// uint64_t
// go_fuzz_get_random(void *f) {
// return ((go_fuzz_get_random_fn)f)();
// }
//
// typedef bool (*go_notify_coverage_fn)(size_t);
// int
// go_notify_coverage(void *f, size_t edges) {
// bool b = ((go_notify_coverage_fn)f)(edges);
// return b ? 1 : 0;
// }
//
// typedef uint64_t (*go_init_coverage_fn)(size_t num_edges, const char *symbols);
// uint64_t
// go_init_coverage(void *f, size_t num_edges, const char *symbols) {
// return ((go_init_coverage_fn)f)(num_edges, symbols);
// }
//
import "C"
// Json_data marshals v to JSON and forwards it to the active output handler.
// It returns the marshalling error, if any; the write itself is best-effort
// (the handler does not report errors).
func Json_data(v any) error {
	// Early-return on failure keeps the happy path unnested (idiomatic Go,
	// replacing the previous else-after-return shape).
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	handler.output(string(data))
	return nil
}
// Get_random returns a 64-bit random value from the active handler
// (libvoidstar when available, math/rand in the local fallback).
func Get_random() uint64 {
	return handler.random()
}

// Notify reports one coverage edge to the active handler; the result is
// false when coverage reporting is unavailable (local fallback).
func Notify(edge uint64) bool {
	return handler.notify(edge)
}

// InitCoverage registers a coverage module of num_edges edges with the
// active handler and returns the handler-reported value (0 locally).
func InitCoverage(num_edges uint64, symbols string) uint64 {
	return handler.init_coverage(num_edges, symbols)
}

// libHandler abstracts the SDK backend: either the native libvoidstar
// shared library or a local fallback that logs to an optional file.
type libHandler interface {
	// output writes one JSON message to the backend's sink.
	output(message string)
	// random returns a backend-supplied 64-bit random value.
	random() uint64
	// notify reports one coverage edge; the result signals acceptance.
	notify(edge uint64) bool
	// init_coverage registers a coverage module and returns a backend value.
	init_coverage(num_edges uint64, symbols string) uint64
}
const (
	// errorLogLinePrefix tags every log line emitted by this SDK.
	errorLogLinePrefix = "[* antithesis-sdk-go *]"
	// defaultNativeLibraryPath is where the Antithesis environment installs
	// libvoidstar; its presence selects the native handler at init time.
	defaultNativeLibraryPath = "/usr/lib/libvoidstar.so"
)

// handler is the process-wide backend, chosen once in init().
var handler libHandler
// voidstarHandler dispatches through function pointers resolved from the
// libvoidstar shared library with dlsym (see openSharedLib).
type voidstarHandler struct {
	fuzzJsonData   unsafe.Pointer // fuzz_json_data(const char *, size_t)
	fuzzFlush      unsafe.Pointer // fuzz_flush(void)
	fuzzGetRandom  unsafe.Pointer // fuzz_get_random(void) -> uint64_t
	initCoverage   unsafe.Pointer // init_coverage_module(size_t, const char *) -> uint64_t
	notifyCoverage unsafe.Pointer // notify_coverage(size_t) -> bool
}

// output sends a non-empty message to libvoidstar and flushes after every
// write. The C string copy is freed before returning.
func (h *voidstarHandler) output(message string) {
	msg_len := len(message)
	if msg_len == 0 {
		return
	}
	cstrMessage := C.CString(message)
	defer C.free(unsafe.Pointer(cstrMessage))
	C.go_fuzz_json_data(h.fuzzJsonData, cstrMessage, C.ulong(msg_len))
	C.go_fuzz_flush(h.fuzzFlush)
}

// random returns a 64-bit value supplied by libvoidstar's fuzz_get_random.
func (h *voidstarHandler) random() uint64 {
	return uint64(C.go_fuzz_get_random(h.fuzzGetRandom))
}

// init_coverage registers a coverage module with libvoidstar and returns
// the value reported by init_coverage_module.
func (h *voidstarHandler) init_coverage(num_edge uint64, symbols string) uint64 {
	cstrSymbols := C.CString(symbols)
	defer C.free(unsafe.Pointer(cstrSymbols))
	return uint64(C.go_init_coverage(h.initCoverage, C.ulong(num_edge), cstrSymbols))
}

// notify reports one coverage edge; true means the library returned true.
func (h *voidstarHandler) notify(edge uint64) bool {
	ival := int(C.go_notify_coverage(h.notifyCoverage, C.ulong(edge)))
	return ival == 1
}
// localHandler is the fallback used when libvoidstar is not installed.
// It optionally appends JSON lines to a log file and otherwise no-ops.
type localHandler struct {
	outputFile *os.File // can be nil
}

// output appends a newline-terminated message to the log file if one was
// configured; the write error is deliberately ignored (best-effort logging).
func (h *localHandler) output(message string) {
	msg_len := len(message)
	if msg_len == 0 {
		return
	}
	if h.outputFile != nil {
		h.outputFile.WriteString(message + "\n")
	}
}

// random falls back to math/rand for randomness.
func (h *localHandler) random() uint64 {
	return rand.Uint64()
}

// notify is a no-op locally; coverage edges are never accepted.
func (h *localHandler) notify(edge uint64) bool {
	return false
}

// init_coverage is a no-op locally; the result is always 0.
func (h *localHandler) init_coverage(num_edges uint64, symbols string) uint64 {
	return 0
}
// If we have a file at `defaultNativeLibraryPath`, we load the shared library
// (and panic on any error encountered during load).
// Otherwise fallback to the local handler.
func init() {
	if _, err := os.Stat(defaultNativeLibraryPath); err == nil {
		// Plain `=` assigns the package-level handler (no shadowing).
		if handler, err = openSharedLib(defaultNativeLibraryPath); err != nil {
			panic(err)
		}
		return
	}
	handler = openLocalHandler()
}
// Attempt to load libvoidstar and some symbols from `path`.
// Returns a handler holding the resolved function pointers, or an error
// decorated with dlerror() output if the library or any symbol is missing.
func openSharedLib(path string) (*voidstarHandler, error) {
	cstrPath := C.CString(path)
	defer C.free(unsafe.Pointer(cstrPath))
	// dlError wraps message with the most recent dlerror() text.
	dlError := func(message string) error {
		return fmt.Errorf("%s: (%s)", message, C.GoString(C.dlerror()))
	}
	sharedLib := C.dlopen(cstrPath, C.int(C.RTLD_NOW))
	if sharedLib == nil {
		return nil, dlError("Can not load the Antithesis native library")
	}
	// loadFunc resolves one exported symbol by name via dlsym.
	loadFunc := func(name string) (symbol unsafe.Pointer, err error) {
		cstrName := C.CString(name)
		defer C.free(unsafe.Pointer(cstrName))
		if symbol = C.dlsym(sharedLib, cstrName); symbol == nil {
			err = dlError(fmt.Sprintf("Can not access symbol %s", name))
		}
		return
	}
	fuzzJsonData, err := loadFunc("fuzz_json_data")
	if err != nil {
		return nil, err
	}
	fuzzFlush, err := loadFunc("fuzz_flush")
	if err != nil {
		return nil, err
	}
	fuzzGetRandom, err := loadFunc("fuzz_get_random")
	if err != nil {
		return nil, err
	}
	notifyCoverage, err := loadFunc("notify_coverage")
	if err != nil {
		return nil, err
	}
	initCoverage, err := loadFunc("init_coverage_module")
	if err != nil {
		return nil, err
	}
	return &voidstarHandler{fuzzJsonData, fuzzFlush, fuzzGetRandom, initCoverage, notifyCoverage}, nil
}
// openLocalHandler builds the fallback handler used without libvoidstar.
// If `localOutputEnvVar` is set to a non-empty path, attempt to open that path and truncate the file
// to serve as the log file of the local handler.
// Otherwise, we don't have a log file, and logging is a no-op in the local handler.
func openLocalHandler() *localHandler {
	path, is_set := os.LookupEnv(localOutputEnvVar)
	if !is_set || len(path) == 0 {
		return &localHandler{nil}
	}
	// Open the file R/W (create if needed and possible)
	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		log.Printf("%s Failed to open path %s: %v", errorLogLinePrefix, path, err)
		file = nil
	} else if err = file.Truncate(0); err != nil {
		log.Printf("%s Failed to truncate file at %s: %v", errorLogLinePrefix, path, err)
		// Fix: close the descriptor we just opened before abandoning it,
		// otherwise it leaks for the lifetime of the process.
		file.Close()
		file = nil
	}
	return &localHandler{file}
}

View File

@@ -0,0 +1,12 @@
package internal

// --------------------------------------------------------------------------------
// Versions
// --------------------------------------------------------------------------------

// SDK_Version is this SDK's release version, reported in the version message.
const SDK_Version = "0.4.3"

// Protocol_Version is the output protocol version, reported in the version message.
const Protocol_Version = "1.1.0"

// --------------------------------------------------------------------------------
// Environment Vars
// --------------------------------------------------------------------------------

// localOutputEnvVar, when set to a non-empty path, directs the local
// (non-Antithesis) handler to log its output to that file.
const localOutputEnvVar = "ANTITHESIS_SDK_LOCAL_OUTPUT"

View File

@@ -378,6 +378,21 @@ func (a *Account) getClients() []*client {
return clients
}
// Returns a slice of external (non-internal) clients stored in the account, or nil if none is present.
// Lock is held on entry.
func (a *Account) getExternalClientsLocked() []*client {
	if len(a.clients) == 0 {
		return nil
	}
	var external []*client
	for cl := range a.clients {
		if isInternalClient(cl.kind) {
			continue
		}
		external = append(external, cl)
	}
	return external
}
// Called to track a remote server and connections and leafnodes it
// has for this account.
func (a *Account) updateRemoteServer(m *AccountNumConns) []*client {
@@ -398,8 +413,10 @@ func (a *Account) updateRemoteServer(m *AccountNumConns) []*client {
// conservative and bit harsh here. Clients will reconnect if we over compensate.
var clients []*client
if mtce {
clients = a.getClientsLocked()
slices.SortFunc(clients, func(i, j *client) int { return -i.start.Compare(j.start) }) // reserve
clients = a.getExternalClientsLocked()
// Sort in reverse chronological.
slices.SortFunc(clients, func(i, j *client) int { return -i.start.Compare(j.start) })
over := (len(a.clients) - int(a.sysclients) + int(a.nrclients)) - int(a.mconns)
if over < len(clients) {
clients = clients[:over]
@@ -3769,9 +3786,9 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim
ajs := a.js
a.mu.Unlock()
// Sort if we are over the limit.
// Sort in chronological order so that most recent connections over the limit are pruned.
if a.MaxTotalConnectionsReached() {
slices.SortFunc(clients, func(i, j *client) int { return -i.start.Compare(j.start) }) // sort in reverse order
slices.SortFunc(clients, func(i, j *client) int { return i.start.Compare(j.start) })
}
// If JetStream is enabled for this server we will call into configJetStream for the account
@@ -3783,6 +3800,11 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim
// Absent reload of js server cfg, this is going to be broken until js is disabled
a.incomplete = true
a.mu.Unlock()
} else {
a.mu.Lock()
// Refresh reference, we've just enabled JetStream, so it would have been nil before.
ajs = a.js
a.mu.Unlock()
}
} else if a.jsLimits != nil {
// We do not have JS enabled for this server, but the account has it enabled so setup
@@ -3811,6 +3833,7 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim
}
}
// client list is in chronological order (older cids at the beginning of the list).
count := 0
for _, c := range clients {
a.mu.RLock()

View File

@@ -403,7 +403,7 @@ func (s *Server) processClientOrLeafCallout(c *client, opts *Options) (authorize
return false, errStr
}
req := []byte(b)
var hdr map[string]string
var hdr []byte
// Check if we have been asked to encrypt.
if xkp != nil {
@@ -413,7 +413,7 @@ func (s *Server) processClientOrLeafCallout(c *client, opts *Options) (authorize
s.Warnf(errStr)
return false, errStr
}
hdr = map[string]string{AuthRequestXKeyHeader: xkey}
hdr = genHeader(hdr, AuthRequestXKeyHeader, xkey)
}
// Send out our request.

View File

@@ -19,6 +19,7 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
@@ -55,7 +56,7 @@ func FetchOCSPResponse(link *ChainLink, opts *OCSPPeerConfig, log *Log) ([]byte,
return nil, err
}
reqEnc := base64.StdEncoding.EncodeToString(reqDER)
reqEnc := encodeOCSPRequest(reqDER)
responders := *link.OCSPWebEndpoints
@@ -68,10 +69,10 @@ func FetchOCSPResponse(link *ChainLink, opts *OCSPPeerConfig, log *Log) ([]byte,
Timeout: timeout,
}
for _, u := range responders {
url := u.String()
log.Debugf(DbgMakingCARequest, url)
url = strings.TrimSuffix(url, "/")
raw, err = getRequestBytes(fmt.Sprintf("%s/%s", url, reqEnc), hc)
responderURL := u.String()
log.Debugf(DbgMakingCARequest, responderURL)
responderURL = strings.TrimSuffix(responderURL, "/")
raw, err = getRequestBytes(fmt.Sprintf("%s/%s", responderURL, reqEnc), hc)
if err == nil {
break
}
@@ -82,3 +83,10 @@ func FetchOCSPResponse(link *ChainLink, opts *OCSPPeerConfig, log *Log) ([]byte,
return raw, nil
}
// encodeOCSPRequest encodes the OCSP request in base64 and URL-encodes it.
// This is needed to fulfill the OCSP responder's requirements for the request format. (X.690)
func encodeOCSPRequest(reqDER []byte) string {
	return url.QueryEscape(base64.StdEncoding.EncodeToString(reqDER))
}

View File

@@ -152,6 +152,7 @@ const (
compressionNegotiated // Marks if this connection has negotiated compression level with remote.
didTLSFirst // Marks if this connection requested and was accepted doing the TLS handshake first (prior to INFO).
isSlowConsumer // Marks connection as a slow consumer.
firstPong // Marks if this is the first PONG received
)
// set the flag (would be equivalent to set the boolean to true)
@@ -2563,6 +2564,14 @@ func (c *client) processPong() {
c.rtt = computeRTT(c.rttStart)
srv := c.srv
reorderGWs := c.kind == GATEWAY && c.gw.outbound
firstPong := c.flags.setIfNotSet(firstPong)
var ri *routeInfo
// When receiving the first PONG, for a route with pooling, we may be
// instructed to start a new route.
if firstPong && c.kind == ROUTER && c.route != nil {
ri = c.route.startNewRoute
c.route.startNewRoute = nil
}
// If compression is currently active for a route/leaf connection, if the
// compression configuration is s2_auto, check if we should change
// the compression level.
@@ -2581,6 +2590,11 @@ func (c *client) processPong() {
if reorderGWs {
srv.gateway.orderOutboundConnections()
}
if ri != nil {
srv.startGoRoutine(func() {
srv.connectToRoute(ri.url, ri.rtype, true, ri.gossipMode, _EMPTY_)
})
}
}
// Select the s2 compression level based on the client's current RTT and the configured
@@ -3084,6 +3098,13 @@ func (c *client) addShadowSub(sub *subscription, ime *ime, enact bool) (*subscri
// Update our route map here. But only if we are not a leaf node or a hub leafnode.
if c.kind != LEAF || c.isHubLeafNode() {
c.srv.updateRemoteSubscription(im.acc, &nsub, 1)
} else if c.kind == LEAF {
// Update all leafnodes that connect to this server. Note that we could have
// used the updateLeafNodes() function since when it does invoke updateSmap()
// this function already takes care of not sending to a spoke leafnode since
// the `nsub` here is already from a spoke leafnode, but to be explicit, we
// use this version that updates only leafnodes that connect to this server.
im.acc.updateLeafNodesEx(&nsub, 1, true)
}
return &nsub, nil
@@ -3192,14 +3213,12 @@ func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool
// Check to see if we have shadow subscriptions.
var updateRoute bool
var updateGWs bool
var isSpokeLeaf bool
shadowSubs := sub.shadow
sub.shadow = nil
if len(shadowSubs) > 0 {
updateRoute = (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil
if updateRoute {
updateGWs = c.srv.gateway.enabled
}
isSpokeLeaf = c.isSpokeLeafNode()
updateRoute = !isSpokeLeaf && (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil
}
sub.close()
c.mu.Unlock()
@@ -3208,16 +3227,12 @@ func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool
for _, nsub := range shadowSubs {
if err := nsub.im.acc.sl.Remove(nsub); err != nil {
c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name)
} else {
if updateRoute {
c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1)
}
if updateGWs {
c.srv.gatewayUpdateSubInterest(nsub.im.acc.Name, nsub, -1)
}
}
// Now check on leafnode updates.
nsub.im.acc.updateLeafNodes(nsub, -1)
if updateRoute {
c.srv.updateRemoteSubscription(nsub.im.acc, nsub, -1)
} else if isSpokeLeaf {
nsub.im.acc.updateLeafNodesEx(nsub, -1, true)
}
}
// Now check to see if this was part of a respMap entry for service imports.

View File

@@ -43,6 +43,14 @@ var (
semVerRe = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`)
)
// formatRevision formats a VCS revision string for display.
// Revisions of at least seven characters (e.g. full 40-character hashes)
// are shortened to the conventional 7-character prefix; anything shorter
// is returned unchanged.
func formatRevision(revision string) string {
	const shortLen = 7
	if len(revision) < shortLen {
		return revision
	}
	return revision[:shortLen]
}
func init() {
// Use build info if present, it would be if building using 'go build .'
// or when using a release.
@@ -50,7 +58,7 @@ func init() {
for _, setting := range info.Settings {
switch setting.Key {
case "vcs.revision":
gitCommit = setting.Value[:7]
gitCommit = formatRevision(setting.Value)
}
}
}
@@ -58,7 +66,7 @@ func init() {
const (
// VERSION is the current version for the server.
VERSION = "2.11.8"
VERSION = "2.11.9"
// PROTO is the currently supported protocol.
// 0 was the original

View File

@@ -20,6 +20,8 @@ import (
"errors"
"fmt"
"math/rand"
"os"
"path/filepath"
"reflect"
"regexp"
"slices"
@@ -70,6 +72,13 @@ type ConsumerInfo struct {
PriorityGroups []PriorityGroupState `json:"priority_groups,omitempty"`
}
// consumerInfoClusterResponse is a response used in a cluster to communicate the consumer info
// back to the meta leader as part of a consumer list request.
type consumerInfoClusterResponse struct {
ConsumerInfo
OfflineReason string `json:"offline_reason,omitempty"` // Reporting when a consumer is offline.
}
type PriorityGroupState struct {
Group string `json:"group"`
PinnedClientID string `json:"pinned_client_id,omitempty"`
@@ -452,6 +461,7 @@ type consumer struct {
dthresh time.Duration
mch chan struct{} // Message channel
qch chan struct{} // Quit channel
mqch chan struct{} // The monitor's quit channel.
inch chan bool // Interest change channel
sfreq int32
ackEventT string
@@ -497,6 +507,10 @@ type consumer struct {
/// pinnedTtl is the remaining time before the current PinId expires.
pinnedTtl *time.Timer
pinnedTS time.Time
// If standalone/single-server, the offline reason needs to be stored directly in the consumer.
// Otherwise, if clustered it will be part of the consumer assignment.
offlineReason string
}
// A single subject filter.
@@ -1021,10 +1035,11 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri
outq: mset.outq,
active: true,
qch: make(chan struct{}),
mqch: make(chan struct{}),
uch: make(chan struct{}, 1),
mch: make(chan struct{}, 1),
sfreq: int32(sampleFreq),
maxdc: uint64(config.MaxDeliver),
maxdc: uint64(max(config.MaxDeliver, 0)), // MaxDeliver is negative (-1) when infinite.
maxp: config.MaxAckPending,
retention: cfg.Retention,
created: time.Now().UTC(),
@@ -1285,6 +1300,26 @@ func (o *consumer) setConsumerAssignment(ca *consumerAssignment) {
}
}
func (o *consumer) monitorQuitC() <-chan struct{} {
if o == nil {
return nil
}
o.mu.RLock()
defer o.mu.RUnlock()
return o.mqch
}
// signalMonitorQuit signals to exit the monitor loop. If there's no Raft node,
// this will be the only way to stop the monitor goroutine.
func (o *consumer) signalMonitorQuit() {
o.mu.Lock()
defer o.mu.Unlock()
if o.mqch != nil {
close(o.mqch)
o.mqch = nil
}
}
func (o *consumer) updateC() <-chan struct{} {
o.mu.RLock()
defer o.mu.RUnlock()
@@ -2239,7 +2274,8 @@ func (o *consumer) updateConfig(cfg *ConsumerConfig) error {
}
// Set MaxDeliver if changed
if cfg.MaxDeliver != o.cfg.MaxDeliver {
o.maxdc = uint64(cfg.MaxDeliver)
// MaxDeliver is negative (-1) when infinite.
o.maxdc = uint64(max(cfg.MaxDeliver, 0))
}
// Set InactiveThreshold if changed.
if val := cfg.InactiveThreshold; val != o.cfg.InactiveThreshold {
@@ -4836,7 +4872,7 @@ func (o *consumer) setMaxPendingBytes(limit int) {
// This does some quick sanity checks to see if we should re-calculate num pending.
// Lock should be held.
func (o *consumer) checkNumPending() uint64 {
if o.mset != nil {
if o.mset != nil && o.mset.store != nil {
var state StreamState
o.mset.store.FastState(&state)
npc := o.numPending()
@@ -5758,6 +5794,13 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error {
}
o.closed = true
// Signal to the monitor loop.
// Can't use only qch here, since that's used when stepping down as a leader.
if o.mqch != nil {
close(o.mqch)
o.mqch = nil
}
// Check if we are the leader and are being deleted (as a node).
if dflag && o.isLeader() {
// If we are clustered and node leader (probable from above), stepdown.
@@ -5880,6 +5923,14 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error {
} else {
err = store.Stop()
}
} else if dflag {
// If there's no store (for example, when it's offline), manually delete the directories.
o.mu.RLock()
stream, consumer := o.stream, o.name
o.mu.RUnlock()
accDir := filepath.Join(js.config.StoreDir, a.GetName())
consumersDir := filepath.Join(accDir, streamsDir, stream, consumerDir)
os.RemoveAll(filepath.Join(consumersDir, consumer))
}
return err

View File

@@ -1668,5 +1668,25 @@
"help": "",
"url": "",
"deprecates": ""
},
{
"constant": "JSStreamOfflineReasonErrF",
"code": 500,
"error_code": 10194,
"description": "stream is offline: {err}",
"comment": "",
"help": "",
"url": "",
"deprecates": ""
},
{
"constant": "JSConsumerOfflineReasonErrF",
"code": 500,
"error_code": 10195,
"description": "consumer is offline: {err}",
"comment": "",
"help": "",
"url": "",
"deprecates": ""
}
]

View File

@@ -419,7 +419,7 @@ type pubMsg struct {
sub string
rply string
si *ServerInfo
hdr map[string]string
hdr []byte
msg any
oct compressionType
echo bool
@@ -428,7 +428,7 @@ type pubMsg struct {
var pubMsgPool sync.Pool
func newPubMsg(c *client, sub, rply string, si *ServerInfo, hdr map[string]string,
func newPubMsg(c *client, sub, rply string, si *ServerInfo, hdr []byte,
msg any, oct compressionType, echo, last bool) *pubMsg {
var m *pubMsg
@@ -601,17 +601,28 @@ RESET:
// Add in NL
b = append(b, _CRLF_...)
// Optional raw header addition.
if pm.hdr != nil {
b = append(pm.hdr, b...)
nhdr := len(pm.hdr)
nsize := len(b) - LEN_CR_LF
// MQTT producers don't have CRLF, so add it back.
if c.isMqtt() {
nsize += LEN_CR_LF
}
// Update pubArgs
// If others will use this later we need to save and restore original.
c.pa.hdr = nhdr
c.pa.size = nsize
c.pa.hdb = []byte(strconv.Itoa(nhdr))
c.pa.szb = []byte(strconv.Itoa(nsize))
}
// Check if we should set content-encoding
if contentHeader != _EMPTY_ {
b = c.setHeader(contentEncodingHeader, contentHeader, b)
}
// Optional header processing.
if pm.hdr != nil {
for k, v := range pm.hdr {
b = c.setHeader(k, v, b)
}
}
// Tracing
if trace {
c.traceInOp(fmt.Sprintf("PUB %s %s %d", c.pa.subject, c.pa.reply, c.pa.size), nil)
@@ -688,7 +699,7 @@ func (s *Server) sendInternalAccountMsg(a *Account, subject string, msg any) err
}
// Used to send an internal message with an optional reply to an arbitrary account.
func (s *Server) sendInternalAccountMsgWithReply(a *Account, subject, reply string, hdr map[string]string, msg any, echo bool) error {
func (s *Server) sendInternalAccountMsgWithReply(a *Account, subject, reply string, hdr []byte, msg any, echo bool) error {
s.mu.RLock()
if s.sys == nil || s.sys.sendq == nil {
s.mu.RUnlock()

View File

@@ -14,6 +14,7 @@
package server
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/binary"
@@ -1333,8 +1334,54 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro
}
var cfg FileStreamInfo
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling stream metafile %q: %v", metafile, err)
decoder := json.NewDecoder(bytes.NewReader(buf))
decoder.DisallowUnknownFields()
strictErr := decoder.Decode(&cfg)
if strictErr != nil {
cfg = FileStreamInfo{}
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling stream metafile %q: %v", metafile, err)
continue
}
}
if supported := supportsRequiredApiLevel(cfg.Metadata); !supported || strictErr != nil {
var offlineReason string
if !supported {
apiLevel := getRequiredApiLevel(cfg.Metadata)
offlineReason = fmt.Sprintf("unsupported - required API level: %s, current API level: %d", apiLevel, JSApiLevel)
s.Warnf(" Detected unsupported stream '%s > %s', delete the stream or upgrade the server to API level %s", a.Name, cfg.StreamConfig.Name, apiLevel)
} else {
offlineReason = fmt.Sprintf("decoding error: %v", strictErr)
s.Warnf(" Error unmarshalling stream metafile %q: %v", metafile, strictErr)
}
singleServerMode := !s.JetStreamIsClustered() && s.standAloneMode()
if singleServerMode {
// Fake a stream, so we can respond to API requests as single-server.
mset := &stream{
acc: a,
jsa: jsa,
cfg: cfg.StreamConfig,
js: js,
srv: s,
stype: cfg.Storage,
consumers: make(map[string]*consumer),
active: false,
created: time.Now().UTC(),
offlineReason: offlineReason,
}
if !cfg.Created.IsZero() {
mset.created = cfg.Created
}
mset.closed.Store(true)
jsa.mu.Lock()
jsa.streams[cfg.Name] = mset
jsa.mu.Unlock()
// Now do the consumers.
odir := filepath.Join(sdir, fi.Name(), consumerDir)
consumers = append(consumers, &ce{mset, odir})
}
continue
}
@@ -1455,13 +1502,66 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro
}
var cfg FileConsumerInfo
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling consumer metafile %q: %v", metafile, err)
decoder := json.NewDecoder(bytes.NewReader(buf))
decoder.DisallowUnknownFields()
strictErr := decoder.Decode(&cfg)
if strictErr != nil {
cfg = FileConsumerInfo{}
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling consumer metafile %q: %v", metafile, err)
continue
}
}
if supported := supportsRequiredApiLevel(cfg.Metadata); !supported || strictErr != nil {
var offlineReason string
if !supported {
apiLevel := getRequiredApiLevel(cfg.Metadata)
offlineReason = fmt.Sprintf("unsupported - required API level: %s, current API level: %d", apiLevel, JSApiLevel)
s.Warnf(" Detected unsupported consumer '%s > %s > %s', delete the consumer or upgrade the server to API level %s", a.Name, e.mset.name(), cfg.Name, apiLevel)
} else {
offlineReason = fmt.Sprintf("decoding error: %v", strictErr)
s.Warnf(" Error unmarshalling consumer metafile %q: %v", metafile, strictErr)
}
singleServerMode := !s.JetStreamIsClustered() && s.standAloneMode()
if singleServerMode {
if !e.mset.closed.Load() {
s.Warnf(" Stopping unsupported stream '%s > %s'", a.Name, e.mset.name())
e.mset.mu.Lock()
e.mset.offlineReason = "stopped"
e.mset.mu.Unlock()
e.mset.stop(false, false)
}
// Fake a consumer, so we can respond to API requests as single-server.
o := &consumer{
mset: e.mset,
js: s.getJetStream(),
acc: a,
srv: s,
cfg: cfg.ConsumerConfig,
active: false,
stream: e.mset.name(),
name: cfg.Name,
dseq: 1,
sseq: 1,
created: time.Now().UTC(),
closed: true,
offlineReason: offlineReason,
}
if !cfg.Created.IsZero() {
o.created = cfg.Created
}
e.mset.mu.Lock()
e.mset.setConsumer(o)
e.mset.mu.Unlock()
}
continue
}
isEphemeral := !isDurableConsumer(&cfg.ConsumerConfig)
if isEphemeral {
// This is an ephermal consumer and this could fail on restart until
// This is an ephemeral consumer and this could fail on restart until
// the consumer can reconnect. We will create it as a durable and switch it.
cfg.ConsumerConfig.Durable = ofi.Name()
}

View File

@@ -485,8 +485,9 @@ type JSApiStreamListRequest struct {
type JSApiStreamListResponse struct {
ApiResponse
ApiPaged
Streams []*StreamInfo `json:"streams"`
Missing []string `json:"missing,omitempty"`
Streams []*StreamInfo `json:"streams"`
Missing []string `json:"missing,omitempty"`
Offline map[string]string `json:"offline,omitempty"`
}
const JSApiStreamListResponseType = "io.nats.jetstream.api.v1.stream_list_response"
@@ -747,8 +748,9 @@ const JSApiConsumerNamesResponseType = "io.nats.jetstream.api.v1.consumer_names_
type JSApiConsumerListResponse struct {
ApiResponse
ApiPaged
Consumers []*ConsumerInfo `json:"consumers"`
Missing []string `json:"missing,omitempty"`
Consumers []*ConsumerInfo `json:"consumers"`
Missing []string `json:"missing,omitempty"`
Offline map[string]string `json:"offline,omitempty"`
}
const JSApiConsumerListResponseType = "io.nats.jetstream.api.v1.consumer_list_response"
@@ -1042,9 +1044,11 @@ type delayedAPIResponse struct {
subject string
reply string
request string
hdr []byte
response string
rg *raftGroup
deadline time.Time
noJs bool
next *delayedAPIResponse
}
@@ -1147,7 +1151,12 @@ func (s *Server) delayedAPIResponder() {
next()
case <-tm.C:
if r != nil {
s.sendAPIErrResponse(r.ci, r.acc, r.subject, r.reply, r.request, r.response)
// If it's not a JS API error, send it as a raw response without additional API/audit tracking.
if r.noJs {
s.sendInternalAccountMsgWithReply(r.acc, r.subject, _EMPTY_, r.hdr, r.response, false)
} else {
s.sendAPIErrResponse(r.ci, r.acc, r.subject, r.reply, r.request, r.response)
}
pop()
}
next()
@@ -1157,7 +1166,13 @@ func (s *Server) delayedAPIResponder() {
func (s *Server) sendDelayedAPIErrResponse(ci *ClientInfo, acc *Account, subject, reply, request, response string, rg *raftGroup, duration time.Duration) {
s.delayedAPIResponses.push(&delayedAPIResponse{
ci, acc, subject, reply, request, response, rg, time.Now().Add(duration), nil,
ci, acc, subject, reply, request, nil, response, rg, time.Now().Add(duration), false, nil,
})
}
func (s *Server) sendDelayedErrResponse(acc *Account, subject string, hdr []byte, response string, duration time.Duration) {
s.delayedAPIResponses.push(&delayedAPIResponse{
nil, acc, subject, _EMPTY_, _EMPTY_, hdr, response, nil, time.Now().Add(duration), true, nil,
})
}
@@ -1727,6 +1742,11 @@ func (s *Server) jsStreamUpdateRequest(sub *subscription, c *client, _ *Account,
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if mset.offlineReason != _EMPTY_ {
resp.Error = NewJSStreamOfflineReasonError(errors.New(mset.offlineReason))
s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil, errRespDelay)
return
}
// Update asset version metadata.
setStaticStreamMetadata(&cfg)
@@ -1958,7 +1978,17 @@ func (s *Server) jsStreamListRequest(sub *subscription, c *client, _ *Account, s
offset = scnt
}
var missingNames []string
for _, mset := range msets[offset:] {
if mset.offlineReason != _EMPTY_ {
if resp.Offline == nil {
resp.Offline = make(map[string]string, 1)
}
resp.Offline[mset.getCfgName()] = mset.offlineReason
missingNames = append(missingNames, mset.getCfgName())
continue
}
config := mset.config()
resp.Streams = append(resp.Streams, &StreamInfo{
Created: mset.createdTime(),
@@ -1976,6 +2006,7 @@ func (s *Server) jsStreamListRequest(sub *subscription, c *client, _ *Account, s
resp.Total = scnt
resp.Limit = JSApiListLimit
resp.Offset = offset
resp.Missing = missingNames
s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp))
}
@@ -2015,6 +2046,13 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, a *Account, s
if sa != nil {
clusterWideConsCount = len(sa.consumers)
offline = s.allPeersOffline(sa.Group)
if sa.unsupported != nil && sa.Group != nil && cc.meta != nil && sa.Group.isMember(cc.meta.ID()) {
// If we're a member for this stream, and it's not supported, report it as offline.
resp.Error = NewJSStreamOfflineReasonError(errors.New(sa.unsupported.reason))
s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil, errRespDelay)
js.mu.RUnlock()
return
}
}
js.mu.RUnlock()
@@ -2120,6 +2158,12 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, a *Account, s
}
}
if mset.offlineReason != _EMPTY_ {
resp.Error = NewJSStreamOfflineReasonError(errors.New(mset.offlineReason))
s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil, errRespDelay)
return
}
config := mset.config()
resp.StreamInfo = &StreamInfo{
Created: mset.createdTime(),
@@ -3447,6 +3491,10 @@ func (s *Server) jsMsgGetRequest(sub *subscription, c *client, _ *Account, subje
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if mset.offlineReason != _EMPTY_ {
// Just let the request time out.
return
}
var svp StoreMsg
var sm *StoreMsg
@@ -3533,6 +3581,11 @@ func (s *Server) jsConsumerUnpinRequest(sub *subscription, c *client, _ *Account
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if sa.unsupported != nil {
js.mu.RUnlock()
// Just let the request time out.
return
}
ca, ok := sa.consumers[consumer]
if !ok || ca == nil {
@@ -3541,6 +3594,11 @@ func (s *Server) jsConsumerUnpinRequest(sub *subscription, c *client, _ *Account
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if ca.unsupported != nil {
js.mu.RUnlock()
// Just let the request time out.
return
}
js.mu.RUnlock()
// Then check if we are the leader.
@@ -3572,12 +3630,20 @@ func (s *Server) jsConsumerUnpinRequest(sub *subscription, c *client, _ *Account
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if mset.offlineReason != _EMPTY_ {
// Just let the request time out.
return
}
o := mset.lookupConsumer(consumer)
if o == nil {
resp.Error = NewJSConsumerNotFoundError()
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if o.offlineReason != _EMPTY_ {
// Just let the request time out.
return
}
var foundPriority bool
for _, group := range o.config().PriorityGroups {
@@ -4437,11 +4503,23 @@ func (s *Server) jsConsumerCreateRequest(sub *subscription, c *client, a *Accoun
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if stream.offlineReason != _EMPTY_ {
resp.Error = NewJSStreamOfflineReasonError(errors.New(stream.offlineReason))
s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil, errRespDelay)
return
}
if o := stream.lookupConsumer(consumerName); o != nil {
if o.offlineReason != _EMPTY_ {
resp.Error = NewJSConsumerOfflineReasonError(errors.New(o.offlineReason))
s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil, errRespDelay)
return
}
// If the consumer already exists then don't allow updating the PauseUntil, just set
// it back to whatever the current configured value is.
o.mu.RLock()
req.Config.PauseUntil = o.cfg.PauseUntil
o.mu.RUnlock()
}
// Initialize/update asset version metadata.
@@ -4462,9 +4540,11 @@ func (s *Server) jsConsumerCreateRequest(sub *subscription, c *client, a *Accoun
resp.ConsumerInfo = setDynamicConsumerInfoMetadata(o.initialInfo())
s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp))
o.mu.RLock()
if o.cfg.PauseUntil != nil && !o.cfg.PauseUntil.IsZero() && time.Now().Before(*o.cfg.PauseUntil) {
o.sendPauseAdvisoryLocked(&o.cfg)
}
o.mu.RUnlock()
}
// Request for the list of all consumer names.
@@ -4668,7 +4748,16 @@ func (s *Server) jsConsumerListRequest(sub *subscription, c *client, _ *Account,
offset = ocnt
}
var missingNames []string
for _, o := range obs[offset:] {
if o.offlineReason != _EMPTY_ {
if resp.Offline == nil {
resp.Offline = make(map[string]string, 1)
}
resp.Offline[o.name] = o.offlineReason
missingNames = append(missingNames, o.name)
continue
}
if cinfo := o.info(); cinfo != nil {
resp.Consumers = append(resp.Consumers, cinfo)
}
@@ -4679,6 +4768,7 @@ func (s *Server) jsConsumerListRequest(sub *subscription, c *client, _ *Account,
resp.Total = ocnt
resp.Limit = JSApiListLimit
resp.Offset = offset
resp.Missing = missingNames
s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp))
}
@@ -4730,6 +4820,13 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account,
offline = s.allPeersOffline(rg)
isMember = rg.isMember(ourID)
}
if ca.unsupported != nil && isMember {
// If we're a member for this consumer, and it's not supported, report it as offline.
resp.Error = NewJSConsumerOfflineReasonError(errors.New(ca.unsupported.reason))
s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil, errRespDelay)
js.mu.RUnlock()
return
}
}
// Capture consumer leader here.
isConsumerLeader := cc.isConsumerLeader(acc.Name, streamName, consumerName)
@@ -4856,6 +4953,12 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account,
return
}
if obs.offlineReason != _EMPTY_ {
resp.Error = NewJSConsumerOfflineReasonError(errors.New(obs.offlineReason))
s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil, errRespDelay)
return
}
if resp.ConsumerInfo = setDynamicConsumerInfoMetadata(obs.info()); resp.ConsumerInfo == nil {
// This consumer returned nil which means it's closed. Respond with not found.
resp.Error = NewJSConsumerNotFoundError()
@@ -4997,6 +5100,11 @@ func (s *Server) jsConsumerPauseRequest(sub *subscription, c *client, _ *Account
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if sa.unsupported != nil {
js.mu.RUnlock()
// Just let the request time out.
return
}
ca, ok := sa.consumers[consumer]
if !ok || ca == nil {
@@ -5005,6 +5113,11 @@ func (s *Server) jsConsumerPauseRequest(sub *subscription, c *client, _ *Account
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if ca.unsupported != nil {
js.mu.RUnlock()
// Just let the request time out.
return
}
nca := *ca
ncfg := *ca.Config
@@ -5038,6 +5151,10 @@ func (s *Server) jsConsumerPauseRequest(sub *subscription, c *client, _ *Account
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if mset.offlineReason != _EMPTY_ {
// Just let the request time out.
return
}
obs := mset.lookupConsumer(consumer)
if obs == nil {
@@ -5045,6 +5162,10 @@ func (s *Server) jsConsumerPauseRequest(sub *subscription, c *client, _ *Account
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))
return
}
if obs.offlineReason != _EMPTY_ {
// Just let the request time out.
return
}
ncfg := obs.cfg
pauseUTC := req.PauseUntil.UTC()

View File

File diff suppressed because it is too large Load Diff

View File

@@ -176,6 +176,9 @@ const (
// JSConsumerOfflineErr consumer is offline
JSConsumerOfflineErr ErrorIdentifier = 10119
// JSConsumerOfflineReasonErrF consumer is offline: {err}
JSConsumerOfflineReasonErrF ErrorIdentifier = 10195
// JSConsumerOnMappedErr consumer direct on a mapped consumer
JSConsumerOnMappedErr ErrorIdentifier = 10092
@@ -440,6 +443,9 @@ const (
// JSStreamOfflineErr stream is offline
JSStreamOfflineErr ErrorIdentifier = 10118
// JSStreamOfflineReasonErrF stream is offline: {err}
JSStreamOfflineReasonErrF ErrorIdentifier = 10194
// JSStreamPurgeFailedF Generic stream purge failure error string ({err})
JSStreamPurgeFailedF ErrorIdentifier = 10110
@@ -566,6 +572,7 @@ var (
JSConsumerNameTooLongErrF: {Code: 400, ErrCode: 10102, Description: "consumer name is too long, maximum allowed is {max}"},
JSConsumerNotFoundErr: {Code: 404, ErrCode: 10014, Description: "consumer not found"},
JSConsumerOfflineErr: {Code: 500, ErrCode: 10119, Description: "consumer is offline"},
JSConsumerOfflineReasonErrF: {Code: 500, ErrCode: 10195, Description: "consumer is offline: {err}"},
JSConsumerOnMappedErr: {Code: 400, ErrCode: 10092, Description: "consumer direct on a mapped consumer"},
JSConsumerOverlappingSubjectFilters: {Code: 400, ErrCode: 10138, Description: "consumer subject filters cannot overlap"},
JSConsumerPriorityPolicyWithoutGroup: {Code: 400, ErrCode: 10159, Description: "Setting PriorityPolicy requires at least one PriorityGroup to be set"},
@@ -654,6 +661,7 @@ var (
JSStreamNotFoundErr: {Code: 404, ErrCode: 10059, Description: "stream not found"},
JSStreamNotMatchErr: {Code: 400, ErrCode: 10060, Description: "expected stream does not match"},
JSStreamOfflineErr: {Code: 500, ErrCode: 10118, Description: "stream is offline"},
JSStreamOfflineReasonErrF: {Code: 500, ErrCode: 10194, Description: "stream is offline: {err}"},
JSStreamPurgeFailedF: {Code: 500, ErrCode: 10110, Description: "{err}"},
JSStreamReplicasNotSupportedErr: {Code: 500, ErrCode: 10074, Description: "replicas > 1 not supported in non-clustered mode"},
JSStreamReplicasNotUpdatableErr: {Code: 400, ErrCode: 10061, Description: "Replicas configuration can not be updated"},
@@ -1331,6 +1339,22 @@ func NewJSConsumerOfflineError(opts ...ErrorOption) *ApiError {
return ApiErrors[JSConsumerOfflineErr]
}
// NewJSConsumerOfflineReasonError creates a new JSConsumerOfflineReasonErrF error: "consumer is offline: {err}"
func NewJSConsumerOfflineReasonError(err error, opts ...ErrorOption) *ApiError {
	eopts := parseOpts(opts)
	// If the caller already supplied an ApiError via options, pass it through untouched.
	if ae, ok := eopts.err.(*ApiError); ok {
		return ae
	}
	tmpl := ApiErrors[JSConsumerOfflineReasonErrF]
	// Substitute {err} in the template description with the supplied error.
	replacer := strings.NewReplacer(tmpl.toReplacerArgs([]interface{}{"{err}", err})...)
	return &ApiError{
		Code:        tmpl.Code,
		ErrCode:     tmpl.ErrCode,
		Description: replacer.Replace(tmpl.Description),
	}
}
// NewJSConsumerOnMappedError creates a new JSConsumerOnMappedErr error: "consumer direct on a mapped consumer"
func NewJSConsumerOnMappedError(opts ...ErrorOption) *ApiError {
eopts := parseOpts(opts)
@@ -2349,6 +2373,22 @@ func NewJSStreamOfflineError(opts ...ErrorOption) *ApiError {
return ApiErrors[JSStreamOfflineErr]
}
// NewJSStreamOfflineReasonError creates a new JSStreamOfflineReasonErrF error: "stream is offline: {err}"
func NewJSStreamOfflineReasonError(err error, opts ...ErrorOption) *ApiError {
	eopts := parseOpts(opts)
	// If the caller already supplied an ApiError via options, pass it through untouched.
	if ae, ok := eopts.err.(*ApiError); ok {
		return ae
	}
	tmpl := ApiErrors[JSStreamOfflineReasonErrF]
	// Substitute {err} in the template description with the supplied error.
	replacer := strings.NewReplacer(tmpl.toReplacerArgs([]interface{}{"{err}", err})...)
	return &ApiError{
		Code:        tmpl.Code,
		ErrCode:     tmpl.ErrCode,
		Description: replacer.Replace(tmpl.Description),
	}
}
// NewJSStreamPurgeFailedError creates a new JSStreamPurgeFailedF error: "{err}"
func NewJSStreamPurgeFailedError(err error, opts ...ErrorOption) *ApiError {
eopts := parseOpts(opts)

View File

@@ -24,6 +24,23 @@ const (
JSServerLevelMetadataKey = "_nats.level"
)
// getRequiredApiLevel returns the required API level for the JetStream asset,
// as recorded in its metadata, or the empty string when none is set.
func getRequiredApiLevel(metadata map[string]string) string {
	level, ok := metadata[JSRequiredLevelMetadataKey]
	if !ok || level == _EMPTY_ {
		return _EMPTY_
	}
	return level
}
// supportsRequiredApiLevel returns whether the required API level for the JetStream asset is supported.
// An asset with no required level (or an empty one) is always supported; otherwise the
// metadata value must parse as an integer no greater than this server's JSApiLevel.
func supportsRequiredApiLevel(metadata map[string]string) bool {
	level := getRequiredApiLevel(metadata)
	if level == _EMPTY_ {
		return true
	}
	parsed, err := strconv.Atoi(level)
	return err == nil && parsed <= JSApiLevel
}
// setStaticStreamMetadata sets JetStream stream metadata, like the server version and API level.
// Any dynamic metadata is removed, it must not be stored and only be added for responses.
func setStaticStreamMetadata(cfg *StreamConfig) {
@@ -50,10 +67,15 @@ func setStaticStreamMetadata(cfg *StreamConfig) {
// setDynamicStreamMetadata adds dynamic fields into the (copied) metadata.
func setDynamicStreamMetadata(cfg *StreamConfig) *StreamConfig {
newCfg := *cfg
var newCfg StreamConfig
if cfg != nil {
newCfg = *cfg
}
newCfg.Metadata = make(map[string]string)
for key, value := range cfg.Metadata {
newCfg.Metadata[key] = value
if cfg != nil {
for key, value := range cfg.Metadata {
newCfg.Metadata[key] = value
}
}
newCfg.Metadata[JSServerVersionMetadataKey] = VERSION
newCfg.Metadata[JSServerLevelMetadataKey] = strconv.Itoa(JSApiLevel)
@@ -121,10 +143,15 @@ func setStaticConsumerMetadata(cfg *ConsumerConfig) {
// setDynamicConsumerMetadata adds dynamic fields into the (copied) metadata.
func setDynamicConsumerMetadata(cfg *ConsumerConfig) *ConsumerConfig {
newCfg := *cfg
var newCfg ConsumerConfig
if cfg != nil {
newCfg = *cfg
}
newCfg.Metadata = make(map[string]string)
for key, value := range cfg.Metadata {
newCfg.Metadata[key] = value
if cfg != nil {
for key, value := range cfg.Metadata {
newCfg.Metadata[key] = value
}
}
newCfg.Metadata[JSServerVersionMetadataKey] = VERSION
newCfg.Metadata[JSServerLevelMetadataKey] = strconv.Itoa(JSApiLevel)

View File

@@ -80,7 +80,11 @@ func validateTrustedOperators(o *Options) error {
if err != nil {
return fmt.Errorf("default sentinel JWT not valid")
}
if !juc.BearerToken {
if !juc.BearerToken && juc.IssuerAccount != "" && juc.HasEmptyPermissions() {
// we cannot resolve the account yet - but this looks like a scoped user
// it will be rejected at runtime if not valid
} else if !juc.BearerToken {
return fmt.Errorf("default sentinel must be a bearer token")
}
}

View File

@@ -1418,7 +1418,7 @@ func (c *client) processLeafnodeInfo(info *Info) {
c.setPermissions(perms)
}
var resumeConnect, checkSyncConsumers bool
var resumeConnect bool
// If this is a remote connection and this is the first INFO protocol,
// then we need to finish the connect process by sending CONNECT, etc..
@@ -1428,7 +1428,6 @@ func (c *client) processLeafnodeInfo(info *Info) {
resumeConnect = true
} else if !firstINFO && didSolicit {
c.leaf.remoteAccName = info.RemoteAccount
checkSyncConsumers = info.JetStream
}
// Check if we have the remote account information and if so make sure it's stored.
@@ -1448,11 +1447,10 @@ func (c *client) processLeafnodeInfo(info *Info) {
s.leafNodeFinishConnectProcess(c)
}
// If we have JS enabled and so does the other side, we will
// check to see if we need to kick any internal source or mirror consumers.
if checkSyncConsumers {
s.checkInternalSyncConsumers(c.acc, info.Domain)
}
// Check to see if we need to kick any internal source or mirror consumers.
// This will be a no-op if JetStream not enabled for this server or if the bound account
// does not have jetstream.
s.checkInternalSyncConsumers(c.acc)
}
func (s *Server) negotiateLeafCompression(c *client, didSolicit bool, infoCompression string, co *CompressionOpts) (bool, error) {
@@ -1984,16 +1982,16 @@ func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) erro
// This will be a no-op as needed.
s.sendLeafNodeConnect(c.acc)
// If we have JS enabled and so does the other side, we will
// check to see if we need to kick any internal source or mirror consumers.
if proto.JetStream {
s.checkInternalSyncConsumers(acc, proto.Domain)
}
// Check to see if we need to kick any internal source or mirror consumers.
// This will be a no-op if JetStream not enabled for this server or if the bound account
// does not have jetstream.
s.checkInternalSyncConsumers(acc)
return nil
}
// checkInternalSyncConsumers
func (s *Server) checkInternalSyncConsumers(acc *Account, remoteDomain string) {
func (s *Server) checkInternalSyncConsumers(acc *Account) {
// Grab our js
js := s.getJetStream()
@@ -2012,6 +2010,7 @@ func (s *Server) checkInternalSyncConsumers(acc *Account, remoteDomain string) {
if jsa == nil {
return
}
var streams []*stream
jsa.mu.RLock()
for _, mset := range jsa.streams {
@@ -2029,7 +2028,7 @@ func (s *Server) checkInternalSyncConsumers(acc *Account, remoteDomain string) {
// Now loop through all candidates and check if we are the leader and have NOT
// created the sync up consumer.
for _, mset := range streams {
mset.retryDisconnectedSyncConsumers(remoteDomain)
mset.retryDisconnectedSyncConsumers()
}
}
@@ -2228,9 +2227,11 @@ func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscrip
acc.updateLeafNodes(sub, delta)
}
// updateLeafNodes will make sure to update the account smap for the subscription.
// updateLeafNodesEx will make sure to update the account smap for the subscription.
// Will also forward to all leaf nodes as needed.
func (acc *Account) updateLeafNodes(sub *subscription, delta int32) {
// If `hubOnly` is true, then will update only leaf nodes that connect to this server
// (that is, for which this server acts as a hub to them).
func (acc *Account) updateLeafNodesEx(sub *subscription, delta int32, hubOnly bool) {
if acc == nil || sub == nil {
return
}
@@ -2278,8 +2279,14 @@ func (acc *Account) updateLeafNodes(sub *subscription, delta int32) {
if ln == sub.client {
continue
}
// Check to make sure this sub does not have an origin cluster that matches the leafnode.
ln.mu.Lock()
// If `hubOnly` is true, it means that we want to update only leafnodes
// that connect to this server (so isHubLeafNode() would return `true`).
if hubOnly && !ln.isHubLeafNode() {
ln.mu.Unlock()
continue
}
// Check to make sure this sub does not have an origin cluster that matches the leafnode.
// If skipped, make sure that we still let go the "$LDS." subscription that allows
// the detection of loops as long as different cluster.
clusterDifferent := cluster != ln.remoteCluster()
@@ -2290,6 +2297,12 @@ func (acc *Account) updateLeafNodes(sub *subscription, delta int32) {
}
}
// updateLeafNodes will make sure to update the account smap for the subscription.
// Will also forward to all leaf nodes as needed.
func (acc *Account) updateLeafNodes(sub *subscription, delta int32) {
	// Delegate with hubOnly=false so every leafnode connection, not just the
	// ones for which this server acts as a hub, receives the interest update.
	acc.updateLeafNodesEx(sub, delta, false)
}
// This will make an update to our internal smap and determine if we should send out
// an interest update to the remote side.
// Lock should be held.

View File

@@ -3788,6 +3788,9 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus {
}
for stream, sa := range asa {
if sa != nil && sa.unsupported != nil {
continue
}
// Make sure we can look up
if err := js.isStreamHealthy(acc, sa); err != nil {
if !details {
@@ -3905,11 +3908,14 @@ type RaftzGroup struct {
Applied uint64 `json:"applied"`
CatchingUp bool `json:"catching_up,omitempty"`
Leader string `json:"leader,omitempty"`
LeaderSince *time.Time `json:"leader_since,omitempty"`
EverHadLeader bool `json:"ever_had_leader"`
Term uint64 `json:"term"`
Vote string `json:"voted_for,omitempty"`
PTerm uint64 `json:"pterm"`
PIndex uint64 `json:"pindex"`
SystemAcc bool `json:"system_account"`
TrafficAcc string `json:"traffic_account"`
IPQPropLen int `json:"ipq_proposal_len"`
IPQEntryLen int `json:"ipq_entry_len"`
IPQRespLen int `json:"ipq_resp_len"`
@@ -4010,11 +4016,14 @@ func (s *Server) Raftz(opts *RaftzOptions) *RaftzStatus {
Applied: n.applied,
CatchingUp: n.catchup != nil,
Leader: n.leader,
LeaderSince: n.leaderSince.Load(),
EverHadLeader: n.pleader.Load(),
Term: n.term,
Vote: n.vote,
PTerm: n.pterm,
PIndex: n.pindex,
SystemAcc: n.IsSystemAccount(),
TrafficAcc: n.acc.GetName(),
IPQPropLen: n.prop.len(),
IPQEntryLen: n.entry.len(),
IPQRespLen: n.resp.len(),

View File

@@ -31,6 +31,7 @@ import (
"sync/atomic"
"time"
"github.com/antithesishq/antithesis-sdk-go/assert"
"github.com/nats-io/nats-server/v2/internal/fastrand"
"github.com/minio/highwayhash"
@@ -48,6 +49,7 @@ type RaftNode interface {
Size() (entries, bytes uint64)
Progress() (index, commit, applied uint64)
Leader() bool
LeaderSince() *time.Time
Quorum() bool
Current() bool
Healthy() bool
@@ -81,6 +83,7 @@ type RaftNode interface {
Delete()
RecreateInternalSubs() error
IsSystemAccount() bool
GetTrafficAccountName() string
}
type WAL interface {
@@ -145,10 +148,11 @@ type raft struct {
bytes uint64 // Total amount of bytes stored in the WAL. (Saves us from needing to call wal.FastState very often)
werr error // Last write error
state atomic.Int32 // RaftState
leaderState atomic.Bool // Is in (complete) leader state.
hh hash.Hash64 // Highwayhash, used for snapshots
snapfile string // Snapshot filename
state atomic.Int32 // RaftState
leaderState atomic.Bool // Is in (complete) leader state.
leaderSince atomic.Pointer[time.Time] // How long since becoming leader.
hh hash.Hash64 // Highwayhash, used for snapshots
snapfile string // Snapshot filename
csz int // Cluster size
qn int // Number of nodes needed to establish quorum
@@ -579,6 +583,13 @@ func (n *raft) IsSystemAccount() bool {
return n.isSysAcc.Load()
}
// GetTrafficAccountName returns the account name of the account used for replication traffic.
func (n *raft) GetTrafficAccountName() string {
	n.RLock()
	defer n.RUnlock()
	// n.acc is guarded by the node lock, hence read under RLock.
	name := n.acc.GetName()
	return name
}
func (n *raft) RecreateInternalSubs() error {
n.Lock()
defer n.Unlock()
@@ -1092,7 +1103,11 @@ func (n *raft) Applied(index uint64) (entries uint64, bytes uint64) {
// Quick sanity-check to confirm we're still leader.
// In which case we must signal, since switchToLeader would not have done so already.
if n.State() == Leader {
n.leaderState.Store(true)
if !n.leaderState.Swap(true) {
// Only update timestamp if leader state actually changed.
nowts := time.Now().UTC()
n.leaderSince.Store(&nowts)
}
n.updateLeadChange(true)
}
}
@@ -1399,6 +1414,15 @@ func (n *raft) Leader() bool {
return n.leaderState.Load()
}
// LeaderSince returns the time at which this node last became leader,
// or nil if it currently is not leader (or the node itself is nil).
func (n *raft) LeaderSince() *time.Time {
	if n != nil {
		return n.leaderSince.Load()
	}
	return nil
}
// stepdown immediately steps down the Raft node to the
// follower state. This will take the lock itself.
func (n *raft) stepdown(newLeader string) {
@@ -1813,6 +1837,7 @@ func (n *raft) shutdown() {
// to notify the runAs goroutines to stop what they're doing.
if n.state.Swap(int32(Closed)) != int32(Closed) {
n.leaderState.Store(false)
n.leaderSince.Store(nil)
close(n.quit)
}
}
@@ -3300,6 +3325,14 @@ func (n *raft) truncateWAL(term, index uint64) {
n.debug("Clearing WAL state (no commits)")
}
}
if index < n.commit {
assert.Unreachable("WAL truncate lost commits", map[string]any{
"term": term,
"index": index,
"commit": n.commit,
"applied": n.applied,
})
}
defer func() {
// Check to see if we invalidated any snapshots that might have held state
@@ -3388,7 +3421,6 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) {
// Are we receiving from another leader.
if n.State() == Leader {
// If we are the same we should step down to break the tie.
if lterm >= n.term {
// If the append entry term is newer than the current term, erase our
// vote.
@@ -3396,6 +3428,16 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) {
n.term = lterm
n.vote = noVote
n.writeTermVote()
} else {
assert.Unreachable(
"Two leaders using the same term",
map[string]any{
"Node id": n.id,
"Node term": n.term,
"AppendEntry id": ae.leader,
"AppendEntry term": ae.term,
"AppendEntry lterm": ae.lterm,
})
}
n.debug("Received append entry from another leader, stepping down to %q", ae.leader)
n.stepdownLocked(ae.leader)
@@ -3444,22 +3486,50 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) {
}
}
// If we are/were catching up ignore old catchup subs.
// This could happen when we stall or cancel a catchup.
if !isNew && sub != nil && (!catchingUp || sub != n.catchup.sub) {
// If we are/were catching up ignore old catchup subs, but only if catching up from an older server
// that doesn't send the leader term when catching up. We can reject old catchups from newer subs
// later, just by checking the append entry is on the correct term.
if !isNew && sub != nil && ae.lterm == 0 && (!catchingUp || sub != n.catchup.sub) {
n.Unlock()
n.debug("AppendEntry ignoring old entry from previous catchup")
return
}
// If this term is greater than ours.
if lterm > n.term {
n.term = lterm
n.vote = noVote
if isNew {
n.writeTermVote()
}
if n.State() != Follower {
n.debug("Term higher than ours and we are not a follower: %v, stepping down to %q", n.State(), ae.leader)
n.stepdownLocked(ae.leader)
}
} else if lterm < n.term && sub != nil && (isNew || ae.lterm != 0) {
// Anything that's below our expected highest term needs to be rejected.
// Unless we're replaying (sub=nil), in which case we'll always continue.
// For backward-compatibility we shouldn't reject if we're being caught up by an old server.
if !isNew {
n.debug("AppendEntry ignoring old entry from previous catchup")
n.Unlock()
return
}
n.debug("Rejected AppendEntry from a leader (%s) with term %d which is less than ours", ae.leader, lterm)
ar := newAppendEntryResponse(n.term, n.pindex, n.id, false)
n.Unlock()
n.sendRPC(ae.reply, _EMPTY_, ar.encode(arbuf))
arPool.Put(ar)
return
}
// Check state if we are catching up.
var resetCatchingUp bool
if catchingUp {
if cs := n.catchup; cs != nil && n.pterm >= cs.cterm && n.pindex >= cs.cindex {
// If we are here we are good, so if we have a catchup pending we can cancel.
n.cancelCatchup()
// Reset our notion of catching up.
resetCatchingUp = true
catchingUp = false
} else if isNew {
var ar *appendEntryResponse
var inbox string
@@ -3479,34 +3549,6 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) {
}
}
// If this term is greater than ours.
if lterm > n.term {
n.term = lterm
n.vote = noVote
if isNew {
n.writeTermVote()
}
if n.State() != Follower {
n.debug("Term higher than ours and we are not a follower: %v, stepping down to %q", n.State(), ae.leader)
n.stepdownLocked(ae.leader)
}
} else if lterm < n.term && sub != nil && !(catchingUp && ae.lterm == 0) {
// Anything that's below our expected highest term needs to be rejected.
// Unless we're replaying (sub=nil), in which case we'll always continue.
// For backward-compatibility we shouldn't reject if we're being caught up by an old server.
n.debug("Rejected AppendEntry from a leader (%s) with term %d which is less than ours", ae.leader, lterm)
ar := newAppendEntryResponse(n.term, n.pindex, n.id, false)
n.Unlock()
n.sendRPC(ae.reply, _EMPTY_, ar.encode(arbuf))
arPool.Put(ar)
return
}
// Reset after checking the term is correct, because we use catchingUp in a condition above.
if resetCatchingUp {
catchingUp = false
}
if isNew && n.leader != ae.leader && n.State() == Follower {
n.debug("AppendEntry updating leader to %q", ae.leader)
n.updateLeader(ae.leader)
@@ -3647,21 +3689,7 @@ CONTINUE:
n.Unlock()
return
}
// Save in memory for faster processing during applyCommit.
// Only save so many however to avoid memory bloat.
if l := len(n.pae); l <= paeDropThreshold {
n.pae[n.pindex], l = ae, l+1
if l > paeWarnThreshold && l%paeWarnModulo == 0 {
n.warn("%d append entries pending", len(n.pae))
}
} else {
// Invalidate cache entry at this index, we might have
// stored it previously with a different value.
delete(n.pae, n.pindex)
if l%paeWarnModulo == 0 {
n.debug("Not saving to append entries pending")
}
}
n.cachePendingEntry(ae)
} else {
// This is a replay on startup so just take the appendEntry version.
n.pterm = ae.term
@@ -3880,12 +3908,7 @@ func (n *raft) sendAppendEntry(entries []*Entry) {
return
}
n.active = time.Now()
// Save in memory for faster processing during applyCommit.
n.pae[n.pindex] = ae
if l := len(n.pae); l > paeWarnThreshold && l%paeWarnModulo == 0 {
n.warn("%d append entries pending", len(n.pae))
}
n.cachePendingEntry(ae)
}
n.sendRPC(n.asubj, n.areply, ae.buf)
if !shouldStore {
@@ -3893,6 +3916,21 @@ func (n *raft) sendAppendEntry(entries []*Entry) {
}
}
// cachePendingEntry saves append entries in memory for faster processing during applyCommit.
// Only save so many however to avoid memory bloat.
func (n *raft) cachePendingEntry(ae *appendEntry) {
	l := len(n.pae)
	if l >= paeDropThreshold {
		// At capacity: don't cache, and invalidate any entry previously
		// stored at this index since it may hold a different value.
		delete(n.pae, n.pindex)
		return
	}
	n.pae[n.pindex] = ae
	if l+1 >= paeWarnThreshold && (l+1)%paeWarnModulo == 0 {
		n.warn("%d append entries pending", len(n.pae))
	}
}
type extensionState uint16
const (
@@ -4392,6 +4430,7 @@ func (n *raft) switchToFollowerLocked(leader string) {
n.aflr = 0
n.leaderState.Store(false)
n.leaderSince.Store(nil)
n.lxfer = false
// Reset acks, we can't assume acks from a previous term are still valid in another term.
if len(n.acks) > 0 {
@@ -4458,7 +4497,11 @@ func (n *raft) switchToLeader() {
// We know we have applied all entries in our log and can signal immediately.
// For sanity reset applied floor back down to 0, so we aren't able to signal twice.
n.aflr = 0
n.leaderState.Store(true)
if !n.leaderState.Swap(true) {
// Only update timestamp if leader state actually changed.
nowts := time.Now().UTC()
n.leaderSince.Store(&nowts)
}
n.updateLeadChange(true)
}
}

View File

@@ -87,6 +87,17 @@ type route struct {
// Transient value used to set the Info.GossipMode when initiating
// an implicit route and sending to the remote.
gossipMode byte
// This will be set in case of pooling so that a route can trigger
// the creation of the next after receiving the first PONG, ensuring
// that authentication did not fail.
startNewRoute *routeInfo
}
// This contains the information required to create a new route.
type routeInfo struct {
	url *url.URL // Remote URL to solicit the new route connection to.
	rtype RouteType // Type of the route being created.
	gossipMode byte // Gossip mode to use when initiating the route.
}
// Do not change the values/order since they are exchanged between servers.
@@ -2379,20 +2390,18 @@ func (s *Server) addRoute(c *client, didSolicit, sendDelayedInfo bool, gossipMod
// Send the subscriptions interest.
s.sendSubsToRoute(c, idx, _EMPTY_)
// In pool mode, if we did not yet reach the cap, try to connect a new connection
// In pool mode, if we did not yet reach the cap, try to connect a new connection,
// but do so only after receiving the first PONG to our PING, which will ensure
// that we have proper authentication.
if pool && didSolicit && sz != effectivePoolSize {
s.startGoRoutine(func() {
select {
case <-time.After(time.Duration(rand.Intn(100)) * time.Millisecond):
case <-s.quitCh:
// Doing this here and not as a defer because connectToRoute is also
// calling s.grWG.Done() on exit, so we do this only if we don't
// invoke connectToRoute().
s.grWG.Done()
return
}
s.connectToRoute(url, rtype, true, gossipMode, _EMPTY_)
})
c.mu.Lock()
c.route.startNewRoute = &routeInfo{
url: url,
rtype: rtype,
gossipMode: gossipMode,
}
c.sendPing()
c.mu.Unlock()
}
}
s.mu.Unlock()

View File

@@ -205,6 +205,13 @@ type StreamInfo struct {
TimeStamp time.Time `json:"ts"`
}
// streamInfoClusterResponse is a response used in a cluster to communicate the stream info
// back to the meta leader as part of a stream list request.
type streamInfoClusterResponse struct {
	StreamInfo // Embedded, so its fields are inlined in the JSON encoding.
	OfflineReason string `json:"offline_reason,omitempty"` // Reporting when a stream is offline.
}
type StreamAlternate struct {
Name string `json:"name"`
Domain string `json:"domain,omitempty"`
@@ -214,10 +221,13 @@ type StreamAlternate struct {
// ClusterInfo shows information about the underlying set of servers
// that make up the stream or consumer.
type ClusterInfo struct {
Name string `json:"name,omitempty"`
RaftGroup string `json:"raft_group,omitempty"`
Leader string `json:"leader,omitempty"`
Replicas []*PeerInfo `json:"replicas,omitempty"`
Name string `json:"name,omitempty"`
RaftGroup string `json:"raft_group,omitempty"`
Leader string `json:"leader,omitempty"`
LeaderSince *time.Time `json:"leader_since,omitempty"`
SystemAcc bool `json:"system_account,omitempty"`
TrafficAcc string `json:"traffic_account,omitempty"`
Replicas []*PeerInfo `json:"replicas,omitempty"`
}
// PeerInfo shows information about all the peers in the cluster that
@@ -375,6 +385,10 @@ type stream struct {
lastBySub *subscription
monitorWg sync.WaitGroup // Wait group for the monitor routine.
// If standalone/single-server, the offline reason needs to be stored directly in the stream.
// Otherwise, if clustered it will be part of the stream assignment.
offlineReason string
}
type sourceInfo struct {
@@ -937,6 +951,17 @@ func (mset *stream) monitorQuitC() <-chan struct{} {
return mset.mqch
}
// signalMonitorQuit signals to exit the monitor loop. If there's no Raft node,
// this will be the only way to stop the monitor goroutine.
func (mset *stream) signalMonitorQuit() {
	mset.mu.Lock()
	defer mset.mu.Unlock()
	// Nil out first, then close; a nil channel marks the quit as already signaled
	// so a second call is a safe no-op (closing twice would panic).
	if ch := mset.mqch; ch != nil {
		mset.mqch = nil
		close(ch)
	}
}
func (mset *stream) updateC() <-chan struct{} {
if mset == nil {
return nil
@@ -1778,6 +1803,10 @@ func (s *Server) checkStreamCfg(config *StreamConfig, acc *Account, pedantic boo
}
}
// Remove placement if it's an empty object.
if cfg.Placement != nil && reflect.DeepEqual(cfg.Placement, &Placement{}) {
cfg.Placement = nil
}
// For now don't allow preferred server in placement.
if cfg.Placement != nil && cfg.Placement.Preferred != _EMPTY_ {
return StreamConfig{}, NewJSStreamInvalidConfigError(fmt.Errorf("preferred server not permitted in placement"))
@@ -2420,7 +2449,7 @@ func (mset *stream) mirrorInfo() *StreamSourceInfo {
// retryDisconnectedSyncConsumers() will check if we have any disconnected
// sync consumers for either mirror or a source and will reset and retry to connect.
func (mset *stream) retryDisconnectedSyncConsumers(remoteDomain string) {
func (mset *stream) retryDisconnectedSyncConsumers() {
mset.mu.Lock()
defer mset.mu.Unlock()
@@ -2429,23 +2458,24 @@ func (mset *stream) retryDisconnectedSyncConsumers(remoteDomain string) {
return
}
shouldRetry := func(si *sourceInfo) bool {
if si != nil && (si.sip || si.sub == nil || (si.sub.client != nil && si.sub.client.isClosed())) {
// Need to reset
si.fails, si.sip = 0, false
mset.cancelSourceInfo(si)
return true
}
return false
}
// Check mirrors first.
if si := mset.mirror; si != nil {
if si.sub == nil && !si.sip {
if remoteDomain == _EMPTY_ || (mset.cfg.Mirror != nil && mset.cfg.Mirror.External.Domain() == remoteDomain) {
// Need to reset
si.fails = 0
mset.cancelSourceInfo(si)
mset.scheduleSetupMirrorConsumerRetry()
}
if shouldRetry(si) {
mset.scheduleSetupMirrorConsumerRetry()
}
} else {
for _, si := range mset.sources {
ss := mset.streamSource(si.iname)
if remoteDomain == _EMPTY_ || (ss != nil && ss.External.Domain() == remoteDomain) {
// Need to reset
si.fails = 0
mset.cancelSourceInfo(si)
if shouldRetry(si) {
mset.setupSourceConsumer(si.iname, si.sseq+1, time.Time{})
}
}
@@ -2970,7 +3000,8 @@ func (mset *stream) setupMirrorConsumer() error {
if mset.mirror != nil {
mset.mirror.sip = false
// If we need to retry, schedule now
if retry {
// If sub is not nil means we re-established somewhere else so do not re-attempt here.
if retry && mset.mirror.sub == nil {
mset.mirror.fails++
// Cancel here since we can not do anything with this consumer at this point.
mset.cancelSourceInfo(mset.mirror)
@@ -3331,7 +3362,8 @@ func (mset *stream) trySetupSourceConsumer(iname string, seq uint64, startTime t
if si := mset.sources[iname]; si != nil {
si.sip = false
// If we need to retry, schedule now
if retry {
// If sub is not nil means we re-established somewhere else so do not re-attempt here.
if retry && si.sub == nil {
si.fails++
// Cancel here since we can not do anything with this consumer at this point.
mset.cancelSourceInfo(si)
@@ -5275,6 +5307,8 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte,
if ttl > 0 && mset.cfg.SubjectDeleteMarkerTTL > 0 && mset.cfg.MaxMsgsPer != 1 {
if minTtl := int64(mset.cfg.SubjectDeleteMarkerTTL.Seconds()); ttl < minTtl {
ttl = minTtl
hdr = removeHeaderIfPresent(hdr, JSMessageTTL)
hdr = genHeader(hdr, JSMessageTTL, strconv.FormatInt(ttl, 10))
}
}
@@ -5740,6 +5774,7 @@ func (mset *stream) resetAndWaitOnConsumers() {
node.Stop()
}
if o.isMonitorRunning() {
o.signalMonitorQuit()
o.monitorWg.Wait()
}
}
@@ -5756,7 +5791,7 @@ func (mset *stream) delete() error {
// Internal function to stop or delete the stream.
func (mset *stream) stop(deleteFlag, advisory bool) error {
mset.mu.RLock()
js, jsa, name := mset.js, mset.jsa, mset.cfg.Name
js, jsa, name, offlineReason := mset.js, mset.jsa, mset.cfg.Name, mset.offlineReason
mset.mu.RUnlock()
if jsa == nil {
@@ -5765,7 +5800,10 @@ func (mset *stream) stop(deleteFlag, advisory bool) error {
// Remove from our account map first.
jsa.mu.Lock()
delete(jsa.streams, name)
// Preserve in the account if it's marked offline, to have it remain queryable.
if deleteFlag || offlineReason == _EMPTY_ {
delete(jsa.streams, name)
}
accName := jsa.account.Name
jsa.mu.Unlock()
@@ -5798,9 +5836,12 @@ func (mset *stream) stop(deleteFlag, advisory bool) error {
for _, o := range mset.consumers {
obs = append(obs, o)
}
mset.clsMu.Lock()
mset.consumers, mset.cList, mset.csl = nil, nil, nil
mset.clsMu.Unlock()
// Preserve the consumers if it's marked offline, to have them remain queryable.
if deleteFlag || offlineReason == _EMPTY_ {
mset.clsMu.Lock()
mset.consumers, mset.cList, mset.csl = nil, nil, nil
mset.clsMu.Unlock()
}
// Check if we are a mirror.
if mset.mirror != nil && mset.mirror.sub != nil {
@@ -5824,6 +5865,7 @@ func (mset *stream) stop(deleteFlag, advisory bool) error {
// but should we log?
o.stopWithFlags(deleteFlag, deleteFlag, false, advisory)
if !isShuttingDown {
o.signalMonitorQuit()
o.monitorWg.Wait()
}
}
@@ -5901,14 +5943,17 @@ func (mset *stream) stop(deleteFlag, advisory bool) error {
}
if deleteFlag {
// cleanup directories after the stream
accDir := filepath.Join(js.config.StoreDir, accName)
if store != nil {
// Ignore errors.
store.Delete()
} else {
streamDir := filepath.Join(accDir, streamsDir)
os.RemoveAll(filepath.Join(streamDir, name))
}
// Release any resources.
js.releaseStreamResources(&mset.cfg)
// cleanup directories after the stream
accDir := filepath.Join(js.config.StoreDir, accName)
// Do cleanup in separate go routine similar to how fs will use purge here..
go func() {
// no op if not empty

View File

@@ -26,6 +26,7 @@ import (
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
@@ -88,13 +89,18 @@ func NewWatcher(opts ...Option) *Watcher {
// Exit exits the current process cleaning up
// existing pid files.
func (w *Watcher) Exit(errc int) {
w.Clean()
os.Exit(errc)
}
// Clean removes the pid file.
func (w *Watcher) Clean() {
err := w.clean()
if err != nil {
w.log.Warn().Err(err).Msg("error removing pid file")
} else {
w.log.Info().Msgf("pid file %q got removed", w.pidFile)
}
os.Exit(errc)
}
func (w *Watcher) clean() error {
@@ -266,7 +272,7 @@ type Server interface {
// TrapSignals captures the OS signal.
func (w *Watcher) TrapSignals() {
signalCh := make(chan os.Signal, 1024)
signal.Notify(signalCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT)
signal.Notify(signalCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)
for {
s := <-signalCh
w.log.Info().Msgf("%v signal received", s)
@@ -284,69 +290,51 @@ func (w *Watcher) TrapSignals() {
w.log.Info().Msgf("child forked with new pid %d", p.Pid)
w.childPIDs = append(w.childPIDs, p.Pid)
}
case syscall.SIGQUIT:
gracefulShutdown(w)
case syscall.SIGINT, syscall.SIGTERM:
if w.gracefulShutdownTimeout == 0 {
hardShutdown(w)
}
case syscall.SIGQUIT, syscall.SIGINT, syscall.SIGTERM:
gracefulShutdown(w)
return
}
}
}
// TODO: Ideally this would call exit() but properly return an error. The
// exit() is problematic (i.e. racy) especially when orchestrating multiple
// reva services from some external runtime (like in the "opencloud server" case).
func gracefulShutdown(w *Watcher) {
defer w.Clean()
w.log.Info().Int("Timeout", w.gracefulShutdownTimeout).Msg("preparing for a graceful shutdown with deadline")
wg := sync.WaitGroup{}
for _, s := range w.ss {
wg.Add(1)
go func() {
defer wg.Done()
w.log.Info().Str("network.transport", s.Network()).Str("network.local.address", s.Address()).Msg("fd gracefully closed")
err := s.GracefulStop()
if err != nil {
w.log.Error().Err(err).Msg("error stopping server")
}
}()
}
done := make(chan struct{})
go func() {
count := w.gracefulShutdownTimeout
ticker := time.NewTicker(time.Second)
for ; true; <-ticker.C {
w.log.Info().Msgf("shutting down in %d seconds", count-1)
count--
if count <= 0 {
w.log.Info().Msg("deadline reached before draining active conns, hard stopping ...")
for _, s := range w.ss {
err := s.Stop()
if err != nil {
w.log.Error().Err(err).Msg("error stopping server")
}
w.log.Info().Msgf("fd to %s:%s abruptly closed", s.Network(), s.Address())
}
w.Exit(1)
wg.Wait()
close(done)
}()
select {
case <-time.After(time.Duration(w.gracefulShutdownTimeout) * time.Second):
w.log.Info().Msg("graceful shutdown timeout reached. running hard shutdown")
for _, s := range w.ss {
w.log.Info().Str("network.transport", s.Network()).Str("network.local.address", s.Address()).Msg("fd abruptly closed")
err := s.Stop()
if err != nil {
w.log.Error().Err(err).Msg("error stopping server")
}
}
}()
for _, s := range w.ss {
w.log.Info().Msgf("fd to %s:%s gracefully closed ", s.Network(), s.Address())
err := s.GracefulStop()
if err != nil {
w.log.Error().Err(err).Msg("error stopping server")
w.log.Info().Msg("exit with error code 1")
w.Exit(1)
}
return
case <-done:
w.log.Info().Msg("all servers gracefully stopped")
return
}
w.log.Info().Msg("exit with error code 0")
w.Exit(0)
}
// hardShutdown stops every managed server immediately, aborting all active
// connections, and then exits the process with code 0 via w.Exit.
//
// TODO: Ideally this would call exit() but properly return an error. The
// exit() is problematic (i.e. racy) especially when orchestrating multiple
// reva services from some external runtime (like in the "opencloud server" case).
func hardShutdown(w *Watcher) {
	w.log.Info().Msg("preparing for hard shutdown, aborting all conns")
	for _, s := range w.ss {
		w.log.Info().Msgf("fd to %s:%s abruptly closed", s.Network(), s.Address())
		err := s.Stop()
		if err != nil {
			// best effort: log and keep stopping the remaining servers
			w.log.Error().Err(err).Msg("error stopping server")
		}
	}
	w.Exit(0)
}
func getListenerFile(ln net.Listener) (*os.File, error) {

View File

@@ -0,0 +1,186 @@
package runtime
import (
"errors"
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/opencloud-eu/reva/v2/pkg/registry"
"github.com/rs/zerolog"
)
// Protocol selectors passed to newServer.
const (
	HTTP = iota // reva HTTP server
	GRPC        // reva GRPC server
)
// RevaDrivenServer is an interface that defines the methods for starting and stopping reva HTTP/GRPC services.
type RevaDrivenServer interface {
	// Start begins serving and blocks until the server stops or fails.
	Start() error
	// Stop shuts the server down.
	Stop() error
}
// revaServer is an interface that defines the methods for starting and stopping a reva server.
type revaServer interface {
	// Start serves on the given listener and blocks until the server stops or fails.
	Start(ln net.Listener) error
	// Stop stops the server immediately.
	Stop() error
	// GracefulStop stops the server, draining in-flight work first.
	GracefulStop() error
	// Network returns the network type the server listens on.
	Network() string
	// Address returns the address the server listens on.
	Address() string
}
// server represents a generic reva server that implements the RevaDrivenServer interface.
type server struct {
	srv                     revaServer      // the underlying reva HTTP or GRPC server
	log                     *zerolog.Logger // logger, enriched with host/protocol/address fields as they become known
	gracefulShutdownTimeout time.Duration   // how long Stop waits for GracefulStop before forcing a hard stop
	protocol                string          // "http" or "grpc"; used in error messages
}
// NewDrivenHTTPServerWithOptions creates a revad HTTP server that runs without a
// watcher and without OS signal handling, so it can be embedded in another runtime.
// It returns nil when no http server is configured in the config file.
// The graceful shutdown timeout defaults to 20 seconds and can be overridden in the core config.
// If the http server cannot be created, a fatal error is logged and the process exits with code 1.
func NewDrivenHTTPServerWithOptions(mainConf map[string]interface{}, opts ...Option) *server {
	if !isEnabledHTTP(mainConf) {
		return nil
	}
	options := newOptions(opts...)
	srv, err := newServer(HTTP, mainConf, options)
	if err != nil {
		options.Logger.Fatal().Err(err).Msg("failed to create http server")
	}
	return srv
}
// NewDrivenGRPCServerWithOptions creates a revad GRPC server that runs without a
// watcher and without OS signal handling, so it can be embedded in another runtime.
// It returns nil when no grpc server is configured in the config file.
// The graceful shutdown timeout defaults to 20 seconds and can be overridden in the core config.
// If the grpc server cannot be created, a fatal error is logged and the process exits with code 1.
func NewDrivenGRPCServerWithOptions(mainConf map[string]interface{}, opts ...Option) *server {
	if !isEnabledGRPC(mainConf) {
		return nil
	}
	options := newOptions(opts...)
	srv, err := newServer(GRPC, mainConf, options)
	if err != nil {
		options.Logger.Fatal().Err(err).Msg("failed to create grpc server")
	}
	return srv
}
// Start opens a listener on the configured network and address and serves on it.
// It blocks until the underlying reva server returns. Errors other than
// http.ErrServerClosed are logged before being returned to the caller.
func (s *server) Start() error {
	if s.srv == nil {
		return fmt.Errorf("reva %s server not initialized", s.protocol)
	}
	ln, lnErr := net.Listen(s.srv.Network(), s.srv.Address())
	if lnErr != nil {
		return lnErr
	}
	if srvErr := s.srv.Start(ln); srvErr != nil {
		if !errors.Is(srvErr, http.ErrServerClosed) {
			s.log.Error().Err(srvErr).Msg("reva server error")
		}
		return srvErr
	}
	// enrich the logger with transport and address
	logger := s.log.With().Str("network.transport", s.srv.Network()).Str("network.local.address", s.srv.Address()).Logger()
	s.log = &logger
	return nil
}
// Stop shuts the reva server down. It attempts a graceful stop in the
// background; if that does not finish within the configured graceful shutdown
// timeout, the server is stopped forcefully. Calling Stop on a server without
// an underlying reva server is a no-op.
func (s *server) Stop() error {
	if s.srv == nil {
		return nil
	}
	stopped := make(chan struct{})
	go func() {
		defer close(stopped)
		s.log.Info().Msg("gracefully stopping reva server")
		gracefulErr := s.srv.GracefulStop()
		if gracefulErr == nil {
			return
		}
		s.log.Error().Err(gracefulErr).Msg("error gracefully stopping reva server")
		// graceful stop failed; fall back to a hard stop
		if stopErr := s.srv.Stop(); stopErr != nil {
			s.log.Error().Err(stopErr).Msg("error stopping reva server")
		}
	}()
	select {
	case <-time.After(s.gracefulShutdownTimeout):
		s.log.Info().Msg("graceful shutdown timeout reached. running hard shutdown")
		if err := s.srv.Stop(); err != nil {
			s.log.Error().Err(err).Msg("error stopping reva server")
		}
	case <-stopped:
		s.log.Info().Msg("reva server gracefully stopped")
	}
	return nil
}
// newServer builds a revad server (without a watcher) for the given protocol
// (HTTP or GRPC) from the config map and options. It initializes the registry,
// tracing (unless a trace provider was supplied), the CPU count, and the
// graceful shutdown timeout (default 20s, overridable via the core config).
func newServer(protocol int, mainConf map[string]interface{}, options Options) (*server, error) {
	parseSharedConfOrDie(mainConf["shared"])
	coreConf := parseCoreConfOrDie(mainConf["core"])
	if err := registry.Init(options.Registry); err != nil {
		return nil, err
	}
	srv := &server{}
	// enrich the logger with the hostname
	host, _ := os.Hostname()
	hostLogger := options.Logger.With().Str("host.name", host).Logger()
	srv.log = &hostLogger
	// Only initialize tracing if we didn't get a tracer provider.
	if options.TraceProvider == nil {
		srv.log.Debug().Msg("no pre-existing tracer given, initializing tracing")
		options.TraceProvider = initTracing(coreConf)
	}
	initCPUCount(coreConf, srv.log)
	// default to 20 seconds; the core config may override it
	srv.gracefulShutdownTimeout = 20 * time.Second
	if coreConf.GracefulShutdownTimeout > 0 {
		srv.gracefulShutdownTimeout = time.Duration(coreConf.GracefulShutdownTimeout) * time.Second
	}
	var (
		rs  revaServer
		err error
	)
	switch protocol {
	case HTTP:
		srv.protocol = "http"
		rs, err = getHTTPServer(mainConf["http"], srv.log, options.TraceProvider)
	case GRPC:
		srv.protocol = "grpc"
		rs, err = getGRPCServer(mainConf["grpc"], srv.log, options.TraceProvider)
	default:
		return nil, fmt.Errorf("unknown protocol: %d", protocol)
	}
	if err != nil {
		return nil, err
	}
	srv.srv = rs
	// enrich the logger with the protocol
	protoLogger := srv.log.With().Str("protocol", srv.protocol).Logger()
	srv.log = &protoLogger
	return srv, nil
}

View File

@@ -53,7 +53,8 @@ func RunWithOptions(mainConf map[string]interface{}, pidFile string, opts ...Opt
coreConf := parseCoreConfOrDie(mainConf["core"])
if err := registry.Init(options.Registry); err != nil {
panic(err)
options.Logger.Fatal().Err(err).Msg("failed to initialize registry client")
return
}
run(mainConf, coreConf, options.Logger, options.TraceProvider, pidFile)

View File

@@ -279,15 +279,15 @@ func (s *Server) cleanupServices() {
// Stop stops the server.
func (s *Server) Stop() error {
s.cleanupServices()
s.s.Stop()
s.cleanupServices()
return nil
}
// GracefulStop gracefully stops the server.
func (s *Server) GracefulStop() error {
s.cleanupServices()
s.s.GracefulStop()
s.cleanupServices()
return nil
}

View File

@@ -132,10 +132,10 @@ func (s *Server) Start(ln net.Listener) error {
// Stop stops the server.
func (s *Server) Stop() error {
s.closeServices()
// TODO(labkode): set ctx deadline to zero
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
defer s.closeServices()
return s.httpServer.Shutdown(ctx)
}
@@ -164,7 +164,7 @@ func (s *Server) Address() string {
// GracefulStop gracefully stops the server.
func (s *Server) GracefulStop() error {
s.closeServices()
defer s.closeServices()
return s.httpServer.Shutdown(context.Background())
}

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2024 Masayuki Shamoto
Copyright (c) 2025 Masayuki Shamoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -6,15 +6,12 @@
[![codecov](https://codecov.io/gh/shamaton/msgpack/branch/master/graph/badge.svg?token=9PD2JUK5V3)](https://codecov.io/gh/shamaton/msgpack)
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fshamaton%2Fmsgpack.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fshamaton%2Fmsgpack?ref=badge_shield)
## 📣 Notice
If your application serializes only primitive types, array, map and struct, code generation is also recommended.
You can get the fastest performance with [msgpackgen](https://github.com/shamaton/msgpackgen).
## Features
* Supported types : primitive / array / slice / struct / map / interface{} and time.Time
* Renaming fields via `msgpack:"field_name"`
* Omitting fields via `msgpack:"-"`
* Supports extend encoder / decoder
* Omitting empty fields via `msgpack:"field_name,omitempty"`
* Supports extend encoder / decoder [(example)](./msgpack_example_test.go)
* Can also Encoding / Decoding struct as array
## Installation
@@ -66,7 +63,7 @@ func handle(w http.ResponseWriter, r *http.Request) {
## Benchmark
This result made from [shamaton/msgpack_bench](https://github.com/shamaton/msgpack_bench)
![msgpack_bench](https://user-images.githubusercontent.com/4637556/128299009-4823e79b-d70b-4d11-8f35-10a4758dfeca.png)
![msgpack_bench](https://github.com/user-attachments/assets/ed5bc4c5-a149-4083-98b8-ee6820c00eae)
## License

View File

@@ -6,35 +6,56 @@ import (
"github.com/shamaton/msgpack/v2/def"
)
// Decoder defines an interface for decoding values from bytes.
// It provides methods to get the decoder type, check if the data matches the type,
// and convert the data into a Go value.
type Decoder interface {
	// Code returns the unique code representing the decoder type.
	Code() int8
	// IsType reports whether the data at the given offset matches the decoder's type.
	IsType(offset int, d *[]byte) bool
	// AsValue decodes the data at the given offset into a Go value of the specified kind.
	// It returns the decoded value, the offset just past the consumed bytes, and an error if decoding fails.
	AsValue(offset int, k reflect.Kind, d *[]byte) (interface{}, int, error)
}
// DecoderCommon provides common utility methods for decoding data from bytes.
type DecoderCommon struct {
}

// ReadSize1 reads the single byte at index and returns it together with the
// index advanced past the read byte.
func (cd *DecoderCommon) ReadSize1(index int, d *[]byte) (byte, int) {
	return (*d)[index], index + def.Byte1
}

// ReadSize2 reads two bytes starting at index and returns them as a sub-slice
// together with the index advanced past the read bytes.
func (cd *DecoderCommon) ReadSize2(index int, d *[]byte) ([]byte, int) {
	return cd.ReadSizeN(index, def.Byte2, d)
}

// ReadSize4 reads four bytes starting at index and returns them as a sub-slice
// together with the index advanced past the read bytes.
func (cd *DecoderCommon) ReadSize4(index int, d *[]byte) ([]byte, int) {
	return cd.ReadSizeN(index, def.Byte4, d)
}

// ReadSize8 reads eight bytes starting at index and returns them as a sub-slice
// together with the index advanced past the read bytes.
func (cd *DecoderCommon) ReadSize8(index int, d *[]byte) ([]byte, int) {
	return cd.ReadSizeN(index, def.Byte8, d)
}

// ReadSizeN reads n bytes starting at index and returns them as a sub-slice
// together with the index advanced past the read bytes.
func (cd *DecoderCommon) ReadSizeN(index, n int, d *[]byte) ([]byte, int) {
	next := index + n
	return (*d)[index:next], next
}

View File

@@ -4,8 +4,19 @@ import (
"reflect"
)
// StreamDecoder defines an interface for decoding streams of data.
// It provides methods to retrieve the decoder's code, check type compatibility,
// and convert raw data into a Go value of a specified kind.
type StreamDecoder interface {
	// Code returns the unique identifier for the decoder.
	Code() int8
	// IsType reports whether the provided code, inner type, and data length
	// match the type this decoder handles.
	IsType(code byte, innerType int8, dataLength int) bool
	// ToValue converts the raw data into a Go value of the specified kind.
	// It returns the decoded value or an error if the conversion fails.
	ToValue(code byte, data []byte, k reflect.Kind) (any, error)
}

View File

@@ -4,27 +4,48 @@ import (
"reflect"
)
// Encoder defines an interface for encoding values into bytes.
// It provides methods to get the encoding type, calculate the byte size of a value,
// and write the encoded value into a byte slice.
type Encoder interface {
	// Code returns the unique code representing the encoder type.
	Code() int8
	// Type returns the reflect.Type of the value that the encoder handles.
	Type() reflect.Type
	// CalcByteSize calculates the number of bytes required to encode the given value.
	// It returns the size and an error if the calculation fails.
	CalcByteSize(value reflect.Value) (int, error)
	// WriteToBytes encodes the given value into a byte slice starting at the specified offset.
	// It returns the offset just past the written bytes.
	WriteToBytes(value reflect.Value, offset int, bytes *[]byte) int
}
// EncoderCommon provides utility methods for writing values into a
// caller-supplied byte slice: signed and unsigned integers of several widths,
// plus raw byte copies.
type EncoderCommon struct {
}

// SetByte1Int64 writes the lowest byte of value into the slice at offset and
// returns the offset just past the written byte.
func (c *EncoderCommon) SetByte1Int64(value int64, offset int, d *[]byte) int {
	buf := *d
	buf[offset] = byte(value)
	return offset + 1
}

// SetByte2Int64 writes the lowest two bytes of value big-endian into the slice
// at offset and returns the offset just past the written bytes.
func (c *EncoderCommon) SetByte2Int64(value int64, offset int, d *[]byte) int {
	buf := *d
	buf[offset], buf[offset+1] = byte(value>>8), byte(value)
	return offset + 2
}
// SetByte4Int64 encodes the lower four bytes of the given int64 value into the byte slice at the specified offset.
// Returns the new offset after writing the bytes.
func (c *EncoderCommon) SetByte4Int64(value int64, offset int, d *[]byte) int {
(*d)[offset+0] = byte(value >> 24)
(*d)[offset+1] = byte(value >> 16)
@@ -33,6 +54,8 @@ func (c *EncoderCommon) SetByte4Int64(value int64, offset int, d *[]byte) int {
return offset + 4
}
// SetByte8Int64 encodes all eight bytes of the given int64 value into the byte slice at the specified offset.
// Returns the new offset after writing the bytes.
func (c *EncoderCommon) SetByte8Int64(value int64, offset int, d *[]byte) int {
(*d)[offset] = byte(value >> 56)
(*d)[offset+1] = byte(value >> 48)
@@ -45,17 +68,23 @@ func (c *EncoderCommon) SetByte8Int64(value int64, offset int, d *[]byte) int {
return offset + 8
}
// SetByte1Uint64 encodes a single byte from the given uint64 value into the byte slice at the specified offset.
// Returns the new offset after writing the byte.
func (c *EncoderCommon) SetByte1Uint64(value uint64, offset int, d *[]byte) int {
(*d)[offset] = byte(value)
return offset + 1
}
// SetByte2Uint64 encodes the lower two bytes of the given uint64 value into the byte slice at the specified offset.
// Returns the new offset after writing the bytes.
func (c *EncoderCommon) SetByte2Uint64(value uint64, offset int, d *[]byte) int {
(*d)[offset] = byte(value >> 8)
(*d)[offset+1] = byte(value)
return offset + 2
}
// SetByte4Uint64 encodes the lower four bytes of the given uint64 value into the byte slice at the specified offset.
// Returns the new offset after writing the bytes.
func (c *EncoderCommon) SetByte4Uint64(value uint64, offset int, d *[]byte) int {
(*d)[offset] = byte(value >> 24)
(*d)[offset+1] = byte(value >> 16)
@@ -64,6 +93,8 @@ func (c *EncoderCommon) SetByte4Uint64(value uint64, offset int, d *[]byte) int
return offset + 4
}
// SetByte8Uint64 encodes all eight bytes of the given uint64 value into the byte slice at the specified offset.
// Returns the new offset after writing the bytes.
func (c *EncoderCommon) SetByte8Uint64(value uint64, offset int, d *[]byte) int {
(*d)[offset] = byte(value >> 56)
(*d)[offset+1] = byte(value >> 48)
@@ -76,17 +107,23 @@ func (c *EncoderCommon) SetByte8Uint64(value uint64, offset int, d *[]byte) int
return offset + 8
}
// SetByte1Int writes the lowest byte of code into the slice at offset and
// returns the offset just past the written byte.
func (c *EncoderCommon) SetByte1Int(code, offset int, d *[]byte) int {
	buf := *d
	buf[offset] = byte(code)
	return offset + 1
}

// SetByte2Int writes the lowest two bytes of value big-endian into the slice
// at offset and returns the offset just past the written bytes.
func (c *EncoderCommon) SetByte2Int(value int, offset int, d *[]byte) int {
	buf := *d
	buf[offset], buf[offset+1] = byte(value>>8), byte(value)
	return offset + 2
}
// SetByte4Int encodes the lower four bytes of the given int value into the byte slice at the specified offset.
// Returns the new offset after writing the bytes.
func (c *EncoderCommon) SetByte4Int(value int, offset int, d *[]byte) int {
(*d)[offset] = byte(value >> 24)
(*d)[offset+1] = byte(value >> 16)
@@ -95,6 +132,8 @@ func (c *EncoderCommon) SetByte4Int(value int, offset int, d *[]byte) int {
return offset + 4
}
// SetByte4Uint32 encodes the lower four bytes of the given uint32 value into the byte slice at the specified offset.
// Returns the new offset after writing the bytes.
func (c *EncoderCommon) SetByte4Uint32(value uint32, offset int, d *[]byte) int {
(*d)[offset] = byte(value >> 24)
(*d)[offset+1] = byte(value >> 16)
@@ -103,6 +142,8 @@ func (c *EncoderCommon) SetByte4Uint32(value uint32, offset int, d *[]byte) int
return offset + 4
}
// SetBytes writes the given byte slice `bs` into the target byte slice at the specified offset.
// Returns the new offset after writing the bytes.
func (c *EncoderCommon) SetBytes(bs []byte, offset int, d *[]byte) int {
for i := range bs {
(*d)[offset+i] = bs[i]

View File

@@ -7,29 +7,37 @@ import (
"github.com/shamaton/msgpack/v2/internal/common"
)
// StreamEncoder is an interface that extended encoders should implement.
// It defines methods for encoding data into a stream.
type StreamEncoder interface {
	// Code returns the unique code for the encoder.
	Code() int8
	// Type returns the reflect.Type of the value being encoded.
	Type() reflect.Type
	// Write encodes the given value and writes it to the provided StreamWriter.
	Write(w StreamWriter, value reflect.Value) error
}
// StreamWriter is provided some writing functions for extended format by user
// StreamWriter provides methods for writing data in extended formats.
// It wraps an io.Writer and a buffer for efficient writing.
type StreamWriter struct {
w io.Writer
buf *common.Buffer
w io.Writer // The underlying writer to write data to.
buf *common.Buffer // A buffer used for temporary storage during writing.
}
// CreateStreamWriter creates and returns a new StreamWriter that writes to w,
// using buf as scratch space.
func CreateStreamWriter(w io.Writer, buf *common.Buffer) StreamWriter {
	return StreamWriter{w: w, buf: buf}
}
// WriteByte1Int64 writes the single lowest byte of an int64 value to the
// underlying writer.
func (w *StreamWriter) WriteByte1Int64(value int64) error {
	return w.buf.Write(w.w, byte(value))
}
// WriteByte2Int64 writes a two-byte representation of an int64 value.
func (w *StreamWriter) WriteByte2Int64(value int64) error {
return w.buf.Write(w.w,
byte(value>>8),
@@ -37,6 +45,7 @@ func (w *StreamWriter) WriteByte2Int64(value int64) error {
)
}
// WriteByte4Int64 writes a four-byte representation of an int64 value.
func (w *StreamWriter) WriteByte4Int64(value int64) error {
return w.buf.Write(w.w,
byte(value>>24),
@@ -46,6 +55,7 @@ func (w *StreamWriter) WriteByte4Int64(value int64) error {
)
}
// WriteByte8Int64 writes an eight-byte representation of an int64 value.
func (w *StreamWriter) WriteByte8Int64(value int64) error {
return w.buf.Write(w.w,
byte(value>>56),
@@ -59,12 +69,14 @@ func (w *StreamWriter) WriteByte8Int64(value int64) error {
)
}
// WriteByte1Uint64 writes a single byte representation of a uint64 value.
func (w *StreamWriter) WriteByte1Uint64(value uint64) error {
return w.buf.Write(w.w,
byte(value),
)
}
// WriteByte2Uint64 writes a two-byte representation of a uint64 value.
func (w *StreamWriter) WriteByte2Uint64(value uint64) error {
return w.buf.Write(w.w,
byte(value>>8),
@@ -72,6 +84,7 @@ func (w *StreamWriter) WriteByte2Uint64(value uint64) error {
)
}
// WriteByte4Uint64 writes a four-byte representation of a uint64 value.
func (w *StreamWriter) WriteByte4Uint64(value uint64) error {
return w.buf.Write(w.w,
byte(value>>24),
@@ -81,6 +94,7 @@ func (w *StreamWriter) WriteByte4Uint64(value uint64) error {
)
}
// WriteByte8Uint64 writes an eight-byte representation of a uint64 value.
func (w *StreamWriter) WriteByte8Uint64(value uint64) error {
return w.buf.Write(w.w,
byte(value>>56),
@@ -94,12 +108,14 @@ func (w *StreamWriter) WriteByte8Uint64(value uint64) error {
)
}
// WriteByte1Int writes a single byte representation of an int value.
func (w *StreamWriter) WriteByte1Int(value int) error {
return w.buf.Write(w.w,
byte(value),
)
}
// WriteByte2Int writes a two-byte representation of an int value.
func (w *StreamWriter) WriteByte2Int(value int) error {
return w.buf.Write(w.w,
byte(value>>8),
@@ -107,6 +123,7 @@ func (w *StreamWriter) WriteByte2Int(value int) error {
)
}
// WriteByte4Int writes a four-byte representation of an int value.
func (w *StreamWriter) WriteByte4Int(value int) error {
return w.buf.Write(w.w,
byte(value>>24),
@@ -116,6 +133,7 @@ func (w *StreamWriter) WriteByte4Int(value int) error {
)
}
// WriteByte4Uint32 writes a four-byte representation of a uint32 value.
func (w *StreamWriter) WriteByte4Uint32(value uint32) error {
return w.buf.Write(w.w,
byte(value>>24),
@@ -125,6 +143,7 @@ func (w *StreamWriter) WriteByte4Uint32(value uint32) error {
)
}
// WriteBytes writes a slice of bytes to the underlying writer.
func (w *StreamWriter) WriteBytes(bs []byte) error {
return w.buf.Write(w.w, bs...)
}

View File

@@ -1,23 +1,42 @@
package common
import "reflect"
import (
"reflect"
"strings"
)
// Common is used encoding/decoding
type Common struct {
}
// CheckField returns flag whether should encode/decode or not and field name
func (c *Common) CheckField(field reflect.StructField) (bool, string) {
func (c *Common) CheckField(field reflect.StructField) (public, omit bool, name string) {
// A to Z
if c.isPublic(field.Name) {
if tag := field.Tag.Get("msgpack"); tag == "-" {
return false, ""
} else if len(tag) > 0 {
return true, tag
}
return true, field.Name
if !c.isPublic(field.Name) {
return false, false, ""
}
return false, ""
tag := field.Tag.Get("msgpack")
if tag == "" {
return true, false, field.Name
}
parts := strings.Split(tag, ",")
// check ignore
if parts[0] == "-" {
return false, false, ""
}
// check omitempty
for _, part := range parts[1:] {
if part == "omitempty" {
omit = true
}
}
// check name
name = field.Name
if parts[0] != "" {
name = parts[0]
}
return true, omit, name
}
func (c *Common) isPublic(name string) bool {

View File

@@ -71,7 +71,7 @@ func (d *decoder) setStructFromArray(rv reflect.Value, offset int, k reflect.Kin
if !findCache {
scta = &structCacheTypeArray{}
for i := 0; i < rv.NumField(); i++ {
if ok, _ := d.CheckField(rv.Type().Field(i)); ok {
if ok, _, _ := d.CheckField(rv.Type().Field(i)); ok {
scta.m = append(scta.m, i)
}
}
@@ -112,7 +112,7 @@ func (d *decoder) setStructFromMap(rv reflect.Value, offset int, k reflect.Kind)
if !cacheFind {
sctm = &structCacheTypeMap{}
for i := 0; i < rv.NumField(); i++ {
if ok, name := d.CheckField(rv.Type().Field(i)); ok {
if ok, _, name := d.CheckField(rv.Type().Field(i)); ok {
sctm.keys = append(sctm.keys, []byte(name))
sctm.indexes = append(sctm.indexes, i)
}

Some files were not shown because too many files have changed in this diff Show More