Mirror of https://github.com/kopia/kopia.git
chore(general): minor cleanups and other nits (#4507)
* use uint8 for clarity
* unexport writeContentAsyncAndVerify
* fix typo in test function name
* remove commented interface functions
* use atomic.Int32
* cleanups in socket server activation test
* leverage stdlib's maps and slices packages: replace uses of `golang.org/x/exp/maps`
* nit: leverage `maps.Values`
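Note on the maps/slices migration: golang.org/x/exp/maps.Keys returns a []K, while the
stdlib maps.Keys (Go 1.23+) returns an iter.Seq[K] iterator, so call sites that need a
real slice must materialize it; that is what the mapKeys/mapValues helpers added in the
hunks below do. A minimal, self-contained sketch of the pattern (the sample map is
illustrative, not taken from the kopia code):

    package main

    import (
        "fmt"
        "maps"
        "slices"
    )

    func main() {
        m := map[string]int{"b": 2, "a": 1}

        // maps.Keys yields an iterator over the keys (in unspecified order);
        // slices.AppendSeq materializes it into a slice.
        keys := slices.AppendSeq(make([]string, 0, len(m)), maps.Keys(m))
        fmt.Println(len(keys)) // 2

        // slices.Sorted collects and sorts an iterator in one step.
        fmt.Println(slices.Sorted(maps.Keys(m))) // [a b]
    }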
@@ -2,10 +2,11 @@

 import (
     "context"
+    "maps"
+    "slices"

     "github.com/alecthomas/kingpin/v2"
     "github.com/pkg/errors"
-    "golang.org/x/exp/maps"

     "github.com/kopia/kopia/notification"
     "github.com/kopia/kopia/notification/notifyprofile"
@@ -23,7 +24,7 @@ type commonNotificationOptions struct {
 func (c *commonNotificationOptions) setup(svc appServices, cmd *kingpin.CmdClause) {
     c.notificationProfileFlag.setup(svc, cmd)
     cmd.Flag("send-test-notification", "Test the notification").BoolVar(&c.sendTestNotification)
-    cmd.Flag("min-severity", "Minimum severity").EnumVar(&c.minSeverity, maps.Keys(notification.SeverityToNumber)...)
+    cmd.Flag("min-severity", "Minimum severity").EnumVar(&c.minSeverity, mapKeys(notification.SeverityToNumber)...)
 }

 // configureNotificationAction is a helper function that creates a Kingpin action that
@@ -101,3 +102,7 @@ func configureNotificationAction[T any](
         })
     })
 }
+
+func mapKeys[Map ~map[K]V, K comparable, V any](m Map) []K {
+    return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
+}
@@ -3,10 +3,10 @@

 import (
     "context"
+    "maps"
     "strings"

     "github.com/pkg/errors"
-    "golang.org/x/exp/maps"

     "github.com/kopia/kopia/repo"
     "github.com/kopia/kopia/repo/manifest"
internal/cache/content_cache_test.go (2 changed lines, vendored)
@@ -3,6 +3,7 @@
 import (
     "bytes"
     "context"
+    "slices"
     "sort"
     "sync"
     "testing"
@@ -11,7 +12,6 @@
     "github.com/pkg/errors"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    "golang.org/x/exp/slices"

     "github.com/kopia/kopia/internal/blobtesting"
     "github.com/kopia/kopia/internal/cache"
@@ -1,11 +1,12 @@
 package metrics

 import (
+    "maps"
+    "slices"
     "sync"

     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promauto"
-    "golang.org/x/exp/maps"
 )

 const (
@@ -28,12 +29,12 @@ func getPrometheusCounter(opts prometheus.CounterOpts, labels map[string]string)

     prom := promCounters[opts.Name]
     if prom == nil {
-        prom = promauto.NewCounterVec(opts, maps.Keys(labels))
+        prom = promauto.NewCounterVec(opts, mapKeys(labels))

         promCounters[opts.Name] = prom
     }

-    return prom.WithLabelValues(maps.Values(labels)...)
+    return prom.WithLabelValues(mapValues(labels)...)
 }

 func getPrometheusHistogram(opts prometheus.HistogramOpts, labels map[string]string) prometheus.Observer { //nolint:gocritic
@@ -42,10 +43,18 @@ func getPrometheusHistogram(opts prometheus.HistogramOpts, labels map[string]str

     prom := promHistograms[opts.Name]
     if prom == nil {
-        prom = promauto.NewHistogramVec(opts, maps.Keys(labels))
+        prom = promauto.NewHistogramVec(opts, mapKeys(labels))

         promHistograms[opts.Name] = prom
     }

-    return prom.WithLabelValues(maps.Values(labels)...)
+    return prom.WithLabelValues(mapValues(labels)...)
 }
+
+func mapKeys[Map ~map[K]V, K comparable, V any](m Map) []K {
+    return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
+}
+
+func mapValues[Map ~map[K]V, K comparable, V any](m Map) []V {
+    return slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m))
+}
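The mapKeys/mapValues helpers above are essentially slices.Collect with preallocation:
since len(m) is known, AppendSeq into make([]K, 0, len(m)) builds the slice in a single
allocation. A short, self-contained comparison (the label map is illustrative):

    package main

    import (
        "fmt"
        "maps"
        "slices"
    )

    func main() {
        labels := map[string]string{"region": "us", "zone": "a"}

        // slices.Collect starts from a nil slice and grows it as the iterator runs.
        a := slices.Collect(maps.Keys(labels))

        // Preallocating with AppendSeq avoids the incremental growth.
        b := slices.AppendSeq(make([]string, 0, len(labels)), maps.Keys(labels))

        fmt.Println(len(a), len(b)) // 2 2
    }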
@@ -5,12 +5,13 @@
     "context"
     "fmt"
     "io"
+    "maps"
     "os"
     "regexp"
+    "slices"
     "testing"

     "github.com/stretchr/testify/require"
-    "golang.org/x/exp/maps"

     "github.com/kopia/kopia/repo/logging"
 )
@@ -163,7 +164,7 @@ func TestDebug_parseProfileConfigs(t *testing.T) {
             require.ErrorIs(t, tc.expectError, err)
             require.Len(t, pbs, tc.n)
             pb, ok := pbs[tc.key] // no negative testing for missing keys (see newProfileConfigs)
-            require.Equalf(t, !tc.expectMissing, ok, "key %q for set %q expect missing %t", tc.key, maps.Keys(pbs), tc.expectMissing)
+            require.Equalf(t, !tc.expectMissing, ok, "key %q for set %q expect missing %t", tc.key, mapKeys(pbs), tc.expectMissing)
             if tc.expectMissing {
                 return
             }
@@ -174,6 +175,10 @@ func TestDebug_parseProfileConfigs(t *testing.T) {
         }
     }
 }
+
+func mapKeys[Map ~map[K]V, K comparable, V any](m Map) []K {
+    return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
+}

 func TestDebug_newProfileConfigs(t *testing.T) {
     saveLockEnv(t)
@@ -3,8 +3,10 @@

 import (
     "context"
+    "maps"
     "regexp"
-    "sort"
+    "slices"
+    "strings"

     "github.com/pkg/errors"
@@ -61,22 +63,16 @@ func LoadProfileMap(ctx context.Context, rep repo.Repository, old map[string]*Pr

 // ListUserProfiles gets the list of all user profiles in the system.
 func ListUserProfiles(ctx context.Context, rep repo.Repository) ([]*Profile, error) {
-    var result []*Profile
-
     users, err := LoadProfileMap(ctx, rep, nil)
     if err != nil {
         return nil, err
     }

-    for _, v := range users {
-        result = append(result, v)
-    }
-
-    sort.Slice(result, func(i, j int) bool {
-        return result[i].Username < result[j].Username
+    profs := slices.SortedFunc(maps.Values(users), func(p1, p2 *Profile) int {
+        return strings.Compare(p1.Username, p2.Username)
     })

-    return result, nil
+    return profs, nil
 }

 // GetUserProfile returns the user profile with a given username.
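The ListUserProfiles rewrite above collapses an explicit copy loop plus sort.Slice into
a single slices.SortedFunc call over the maps.Values iterator. A self-contained sketch
of the same pattern (the user type and data are illustrative):

    package main

    import (
        "fmt"
        "maps"
        "slices"
        "strings"
    )

    type user struct{ name string }

    func main() {
        m := map[int]*user{1: {"bob"}, 2: {"alice"}}

        // SortedFunc drains the iterator into a new slice and sorts it
        // with the supplied comparison function.
        sorted := slices.SortedFunc(maps.Values(m), func(a, b *user) int {
            return strings.Compare(a.name, b.name)
        })

        for _, u := range sorted {
            fmt.Println(u.name) // alice, then bob
        }
    }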
@@ -2,11 +2,12 @@

 import (
     "context"
+    "maps"
+    "slices"
     "strings"
     "time"

     "github.com/pkg/errors"
-    "golang.org/x/exp/maps"

     "github.com/kopia/kopia/repo"
     "github.com/kopia/kopia/repo/manifest"
@@ -106,7 +107,7 @@ func ListTemplates(ctx context.Context, rep repo.Repository, prefix string) ([]I
         }
     }

-    return maps.Values(infos), nil
+    return slices.AppendSeq(make([]Info, 0, len(infos)), maps.Values(infos)), nil
 }

 // SetTemplate saves a template in the repository.
@@ -6,11 +6,12 @@
     "context"
     "fmt"
     "io"
+    "maps"
+    "slices"
     "sort"
     "strings"

     "github.com/pkg/errors"
-    "golang.org/x/exp/maps"
 )

 // Severity represents the severity of a notification message.
@@ -80,7 +81,7 @@ func (m Message) ToString() string {

     fmt.Fprintf(&buf, "Subject: %v\n", m.Subject)

-    headers := maps.Keys(m.Headers)
+    headers := slices.AppendSeq(make([]string, 0, len(m.Headers)), maps.Keys(m.Headers))

     sort.Strings(headers)
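Note: the hunk above keeps the existing materialize-then-sort.Strings shape and only
swaps the materialization. For contrast (this is not what the commit does), the stdlib
also offers a one-step form:

    headers := slices.Sorted(maps.Keys(m.Headers)) // collect and sort in one call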
@@ -37,7 +37,7 @@ type ID struct {

     // those 2 could be packed into one byte, but that seems like overkill
     prefix byte
-    idLen  byte
+    idLen  uint8
 }

 // MarshalJSON implements JSON serialization.
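Note: byte is a built-in alias for uint8 in Go, so this hunk is a no-op for the
compiler; it only documents that idLen holds a small count rather than raw byte data.

    var n uint8 = 10
    var b byte = n // identical types: no conversion involved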
@@ -732,7 +732,7 @@ func (r *grpcRepositoryClient) doWriteAsync(ctx context.Context, contentID conte
     r.opt.OnUpload(int64(len(data)))

     if _, err := inSessionWithoutRetry(ctx, r, func(ctx context.Context, sess *grpcInnerSession) (content.ID, error) {
-        sess.WriteContentAsyncAndVerify(ctx, contentID, data, prefix, comp, r.asyncWritesWG)
+        sess.writeContentAsyncAndVerify(ctx, contentID, data, prefix, comp, r.asyncWritesWG)

         return contentID, nil
     }); err != nil {
         return err
@@ -777,7 +777,7 @@ func (r *grpcRepositoryClient) WriteContent(ctx context.Context, data gather.Byt
     return contentID, nil
 }

-func (r *grpcInnerSession) WriteContentAsyncAndVerify(ctx context.Context, contentID content.ID, data []byte, prefix content.IDPrefix, comp compression.HeaderID, eg *errgroup.Group) {
+func (r *grpcInnerSession) writeContentAsyncAndVerify(ctx context.Context, contentID content.ID, data []byte, prefix content.IDPrefix, comp compression.HeaderID, eg *errgroup.Group) {
     ch := r.sendRequest(ctx, &apipb.SessionRequest{
         Request: &apipb.SessionRequest_WriteContent{
             WriteContent: &apipb.WriteContentRequest{
@@ -7,6 +7,7 @@
     "path/filepath"
     "strings"
     "sync"
+    "sync/atomic"
     "time"

     "github.com/pkg/errors"
@@ -365,7 +366,7 @@ func openWithConfig(ctx context.Context, st blob.Storage, cliOpts ClientOptions,
         timeNow:          cmOpts.TimeNow,
         cliOpts:          cliOpts,
         configFile:       configFile,
-        nextWriterID:     new(int32),
+        nextWriterID:     &atomic.Int32{},
         throttler:        throttler,
         metricsRegistry:  mr,
         refCountedCloser: closer,
@@ -95,12 +95,6 @@ type DirectRepositoryWriter interface {
     DirectRepository
     BlobStorage() blob.Storage
     ContentManager() *content.WriteManager
-    // SetParameters(ctx context.Context, m format.MutableParameters, blobcfg format.BlobStorageConfiguration, requiredFeatures []feature.Required) error
-    // ChangePassword(ctx context.Context, newPassword string) error
-    // GetUpgradeLockIntent(ctx context.Context) (*format.UpgradeLockIntent, error)
-    // SetUpgradeLockIntent(ctx context.Context, l format.UpgradeLockIntent) (*format.UpgradeLockIntent, error)
-    // CommitUpgrade(ctx context.Context) error
-    // RollbackUpgrade(ctx context.Context) error
 }

 type immutableDirectRepositoryParameters struct {
@@ -109,7 +103,7 @@ type immutableDirectRepositoryParameters struct {
     cliOpts         ClientOptions
     timeNow         func() time.Time
     fmgr            *format.Manager
-    nextWriterID    *int32
+    nextWriterID    *atomic.Int32
     throttler       throttling.SettableThrottler
     metricsRegistry *metrics.Registry
     beforeFlush     []RepositoryWriterCallback
@@ -282,7 +276,7 @@ func (r *directRepository) NewWriter(ctx context.Context, opt WriteSessionOption

 // NewDirectWriter returns new DirectRepositoryWriter session for repository.
 func (r *directRepository) NewDirectWriter(ctx context.Context, opt WriteSessionOptions) (context.Context, DirectRepositoryWriter, error) {
-    writeManagerID := fmt.Sprintf("writer-%v:%v", atomic.AddInt32(r.nextWriterID, 1), opt.Purpose)
+    writeManagerID := fmt.Sprintf("writer-%v:%v", r.nextWriterID.Add(1), opt.Purpose)

     cmgr := content.NewWriteManager(ctx, r.sm, content.SessionOptions{
         SessionUser: r.cliOpts.Username,
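The atomic.Int32 hunks above move from a bare *int32 updated via atomic.AddInt32 to the
struct-based sync/atomic.Int32 added in Go 1.19, whose methods make accidental
non-atomic access harder to write. A minimal sketch of the two styles:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        // Old style: a plain *int32, safe only if every access remembers
        // to go through the atomic.* functions.
        old := new(int32)
        atomic.AddInt32(old, 1)

        // New style: the zero value is ready to use, and all access goes
        // through the type's methods.
        var next atomic.Int32
        id := next.Add(1)

        fmt.Println(atomic.LoadInt32(old), id, next.Load()) // 1 1 1
    }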
@@ -102,7 +102,7 @@ func TestOneLargeFile(t *testing.T) {
     th.RunN(ctx, t, numClients, f)
 }

-func TestManySmallFilesAcrossDirecoryTree(t *testing.T) {
+func TestManySmallFilesAcrossDirectoryTree(t *testing.T) {
     // TODO: Test takes too long - need to address performance issues with fio writes
     const (
         fileSize = 4096
@@ -71,7 +71,7 @@ func TestOneLargeFile(t *testing.T) {
     require.NoError(t, err)
 }

-func TestManySmallFilesAcrossDirecoryTree(t *testing.T) {
+func TestManySmallFilesAcrossDirectoryTree(t *testing.T) {
     // TODO: Test takes too long - need to address performance issues with fio writes
     const (
         fileSize = 4096
@@ -37,9 +37,7 @@ func TestServerControlSocketActivated(t *testing.T) {
     env.Environment["LISTEN_FDS"] = "1"

     l1, err := net.Listen("tcp", ":0")
-    if err != nil {
-        t.Fatalf("Failed to open Listener")
-    }
+    require.NoError(t, err, "Failed to open Listener")

     defer func() {
         l1.Close()
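The socket-activation test cleanups above (and in the hunks that follow) replace
hand-rolled if err != nil { t.Fatalf(...) } blocks with testify's require helpers, and
switch t.Logf/t.Fatalf to t.Log/t.Fatal where no format verbs are used. A
self-contained sketch of the require.NoError shape (hypothetical test, not from the
kopia suite):

    package example

    import (
        "net"
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestListen(t *testing.T) {
        l, err := net.Listen("tcp", ":0")
        // Fails the test immediately with the given message when err is
        // non-nil, replacing a three-line manual check.
        require.NoError(t, err, "Failed to open Listener")

        defer l.Close()
    }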
@@ -57,7 +55,7 @@ func TestServerControlSocketActivated(t *testing.T) {
     go func() {
         l1File, err := l1.(*net.TCPListener).File()
         if err != nil {
-            t.Logf("ERROR: Failed to get filehandle for socket")
+            t.Log("ERROR: Failed to get filehandle for socket")
             close(serverStarted)

             return
@@ -77,14 +75,11 @@ func TestServerControlSocketActivated(t *testing.T) {

     select {
     case <-serverStarted:
-        if sp.BaseURL == "" {
-            t.Fatalf("Failed to start server")
-        }
-
+        require.NotEmpty(t, sp.BaseURL, "Failed to start server")
         t.Logf("server started on %v", sp.BaseURL)

     case <-time.After(5 * time.Second):
-        t.Fatalf("server did not start in time")
+        t.Fatal("server did not start in time")
     }

     require.Contains(t, sp.BaseURL, ":"+strconv.Itoa(port))
@@ -97,10 +92,10 @@ func TestServerControlSocketActivated(t *testing.T) {

     select {
     case <-serverStopped:
-        t.Logf("server shut down")
+        t.Log("server shut down")

     case <-time.After(15 * time.Second):
-        t.Fatalf("server did not shutdown in time")
+        t.Fatal("server did not shutdown in time")
     }
 }

@@ -120,9 +115,7 @@ func TestServerControlSocketActivatedTooManyFDs(t *testing.T) {
     env.Environment["LISTEN_FDS"] = "2"

     l1, err := net.Listen("tcp", ":0")
-    if err != nil {
-        t.Fatalf("Failed to open Listener")
-    }
+    require.NoError(t, err, "Failed to open Listener")

     defer func() {
         l1.Close()
@@ -137,7 +130,7 @@ func TestServerControlSocketActivatedTooManyFDs(t *testing.T) {
     go func() {
         l1File, err := l1.(*net.TCPListener).File()
         if err != nil {
-            t.Logf("Failed to get filehandle for socket")
+            t.Log("Failed to get filehandle for socket")
             close(serverStarted)

             return
@@ -145,7 +138,7 @@ func TestServerControlSocketActivatedTooManyFDs(t *testing.T) {

         l2File, err := l1.(*net.TCPListener).File()
         if err != nil {
-            t.Logf("Failed to get 2nd filehandle for socket")
+            t.Log("Failed to get 2nd filehandle for socket")
             close(serverStarted)

             return
@@ -164,9 +157,9 @@ func TestServerControlSocketActivatedTooManyFDs(t *testing.T) {
     select {
     case stderr := <-serverStarted:
         require.Contains(t, strings.Join(stderr, ""), "Too many activated sockets found. Expected 1, got 2")
-        t.Logf("Done")
+        t.Log("Done")

     case <-time.After(5 * time.Second):
-        t.Fatalf("server did not exit in time")
+        t.Fatal("server did not exit in time")
     }
 }