mirror of
https://github.com/syncthing/syncthing.git
synced 2025-12-31 18:09:03 -05:00
At a high level, this is what I've done and why:
- I'm moving the protobuf generation for the `protocol`, `discovery` and
`db` packages to the modern alternatives, and using `buf` to generate
because it's nice and simple.
- After trying various approaches on how to integrate the new types with
the existing code, I opted for splitting off our own data model types
from the on-the-wire generated types. This means we can have a
`FileInfo` type with nicer ergonomics and lots of methods, while the
protobuf generated type stays clean and close to the wire protocol. It
does mean copying between the two when required, which certainly adds a
small amount of inefficiency. If we want to walk this back in the future
and use the raw generated type throughout, that's possible, this however
makes the refactor smaller (!) as it doesn't change everything about the
type for everyone at the same time.
- I have simply removed in cold blood a significant number of old
database migrations. These depended on previous generations of generated
messages of various kinds and were annoying to support in the new
fashion. The oldest supported database version now is the one from
Syncthing 1.9.0 from Sep 7, 2020.
- I changed config structs to be regular manually defined structs.
For the sake of discussion, some things I tried that turned out not to
work...
### Embedding / wrapping
Embedding the protobuf generated structs in our existing types as a data
container and keeping our methods and stuff:
```
package protocol
type FileInfo struct {
*generated.FileInfo
}
```
This generates a lot of problems because the internal shape of the
generated struct is quite different (different names, different types,
more pointers), because initializing it doesn't work like you'd expect
(i.e., you end up with an embedded nil pointer and a panic), and because
the types of child types don't get wrapped. That is, even if we also
have a similar wrapper around a `Vector`, that's not the type you get
when accessing `someFileInfo.Version`, you get the `*generated.Vector`
that doesn't have methods, etc.
### Aliasing
```
package protocol
type FileInfo = generated.FileInfo
```
Doesn't help because you can't attach methods to it, plus all the above.
### Generating the types into the target package like we do now and
attaching methods
This fails because of the different shape of the generated type (as in
the embedding case above) plus the generated struct already has a bunch
of methods that we can't necessarily override properly (like `String()`
and a bunch of getters).
### Methods to functions
I considered just moving all the methods we attach to functions in a
specific package, so that for example
```
package protocol
func (f FileInfo) Equal(other FileInfo) bool
```
would become
```
package fileinfos
func Equal(a, b *generated.FileInfo) bool
```
and this would mostly work, but becomes quite verbose and cumbersome,
and somewhat limits discoverability (you can't see what methods are
available on the type in auto completions, etc). In the end I did this
in some cases, like in the database layer where a lot of things like
`func (fv *FileVersion) IsEmpty() bool` becomes `func fvIsEmpty(fv
*generated.FileVersion) bool` because they were anyway just internal methods.
Fixes #8247
116 lines
2.9 KiB
Go
116 lines
2.9 KiB
Go
// Copyright (C) 2016 The Syncthing Authors.
|
|
//
|
|
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
|
|
|
package protocol
|
|
|
|
import (
|
|
"fmt"
|
|
"sync"
|
|
"sync/atomic"
|
|
)
|
|
|
|
// Global pool to get buffers from. Initialized in init().
var BufferPool *bufferPool

// bufferPool hands out reusable []byte buffers, bucketed by the sizes in
// BlockSizes, and keeps counters describing pool behavior.
type bufferPool struct {
	puts   atomic.Int64   // buffers accepted back into the pool via Put
	skips  atomic.Int64   // requests/returns bypassed because the size was outside the pooled range
	misses atomic.Int64   // Get calls that found no pooled buffer and had to allocate
	pools  []sync.Pool    // one pool per entry in BlockSizes
	hits   []atomic.Int64 // per-bucket count of Get calls served from the pool
}
|
|
|
|
func newBufferPool() *bufferPool {
|
|
return &bufferPool{
|
|
pools: make([]sync.Pool, len(BlockSizes)),
|
|
hits: make([]atomic.Int64, len(BlockSizes)),
|
|
}
|
|
}
|
|
|
|
func (p *bufferPool) Get(size int) []byte {
|
|
// Too big, isn't pooled
|
|
if size > MaxBlockSize {
|
|
p.skips.Add(1)
|
|
return make([]byte, size)
|
|
}
|
|
|
|
// Try the fitting and all bigger pools
|
|
bkt := getBucketForLen(size)
|
|
for j := bkt; j < len(BlockSizes); j++ {
|
|
if intf := p.pools[j].Get(); intf != nil {
|
|
p.hits[j].Add(1)
|
|
bs := *intf.(*[]byte)
|
|
return bs[:size]
|
|
}
|
|
}
|
|
|
|
p.misses.Add(1)
|
|
|
|
// All pools are empty, must allocate. For very small slices where we
|
|
// didn't have a block to reuse, just allocate a small slice instead of
|
|
// a large one. We won't be able to reuse it, but avoid some overhead.
|
|
if size < MinBlockSize/64 {
|
|
return make([]byte, size)
|
|
}
|
|
return make([]byte, BlockSizes[bkt])[:size]
|
|
}
|
|
|
|
// Put makes the given byte slice available again in the global pool.
|
|
// You must only Put() slices that were returned by Get() or Upgrade().
|
|
func (p *bufferPool) Put(bs []byte) {
|
|
// Don't buffer slices outside of our pool range
|
|
if cap(bs) > MaxBlockSize || cap(bs) < MinBlockSize {
|
|
p.skips.Add(1)
|
|
return
|
|
}
|
|
|
|
p.puts.Add(1)
|
|
bkt := putBucketForCap(cap(bs))
|
|
p.pools[bkt].Put(&bs)
|
|
}
|
|
|
|
// Upgrade grows the buffer to the requested size, while attempting to reuse
|
|
// it if possible.
|
|
func (p *bufferPool) Upgrade(bs []byte, size int) []byte {
|
|
if cap(bs) >= size {
|
|
// Reslicing is enough, lets go!
|
|
return bs[:size]
|
|
}
|
|
|
|
// It was too small. But it pack into the pool and try to get another
|
|
// buffer.
|
|
p.Put(bs)
|
|
return p.Get(size)
|
|
}
|
|
|
|
// getBucketForLen returns the bucket where we should get a slice of a
|
|
// certain length. Each bucket is guaranteed to hold slices that are
|
|
// precisely the block size for that bucket, so if the block size is larger
|
|
// than our size we are good.
|
|
func getBucketForLen(len int) int {
|
|
for i, blockSize := range BlockSizes {
|
|
if len <= blockSize {
|
|
return i
|
|
}
|
|
}
|
|
|
|
panic(fmt.Sprintf("bug: tried to get impossible block len %d", len))
|
|
}
|
|
|
|
// putBucketForCap returns the bucket where we should put a slice of a
|
|
// certain capacity. Each bucket is guaranteed to hold slices that are
|
|
// precisely the block size for that bucket, so we just find the matching
|
|
// one.
|
|
func putBucketForCap(cap int) int {
|
|
for i, blockSize := range BlockSizes {
|
|
if cap == blockSize {
|
|
return i
|
|
}
|
|
}
|
|
|
|
panic(fmt.Sprintf("bug: tried to put impossible block cap %d", cap))
|
|
}
|