Files
kopia/internal/sparsefile/sparsefile_test.go
Ali Dowair eddde91f2d chore(snapshots): unify sparse and normal FS output paths (#1981)
* Unify sparse and normal IO output

This commit refactors the code paths that exercise normal and sparse
writing of restored content. The goal is to expose sparsefile.Copy()
and iocopy.Copy() to be interchangeable, thereby allowing us to wrap
or transform their behavior more easily in the future.

* Introduce getStreamCopier()

* Pull ioCopy() into getStreamCopier()

* Fix small nit in E2E test

We should be getting the block size of the destination file, not
the source file.

* Call stat.GetBlockSize() once per FilesystemOutput

A tiny refactor to pull this call out of the generated stream copier,
as the block size should not change from one file to the next within
a restore entry.

NOTE: as a side effect, if block size could not be found (an error
is returned), we will return the default stream copier instead of
letting the sparse copier fail. A warning will be logged, but this
error will not cause the restore to fail; it will proceed silently.
2022-06-14 18:09:45 +00:00

113 lines
1.7 KiB
Go

package sparsefile
import (
"bytes"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/kopia/kopia/internal/stat"
)
// TestSparseCopy verifies that sparsefile.Copy produces a destination file
// whose logical contents are byte-identical to the source, across a variety
// of hole/data layouts aligned to the filesystem block size.
func TestSparseCopy(t *testing.T) {
	t.Parallel()

	if runtime.GOOS == "windows" {
		t.Skip("sparse files are not supported on windows")
	}

	dir := t.TempDir()

	// Block size of the filesystem backing the temp dir; fixtures are sized
	// in multiples of it so holes and data runs align with real blocks.
	blk, err := stat.GetBlockSize(dir)
	if err != nil {
		t.Fatal(err)
	}

	// chunk describes a run of repeated bytes written at a given offset of
	// the source file; regions not covered by any chunk are holes (zeros).
	type chunk struct {
		slice []byte
		off   uint64
		rep   uint64
	}

	cases := []struct {
		name string
		size uint64 // logical size the destination is truncated to
		data []chunk
	}{
		{
			// Zero-length file: nothing to copy.
			name: "null",
			size: 0,
		},
		{
			// One block of explicit zeros: copy should still match.
			name: "empty",
			size: blk,
			data: []chunk{
				{slice: []byte{0}, off: 0, rep: blk},
			},
		},
		{
			// Leading hole followed by one block of data.
			name: "hole",
			size: 2 * blk,
			data: []chunk{
				{slice: []byte{1}, off: blk, rep: blk},
			},
		},
		{
			// Data runs that straddle block boundaries.
			name: "mix",
			size: 2 * blk,
			data: []chunk{
				{slice: []byte{1}, off: 3, rep: blk - 10},
				{slice: []byte{1}, off: 2*blk - 10, rep: 10},
			},
		},
	}

	for _, c := range cases {
		c := c

		t.Run(c.name, func(t *testing.T) {
			src := filepath.Join(dir, "src"+c.name)
			dst := filepath.Join(dir, "dst"+c.name)

			sf, err := os.Create(src)
			if err != nil {
				t.Fatal(err)
			}
			defer sf.Close()

			for _, d := range c.data {
				// Check the write error: the original ignored it, which
				// would turn a failed fixture write into a bogus mismatch.
				if _, err := sf.WriteAt(bytes.Repeat(d.slice, int(d.rep)), int64(d.off)); err != nil {
					t.Fatal(err)
				}
			}

			df, err := os.Create(dst)
			if err != nil {
				t.Fatal(err)
			}
			defer df.Close()

			if err := df.Truncate(int64(c.size)); err != nil {
				t.Fatal(err)
			}

			// Use the destination's block size for the sparse copy — it may
			// differ from the filesystem the source lives on.
			dstBlk, err := stat.GetBlockSize(dst)
			if err != nil {
				t.Fatal(err)
			}

			if _, err := Copy(df, sf, dstBlk); err != nil {
				t.Fatalf("error writing %s: %v", dst, err)
			}

			s, err := os.ReadFile(src)
			if err != nil {
				t.Fatal(err)
			}

			d, err := os.ReadFile(dst)
			if err != nil {
				t.Fatal(err)
			}

			if !bytes.Equal(s, d) {
				t.Fatalf("contents of %s and %s are not identical", src, dst)
			}
		})
	}
}