From e92d3cccd0fdeaa9b277ac8248d2edb6c729ae3a Mon Sep 17 00:00:00 2001 From: Gani Georgiev Date: Fri, 18 Oct 2024 14:23:38 +0300 Subject: [PATCH] updated jsvm types --- go.mod | 8 +- go.sum | 28 +- .../jsvm/internal/types/generated/types.d.ts | 29694 ++++++++++++++-- 3 files changed, 27434 insertions(+), 2296 deletions(-) diff --git a/go.mod b/go.mod index ef41abd3..b22ea8de 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/pocketbase/pocketbase -go 1.22 +go 1.22.0 require ( github.com/AlecAivazis/survey/v2 v2.3.7 @@ -24,7 +24,7 @@ require ( github.com/labstack/echo/v5 v5.0.0-20230722203903-ec5b858dab61 github.com/mattn/go-sqlite3 v1.14.22 github.com/pocketbase/dbx v1.10.1 - github.com/pocketbase/tygoja v0.0.0-20240113091827-17918475d342 + github.com/pocketbase/tygoja v0.0.0-20241015175937-d6ff411a0f75 github.com/spf13/cast v1.7.0 github.com/spf13/cobra v1.8.1 gocloud.dev v0.39.0 @@ -70,12 +70,12 @@ require ( github.com/valyala/fasttemplate v1.2.2 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/image v0.19.0 // indirect - golang.org/x/mod v0.19.0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.23.0 // indirect + golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect google.golang.org/api v0.194.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect diff --git a/go.sum b/go.sum index d12eb238..965ec0ec 100644 --- a/go.sum +++ b/go.sum @@ -78,8 +78,6 @@ github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yA github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8= github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c= -github.com/dop251/goja v0.0.0-20240822155948-fa6d1ed5e4b6 h1:0x8Sh2rKCTVUQnRTJFIwtRWAp91VMsnATQEsMAg14kM= -github.com/dop251/goja v0.0.0-20240822155948-fa6d1ed5e4b6/go.mod h1:MxLav0peU43GgvwVgNbLAj1s/bSGboKkhuULvq/7hx4= github.com/dop251/goja v0.0.0-20241009100908-5f46f2705ca3 h1:MXsAuToxwsTn5BEEYm2DheqIiC4jWGmkEJ1uy+KFhvQ= github.com/dop251/goja v0.0.0-20241009100908-5f46f2705ca3/go.mod h1:MxLav0peU43GgvwVgNbLAj1s/bSGboKkhuULvq/7hx4= github.com/dop251/goja_nodejs v0.0.0-20240728170619-29b559befffc h1:MKYt39yZJi0Z9xEeRmDX2L4ocE0ETKcHKw6MVL3R+co= @@ -189,8 +187,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pocketbase/dbx v1.10.1 h1:cw+vsyfCJD8YObOVeqb93YErnlxwYMkNZ4rwN0G0AaA= github.com/pocketbase/dbx v1.10.1/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs= -github.com/pocketbase/tygoja v0.0.0-20240113091827-17918475d342 h1:OcAwewen3hs/zY8i0syt8CcMTGBJhQwQRVDLcoQVXVk= -github.com/pocketbase/tygoja v0.0.0-20240113091827-17918475d342/go.mod h1:dOJ+pCyqm/jRn5kO/TX598J0e5xGDcJAZerK5atCrKI= +github.com/pocketbase/tygoja v0.0.0-20241015175937-d6ff411a0f75 h1:XSbmekxgmbI2uPrre/nkCz7y8VsV652TPb3hAYzPb74= +github.com/pocketbase/tygoja v0.0.0-20241015175937-d6ff411a0f75/go.mod h1:hKJWPGFqavk3cdTa47Qvs8g37lnfI57OYdVVbIqW5aE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -235,8 +233,7 @@ gocloud.dev v0.39.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -246,8 +243,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -258,8 +255,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -282,21 +277,18 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 
h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= @@ -307,8 +299,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= diff --git a/plugins/jsvm/internal/types/generated/types.d.ts b/plugins/jsvm/internal/types/generated/types.d.ts index dac56c61..9c1ad8cb 100644 --- a/plugins/jsvm/internal/types/generated/types.d.ts +++ b/plugins/jsvm/internal/types/generated/types.d.ts @@ -1,4 +1,4 @@ -// 1710682789 +// 1729250457 // GENERATED CODE - DO NOT MODIFY BY HAND // ------------------------------------------------------------------- @@ -1148,9 +1148,9 @@ type _TygojaAny = any * functionality. The design is Unix-like, although the error handling is * Go-like; failing calls return values of type error rather than error numbers. * Often, more information is available within the error. 
For example, - * if a call that takes a file name fails, such as Open or Stat, the error + * if a call that takes a file name fails, such as [Open] or [Stat], the error * will include the failing file name when printed and will be of type - * *PathError, which may be unpacked for more information. + * [*PathError], which may be unpacked for more information. * * The os interface is intended to be uniform across all operating systems. * Features not generally available appear in the system-specific package syscall. @@ -1182,22 +1182,26 @@ type _TygojaAny = any * fmt.Printf("read %d bytes: %q\n", count, data[:count]) * ``` * - * Note: The maximum number of concurrent operations on a File may be limited by - * the OS or the system. The number should be high, but exceeding it may degrade - * performance or cause other issues. + * # Concurrency + * + * The methods of [File] correspond to file system operations. All are + * safe for concurrent use. The maximum number of concurrent + * operations on a File may be limited by the OS or the system. The + * number should be high, but exceeding it may degrade performance or + * cause other issues. */ namespace os { interface readdirMode extends Number{} interface File { /** * Readdir reads the contents of the directory associated with file and - * returns a slice of up to n FileInfo values, as would be returned - * by Lstat, in directory order. Subsequent calls on the same file will yield + * returns a slice of up to n [FileInfo] values, as would be returned + * by [Lstat], in directory order. Subsequent calls on the same file will yield * further FileInfos. * * If n > 0, Readdir returns at most n FileInfo structures. In this case, if * Readdir returns an empty slice, it will return a non-nil error - * explaining why. At the end of a directory, the error is io.EOF. + * explaining why. At the end of a directory, the error is [io.EOF]. * * If n <= 0, Readdir returns all the FileInfo from the directory in * a single slice. In this case, if Readdir succeeds (reads all @@ -1219,7 +1223,7 @@ namespace os { * * If n > 0, Readdirnames returns at most n names. In this case, if * Readdirnames returns an empty slice, it will return a non-nil error - * explaining why. At the end of a directory, the error is io.EOF. + * explaining why. At the end of a directory, the error is [io.EOF]. * * If n <= 0, Readdirnames returns all the names from the directory in * a single slice. In this case, if Readdirnames succeeds (reads all @@ -1232,18 +1236,18 @@ namespace os { } /** * A DirEntry is an entry read from a directory - * (using the ReadDir function or a File's ReadDir method). + * (using the [ReadDir] function or a [File.ReadDir] method). */ interface DirEntry extends fs.DirEntry{} interface File { /** * ReadDir reads the contents of the directory associated with the file f - * and returns a slice of DirEntry values in directory order. + * and returns a slice of [DirEntry] values in directory order. * Subsequent calls on the same file will yield later DirEntry records in the directory. * * If n > 0, ReadDir returns at most n DirEntry records. * In this case, if ReadDir returns an empty slice, it will return an error explaining why. - * At the end of a directory, the error is io.EOF. + * At the end of a directory, the error is [io.EOF]. * * If n <= 0, ReadDir returns all the DirEntry records remaining in the directory. * When it succeeds, it returns a nil error (not io.EOF). 
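A quick Go sketch of the batched directory-listing pattern documented above; the directory name and batch size are illustrative and not part of the patch:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open("/var/log") // any readable directory
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	for {
		// n > 0: read at most 10 entries per call; io.EOF marks the end.
		entries, err := f.ReadDir(10)
		for _, e := range entries {
			fmt.Println(e.Name(), e.IsDir())
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}
```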
@@ -1260,6 +1264,28 @@ namespace os { */ (name: string): Array } + interface copyFS { + /** + * CopyFS copies the file system fsys into the directory dir, + * creating dir if necessary. + * + * Files are created with mode 0o666 plus any execute permissions + * from the source, and directories are created with mode 0o777 + * (before umask). + * + * CopyFS will not overwrite existing files. If a file name in fsys + * already exists in the destination, CopyFS will return an error + * such that errors.Is(err, fs.ErrExist) will be true. + * + * Symbolic links in fsys are not supported. A *PathError with Err set + * to ErrInvalid is returned when copying from a symbolic link. + * + * Symbolic links in dir are followed. + * + * Copying stops at and returns the first error encountered. + */ + (dir: string, fsys: fs.FS): void + } /** * Auxiliary information if the File describes a directory */ @@ -1268,7 +1294,7 @@ namespace os { interface expand { /** * Expand replaces ${var} or $var in the string based on the mapping function. - * For example, os.ExpandEnv(s) is equivalent to os.Expand(s, os.Getenv). + * For example, [os.ExpandEnv](s) is equivalent to [os.Expand](s, [os.Getenv]). */ (s: string, mapping: (_arg0: string) => string): string } @@ -1284,7 +1310,7 @@ namespace os { /** * Getenv retrieves the value of the environment variable named by the key. * It returns the value, which will be empty if the variable is not present. - * To distinguish between an empty value and an unset value, use LookupEnv. + * To distinguish between an empty value and an unset value, use [LookupEnv]. */ (key: string): string } @@ -1353,7 +1379,7 @@ namespace os { } interface newSyscallError { /** - * NewSyscallError returns, as an error, a new SyscallError + * NewSyscallError returns, as an error, a new [SyscallError] * with the given system call name and error details. * As a convenience, if err is nil, NewSyscallError returns nil. */ @@ -1361,53 +1387,55 @@ namespace os { } interface isExist { /** - * IsExist returns a boolean indicating whether the error is known to report - * that a file or directory already exists. It is satisfied by ErrExist as + * IsExist returns a boolean indicating whether its argument is known to report + * that a file or directory already exists. It is satisfied by [ErrExist] as * well as some syscall errors. * - * This function predates errors.Is. It only supports errors returned by + * This function predates [errors.Is]. It only supports errors returned by * the os package. New code should use errors.Is(err, fs.ErrExist). */ (err: Error): boolean } interface isNotExist { /** - * IsNotExist returns a boolean indicating whether the error is known to + * IsNotExist returns a boolean indicating whether its argument is known to * report that a file or directory does not exist. It is satisfied by - * ErrNotExist as well as some syscall errors. + * [ErrNotExist] as well as some syscall errors. * - * This function predates errors.Is. It only supports errors returned by + * This function predates [errors.Is]. It only supports errors returned by * the os package. New code should use errors.Is(err, fs.ErrNotExist). */ (err: Error): boolean } interface isPermission { /** - * IsPermission returns a boolean indicating whether the error is known to - * report that permission is denied. It is satisfied by ErrPermission as well + * IsPermission returns a boolean indicating whether its argument is known to + * report that permission is denied. 
It is satisfied by [ErrPermission] as well * as some syscall errors. * - * This function predates errors.Is. It only supports errors returned by + * This function predates [errors.Is]. It only supports errors returned by * the os package. New code should use errors.Is(err, fs.ErrPermission). */ (err: Error): boolean } interface isTimeout { /** - * IsTimeout returns a boolean indicating whether the error is known + * IsTimeout returns a boolean indicating whether its argument is known * to report that a timeout occurred. * - * This function predates errors.Is, and the notion of whether an + * This function predates [errors.Is], and the notion of whether an * error indicates a timeout can be ambiguous. For example, the Unix * error EWOULDBLOCK sometimes indicates a timeout and sometimes does not. * New code should use errors.Is with a value appropriate to the call - * returning the error, such as os.ErrDeadlineExceeded. + * returning the error, such as [os.ErrDeadlineExceeded]. */ (err: Error): boolean } interface syscallErrorType extends syscall.Errno{} + interface processMode extends Number{} + interface processStatus extends Number{} /** - * Process stores the information about a process created by StartProcess. + * Process stores the information about a process created by [StartProcess]. */ interface Process { pid: number @@ -1473,7 +1501,7 @@ namespace os { /** * FindProcess looks for a running process by its pid. * - * The Process it returns can be used to obtain information + * The [Process] it returns can be used to obtain information * about the underlying operating system process. * * On Unix systems, FindProcess always succeeds and returns a Process @@ -1486,32 +1514,32 @@ namespace os { interface startProcess { /** * StartProcess starts a new process with the program, arguments and attributes - * specified by name, argv and attr. The argv slice will become os.Args in the + * specified by name, argv and attr. The argv slice will become [os.Args] in the * new process, so it normally starts with the program name. * * If the calling goroutine has locked the operating system thread - * with runtime.LockOSThread and modified any inheritable OS-level + * with [runtime.LockOSThread] and modified any inheritable OS-level * thread state (for example, Linux or Plan 9 name spaces), the new * process will inherit the caller's thread state. * - * StartProcess is a low-level interface. The os/exec package provides + * StartProcess is a low-level interface. The [os/exec] package provides * higher-level interfaces. * - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. */ (name: string, argv: Array, attr: ProcAttr): (Process) } interface Process { /** - * Release releases any resources associated with the Process p, + * Release releases any resources associated with the [Process] p, * rendering it unusable in the future. - * Release only needs to be called if Wait is not. + * Release only needs to be called if [Process.Wait] is not. */ release(): void } interface Process { /** - * Kill causes the Process to exit immediately. Kill does not wait until + * Kill causes the [Process] to exit immediately. Kill does not wait until * the Process has actually exited. This only kills the Process itself, * not any other processes it may have started. 
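 * 
 * A hedged sketch (Go), in the style of the examples elsewhere in this file;
 * pid is an assumed, illustrative value:
 * 
 * ```
 * p, err := os.FindProcess(pid)
 * if err != nil {
 * 	log.Fatal(err)
 * }
 * if err := p.Kill(); err != nil {
 * 	log.Fatal(err)
 * }
 * ```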
*/ @@ -1519,7 +1547,7 @@ namespace os { } interface Process { /** - * Wait waits for the Process to exit, and then returns a + * Wait waits for the [Process] to exit, and then returns a * ProcessState describing its status and an error, if any. * Wait releases any resources associated with the Process. * On most operating systems, the Process must be a child @@ -1529,8 +1557,8 @@ namespace os { } interface Process { /** - * Signal sends a signal to the Process. - * Sending Interrupt on Windows is not implemented. + * Signal sends a signal to the [Process]. + * Sending [Interrupt] on Windows is not implemented. */ signal(sig: Signal): void } @@ -1565,7 +1593,7 @@ namespace os { /** * Sys returns system-dependent exit information about * the process. Convert it to the appropriate underlying - * type, such as syscall.WaitStatus on Unix, to access its contents. + * type, such as [syscall.WaitStatus] on Unix, to access its contents. */ sys(): any } @@ -1573,7 +1601,7 @@ namespace os { /** * SysUsage returns system-dependent resource usage information about * the exited process. Convert it to the appropriate underlying - * type, such as *syscall.Rusage on Unix, to access its contents. + * type, such as [*syscall.Rusage] on Unix, to access its contents. * (On Unix, *syscall.Rusage matches struct rusage as defined in the * getrusage(2) manual page.) */ @@ -1607,7 +1635,7 @@ namespace os { * pointing to the correct executable. If a symlink was used to start * the process, depending on the operating system, the result might * be the symlink or the path it pointed to. If a stable result is - * needed, path/filepath.EvalSymlinks might help. + * needed, [path/filepath.EvalSymlinks] might help. * * Executable returns an absolute path unless an error occurred. * @@ -1619,6 +1647,8 @@ namespace os { interface File { /** * Name returns the name of the file as presented to Open. + * + * It is safe to call Name after [Close]. */ name(): string } @@ -1679,8 +1709,8 @@ namespace os { * than ReadFrom. This is used to permit ReadFrom to call io.Copy * without leading to a recursive call to ReadFrom. */ - type _subezgYh = noReadFrom&File - interface fileWithoutReadFrom extends _subezgYh { + type _subomlSs = noReadFrom&File + interface fileWithoutReadFrom extends _subomlSs { } interface File { /** @@ -1724,8 +1754,8 @@ namespace os { * than WriteTo. This is used to permit WriteTo to call io.Copy * without leading to a recursive call to WriteTo. */ - type _subJsbVf = noWriteTo&File - interface fileWithoutWriteTo extends _subJsbVf { + type _subTMSnU = noWriteTo&File + interface fileWithoutWriteTo extends _subTMSnU { } interface File { /** @@ -1771,7 +1801,7 @@ namespace os { interface create { /** * Create creates or truncates the named file. If the file already exists, - * it is truncated. If the file does not exist, it is created with mode 0666 + * it is truncated. If the file does not exist, it is created with mode 0o666 * (before umask). If successful, methods on the returned File can * be used for I/O; the associated file descriptor has mode O_RDWR. * If there is an error, it will be of type *PathError. @@ -1884,11 +1914,11 @@ namespace os { * On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and * ModeSticky are used. * - * On Windows, only the 0200 bit (owner writable) of mode is used; it + * On Windows, only the 0o200 bit (owner writable) of mode is used; it * controls whether the file's read-only attribute is set or cleared. * The other bits are currently unused. 
For compatibility with Go 1.12 - * and earlier, use a non-zero mode. Use mode 0400 for a read-only - * file and 0600 for a readable+writable file. + * and earlier, use a non-zero mode. Use mode 0o400 for a read-only + * file and 0o600 for a readable+writable file. * * On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive, * and ModeTemporary are used. @@ -2022,9 +2052,9 @@ namespace os { } interface File { /** - * Close closes the File, rendering it unusable for I/O. - * On files that support SetDeadline, any pending I/O operations will - * be canceled and return immediately with an ErrClosed error. + * Close closes the [File], rendering it unusable for I/O. + * On files that support [File.SetDeadline], any pending I/O operations will + * be canceled and return immediately with an [ErrClosed] error. * Close will return an error if it has already been called. */ close(): void @@ -2034,9 +2064,9 @@ namespace os { * Chown changes the numeric uid and gid of the named file. * If the file is a symbolic link, it changes the uid and gid of the link's target. * A uid or gid of -1 means to not change that value. - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. * - * On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or + * On Windows or Plan 9, Chown always returns the [syscall.EWINDOWS] or * EPLAN9 error, wrapped in *PathError. */ (name: string, uid: number, gid: number): void @@ -2045,9 +2075,9 @@ namespace os { /** * Lchown changes the numeric uid and gid of the named file. * If the file is a symbolic link, it changes the uid and gid of the link itself. - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. * - * On Windows, it always returns the syscall.EWINDOWS error, wrapped + * On Windows, it always returns the [syscall.EWINDOWS] error, wrapped * in *PathError. */ (name: string, uid: number, gid: number): void @@ -2055,9 +2085,9 @@ namespace os { interface File { /** * Chown changes the numeric uid and gid of the named file. - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. * - * On Windows, it always returns the syscall.EWINDOWS error, wrapped + * On Windows, it always returns the [syscall.EWINDOWS] error, wrapped * in *PathError. */ chown(uid: number, gid: number): void @@ -2066,7 +2096,7 @@ namespace os { /** * Truncate changes the size of the file. * It does not change the I/O offset. - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. */ truncate(size: number): void } @@ -2082,11 +2112,11 @@ namespace os { /** * Chtimes changes the access and modification times of the named * file, similar to the Unix utime() or utimes() functions. - * A zero time.Time value will leave the corresponding file time unchanged. + * A zero [time.Time] value will leave the corresponding file time unchanged. * * The underlying filesystem may truncate or round the values to a * less precise time unit. - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. */ (name: string, atime: time.Time, mtime: time.Time): void } @@ -2094,7 +2124,7 @@ namespace os { /** * Chdir changes the current working directory to the file, * which must be a directory. - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. 
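 * 
 * A minimal sketch (Go); the directory name is illustrative:
 * 
 * ```
 * dir, err := os.Open("testdata")
 * if err != nil {
 * 	log.Fatal(err)
 * }
 * defer dir.Close()
 * if err := dir.Chdir(); err != nil {
 * 	log.Fatal(err)
 * }
 * ```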
*/ chdir(): void } @@ -2111,11 +2141,11 @@ namespace os { * Fd returns the integer Unix file descriptor referencing the open file. * If f is closed, the file descriptor becomes invalid. * If f is garbage collected, a finalizer may close the file descriptor, - * making it invalid; see runtime.SetFinalizer for more information on when - * a finalizer might be run. On Unix systems this will cause the SetDeadline + * making it invalid; see [runtime.SetFinalizer] for more information on when + * a finalizer might be run. On Unix systems this will cause the [File.SetDeadline] * methods to stop working. * Because file descriptors can be reused, the returned file descriptor may - * only be closed through the Close method of f, or by its finalizer during + * only be closed through the [File.Close] method of f, or by its finalizer during * garbage collection. Otherwise, during garbage collection the finalizer * may close an unrelated file descriptor with the same (reused) number. * @@ -2216,7 +2246,7 @@ namespace os { * It removes everything it can but returns the first error * it encounters. If the path does not exist, RemoveAll * returns nil (no error). - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. */ (path: string): void } @@ -2269,7 +2299,7 @@ namespace os { /** * Getgroups returns a list of the numeric ids of groups that the caller belongs to. * - * On Windows, it returns syscall.EWINDOWS. See the os/user package + * On Windows, it returns [syscall.EWINDOWS]. See the [os/user] package * for a possible alternative. */ (): Array @@ -2300,17 +2330,17 @@ namespace os { } interface stat { /** - * Stat returns a FileInfo describing the named file. - * If there is an error, it will be of type *PathError. + * Stat returns a [FileInfo] describing the named file. + * If there is an error, it will be of type [*PathError]. */ (name: string): FileInfo } interface lstat { /** - * Lstat returns a FileInfo describing the named file. + * Lstat returns a [FileInfo] describing the named file. * If the file is a symbolic link, the returned FileInfo * describes the symbolic link. Lstat makes no attempt to follow the link. - * If there is an error, it will be of type *PathError. + * If there is an error, it will be of type [*PathError]. * * On Windows, if the file is a reparse point that is a surrogate for another * named entity (such as a symbolic link or mounted folder), the returned @@ -2320,8 +2350,8 @@ namespace os { } interface File { /** - * Stat returns the FileInfo structure describing file. - * If there is an error, it will be of type *PathError. + * Stat returns the [FileInfo] structure describing file. + * If there is an error, it will be of type [*PathError]. */ stat(): FileInfo } @@ -2337,7 +2367,8 @@ namespace os { * opens the file for reading and writing, and returns the resulting file. * The filename is generated by taking pattern and adding a random string to the end. * If pattern includes a "*", the random string replaces the last "*". - * If dir is the empty string, CreateTemp uses the default directory for temporary files, as returned by TempDir. + * The file is created with mode 0o600 (before umask). + * If dir is the empty string, CreateTemp uses the default directory for temporary files, as returned by [TempDir]. * Multiple programs or goroutines calling CreateTemp simultaneously will not choose the same file. * The caller can use the file's Name method to find the pathname of the file. 
* It is the caller's responsibility to remove the file when it is no longer needed. @@ -2350,6 +2381,7 @@ namespace os { * and returns the pathname of the new directory. * The new directory's name is generated by adding a random string to the end of pattern. * If pattern includes a "*", the random string replaces the last "*" instead. + * The directory is created with mode 0o700 (before umask). * If dir is the empty string, MkdirTemp uses the default directory for temporary files, as returned by TempDir. * Multiple programs or goroutines calling MkdirTemp simultaneously will not choose the same directory. * It is the caller's responsibility to remove the directory when it is no longer needed. @@ -2364,12 +2396,14 @@ namespace os { } /** * File represents an open file descriptor. + * + * The methods of File are safe for concurrent use. */ - type _subzNURo = file - interface File extends _subzNURo { + type _subOKBJQ = file + interface File extends _subOKBJQ { } /** - * A FileInfo describes a file and is returned by Stat and Lstat. + * A FileInfo describes a file and is returned by [Stat] and [Lstat]. */ interface FileInfo extends fs.FileInfo{} /** @@ -2377,7 +2411,7 @@ namespace os { * The bits have the same definition on all systems, so that * information about files can be moved from one system * to another portably. Not all bits apply to all systems. - * The only required bit is ModeDir for directories. + * The only required bit is [ModeDir] for directories. */ interface FileMode extends fs.FileMode{} interface fileStat { @@ -2392,7 +2426,7 @@ namespace os { * For example, on Unix this means that the device and inode fields * of the two underlying structures are identical; on other systems * the decision may be based on the path names. - * SameFile only applies to results returned by this package's Stat. + * SameFile only applies to results returned by this package's [Stat]. * It returns false in other cases. */ (fi1: FileInfo, fi2: FileInfo): boolean @@ -2470,14 +2504,6 @@ namespace filepath { */ (pattern: string): Array } - /** - * A lazybuf is a lazily constructed path buffer. - * It supports append, reading previously appended bytes, - * and retrieving the final string. It does not allocate a buffer - * to hold the output until that output diverges from s. - */ - interface lazybuf { - } interface clean { /** * Clean returns the shortest path name equivalent to path @@ -2535,6 +2561,19 @@ namespace filepath { */ (path: string): boolean } + interface localize { + /** + * Localize converts a slash-separated path into an operating system path. + * The input path must be a valid path as reported by [io/fs.ValidPath]. + * + * Localize returns an error if the path cannot be represented by the operating system. + * For example, the path a\b is rejected on Windows, on which \ is a separator + * character and cannot be part of a filename. + * + * The path returned by Localize will always be local, as reported by IsLocal. + */ + (path: string): string + } interface toSlash { /** * ToSlash returns the result of replacing each separator character @@ -2548,6 +2587,9 @@ namespace filepath { * FromSlash returns the result of replacing each slash ('/') character * in path with a separator character. Multiple slashes are replaced * by multiple separators. + * + * See also the Localize function, which converts a slash-separated path + * as used by the io/fs package to an operating system path. */ (path: string): string } @@ -2568,7 +2610,7 @@ namespace filepath { * and file set to path. 
* The returned values have the property that path = dir+file. */ - (path: string): string + (path: string): [string, string] } interface join { /** @@ -2601,6 +2643,12 @@ namespace filepath { */ (path: string): string } + interface isAbs { + /** + * IsAbs reports whether the path is absolute. + */ + (path: string): boolean + } interface abs { /** * Abs returns an absolute representation of path. @@ -2733,12 +2781,6 @@ namespace filepath { */ (path: string): string } - interface isAbs { - /** - * IsAbs reports whether the path is absolute. - */ - (path: string): boolean - } interface hasPrefix { /** * HasPrefix exists for historical compatibility and should not be used. @@ -2761,7 +2803,7 @@ namespace filepath { * pipelines, or redirections typically done by shells. The package * behaves more like C's "exec" family of functions. To expand glob * patterns, either call the shell directly, taking care to escape any - * dangerous input, or use the path/filepath package's Glob function. + * dangerous input, or use the [path/filepath] package's Glob function. * To expand environment variables, use package os's ExpandEnv. * * Note that the examples in this package assume a Unix system. @@ -2770,7 +2812,7 @@ namespace filepath { * * # Executables in the current directory * - * The functions Command and LookPath look for a program + * The functions [Command] and [LookPath] look for a program * in the directories listed in the current path, following the * conventions of the host operating system. * Operating systems have for decades included the current @@ -2781,10 +2823,10 @@ namespace filepath { * * To avoid those security problems, as of Go 1.19, this package will not resolve a program * using an implicit or explicit path entry relative to the current directory. - * That is, if you run exec.LookPath("go"), it will not successfully return + * That is, if you run [LookPath]("go"), it will not successfully return * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. * Instead, if the usual path algorithms would result in that answer, - * these functions return an error err satisfying errors.Is(err, ErrDot). + * these functions return an error err satisfying [errors.Is](err, [ErrDot]). * * For example, consider these two program snippets: * @@ -2849,12 +2891,12 @@ namespace filepath { namespace exec { interface command { /** - * Command returns the Cmd struct to execute the named program with + * Command returns the [Cmd] struct to execute the named program with * the given arguments. * * It sets only the Path and Args in the returned structure. * - * If name contains no path separators, Command uses LookPath to + * If name contains no path separators, Command uses [LookPath] to * resolve name to a complete path if possible. Otherwise it uses name * directly as Path. * @@ -3101,8 +3143,8 @@ namespace filesystem { */ open(): io.ReadSeekCloser } - type _subRtcDW = bytes.Reader - interface bytesReadSeekCloser extends _subRtcDW { + type _subShExs = bytes.Reader + interface bytesReadSeekCloser extends _subShExs { } interface bytesReadSeekCloser { /** @@ -3201,6 +3243,8 @@ namespace filesystem { interface System { /** * DeletePrefix deletes everything starting with the specified prefix. + * + * The prefix could be subpath (ex. "/a/b/") or filename prefix (ex. "/a/b/file_"). 
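 * 
 * A hedged usage sketch (Go); the prefix value is illustrative:
 * 
 * ```
 * if errs := fsys.DeletePrefix("collections/demo/"); len(errs) > 0 {
 * 	log.Println("failed to delete:", errs)
 * }
 * ```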
*/ deletePrefix(prefix: string): Array } @@ -3234,110 +3278,254 @@ namespace filesystem { import smithyhttp = http interface ignoredHeadersKey { } -} - -/** - * Package template is a thin wrapper around the standard html/template - * and text/template packages that implements a convenient registry to - * load and cache templates on the fly concurrently. - * - * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code. - * - * Example: - * - * ``` - * registry := template.NewRegistry() - * - * html1, err := registry.LoadFiles( - * // the files set wil be parsed only once and then cached - * "layout.html", - * "content.html", - * ).Render(map[string]any{"name": "John"}) - * - * html2, err := registry.LoadFiles( - * // reuse the already parsed and cached files set - * "layout.html", - * "content.html", - * ).Render(map[string]any{"name": "Jane"}) - * ``` - */ -namespace template { - interface newRegistry { + // @ts-ignore + import awsv2 = aws + // @ts-ignore + import awsv2cfg = config + // @ts-ignore + import s3managerv2 = manager + // @ts-ignore + import s3v2 = s3 + // @ts-ignore + import typesv2 = types + interface hexEscape { /** - * NewRegistry creates and initializes a new templates registry with - * some defaults (eg. global "raw" template function for unescaped HTML). + * HexEscape returns s, with all runes for which shouldEscape returns true + * escaped to "__0xXXX__", where XXX is the hex representation of the rune + * value. For example, " " would escape to "__0x20__". * - * Use the Registry.Load* methods to load templates into the registry. + * Non-UTF-8 strings will have their non-UTF-8 characters escaped to + * unicode.ReplacementChar; the original value is lost. Please file an + * issue if you need non-UTF8 support. + * + * Note: shouldEscape takes the whole string as a slice of runes and an + * index. Passing it a single byte or a single rune doesn't provide + * enough context for some escape decisions; for example, the caller might + * want to escape the second "/" in "//" but not the first one. + * We pass a slice of runes instead of the string or a slice of bytes + * because some decisions will be made on a rune basis (e.g., encode + * all non-ASCII runes). */ - (): (Registry) + (s: string, shouldEscape: (s: Array, i: number) => boolean): string + } + interface hexUnescape { + /** + * HexUnescape reverses HexEscape. + */ + (s: string): string + } + interface urlEscape { + /** + * URLEscape uses url.PathEscape to escape s. + */ + (s: string): string + } + interface urlUnescape { + /** + * URLUnescape reverses URLEscape using url.PathUnescape. If the unescape + * returns an error, it returns s. + */ + (s: string): string + } + interface useV2 { + /** + * UseV2 returns true iff the URL parameters indicate that the provider + * should use the AWS SDK v2. + * + * "awssdk=v1" will force V1. + * "awssdk=v2" will force V2. + * No "awssdk" parameter (or any other value) will return the default, currently V1. + * Note that the default may change in the future. + */ + (q: url.Values): boolean + } + interface newDefaultV2Config { + /** + * NewDefaultV2Config returns a aws.Config for AWS SDK v2, using the default options. + */ + (ctx: context.Context): awsv2.Config + } + interface v2ConfigFromURLParams { + /** + * V2ConfigFromURLParams returns an aws.Config for AWS SDK v2 initialized based on the URL + * parameters in q. It is intended to be used by URLOpeners for AWS services if + * UseV2 returns true. 
+ * + * https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config + * + * It returns an error if q contains any unknown query parameters; callers + * should remove any query parameters they know about from q before calling + * V2ConfigFromURLParams. + * + * The following query options are supported: + * ``` + * - region: The AWS region for requests; sets WithRegion. + * - profile: The shared config profile to use; sets SharedConfigProfile. + * - endpoint: The AWS service endpoint to send HTTP request. + * ``` + */ + (ctx: context.Context, q: url.Values): awsv2.Config + } + interface urlSessionOpener { + } + interface urlSessionOpener { + openBucketURL(ctx: context.Context, u: url.URL): (blob.Bucket) } /** - * Registry defines a templates registry that is safe to be used by multiple goroutines. + * URLOpener opens S3 URLs like "s3://mybucket". * - * Use the Registry.Load* methods to load templates into the registry. + * The URL host is used as the bucket name. + * + * Use "awssdk=v1" to force using AWS SDK v1, "awssdk=v2" to force using AWS SDK v2, + * or anything else to accept the default. + * + * For V1, see gocloud.dev/aws/ConfigFromURLParams for supported query parameters + * for overriding the aws.Session from the URL. + * For V2, see gocloud.dev/aws/V2ConfigFromURLParams. */ - interface Registry { - } - interface Registry { + interface URLOpener { /** - * AddFuncs registers new global template functions. - * - * The key of each map entry is the function name that will be used in the templates. - * If a function with the map entry name already exists it will be replaced with the new one. - * - * The value of each map entry is a function that must have either a - * single return value, or two return values of which the second has type error. - * - * Example: - * - * r.AddFuncs(map[string]any{ - * ``` - * "toUpper": func(str string) string { - * return strings.ToUppser(str) - * }, - * ... - * ``` - * }) + * UseV2 indicates whether the AWS SDK V2 should be used. */ - addFuncs(funcs: _TygojaDict): (Registry) - } - interface Registry { + useV2: boolean /** - * LoadFiles caches (if not already) the specified filenames set as a - * single template and returns a ready to use Renderer instance. - * - * There must be at least 1 filename specified. + * Options specifies the options to pass to OpenBucket. */ - loadFiles(...filenames: string[]): (Renderer) + options: Options } - interface Registry { + interface URLOpener { /** - * LoadString caches (if not already) the specified inline string as a - * single template and returns a ready to use Renderer instance. + * OpenBucketURL opens a blob.Bucket based on u. */ - loadString(text: string): (Renderer) - } - interface Registry { - /** - * LoadFS caches (if not already) the specified fs and globPatterns - * pair as single template and returns a ready to use Renderer instance. - * - * There must be at least 1 file matching the provided globPattern(s) - * (note that most file names serves as glob patterns matching themselves). - */ - loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer) + openBucketURL(ctx: context.Context, u: url.URL): (blob.Bucket) } /** - * Renderer defines a single parsed template. + * Options sets options for constructing a *blob.Bucket backed by fileblob. */ - interface Renderer { - } - interface Renderer { + interface Options { /** - * Render executes the template with the specified data as the dot object - * and returns the result as plain string. + * UseLegacyList forces the use of ListObjects instead of ListObjectsV2. 
+ * Some S3-compatible services (like CEPH) do not currently support + * ListObjectsV2. */ - render(data: any): string + useLegacyList: boolean + } + interface openBucketV2 { + /** + * OpenBucketV2 returns a *blob.Bucket backed by S3, using AWS SDK v2. + */ + (ctx: context.Context, client: s3v2.Client, bucketName: string, opts: Options): (blob.Bucket) + } + /** + * reader reads an S3 object. It implements io.ReadCloser. + */ + interface reader { + } + interface reader { + read(p: string|Array): number + } + interface reader { + /** + * Close closes the reader itself. It must be called when done reading. + */ + close(): void + } + interface reader { + as(i: { + }): boolean + } + interface reader { + attributes(): (any) + } + /** + * writer writes an S3 object, it implements io.WriteCloser. + */ + interface writer { + } + interface writer { + /** + * Write appends p to w.pw. User must call Close to close the w after done writing. + */ + write(p: string|Array): number + } + interface writer { + /** + * Upload reads from r. Per the driver, it is guaranteed to be the only + * write call for this writer. + */ + upload(r: io.Reader): void + } + interface writer { + /** + * Close completes the writer and closes it. Any error occurring during write + * will be returned. If a writer is closed before any Write is called, Close + * will create an empty file at the given key. + */ + close(): void + } + /** + * bucket represents an S3 bucket and handles read, write and delete operations. + */ + interface bucket { + } + interface bucket { + close(): void + } + interface bucket { + errorCode(err: Error): gcerrors.ErrorCode + } + interface bucket { + /** + * ListPaged implements driver.ListPaged. + */ + listPaged(ctx: context.Context, opts: any): (any) + } + interface bucket { + /** + * As implements driver.As. + */ + as(i: { + }): boolean + } + interface bucket { + /** + * As implements driver.ErrorAs. + */ + errorAs(err: Error, i: { + }): boolean + } + interface bucket { + /** + * Attributes implements driver.Attributes. + */ + attributes(ctx: context.Context, key: string): (any) + } + interface bucket { + /** + * NewRangeReader implements driver.NewRangeReader. + */ + newRangeReader(ctx: context.Context, key: string, offset: number, length: number, opts: any): any + } + interface bucket { + /** + * NewTypedWriter implements driver.NewTypedWriter. + */ + newTypedWriter(ctx: context.Context, key: string, contentType: string, opts: any): any + } + interface bucket { + /** + * Copy implements driver.Copy. + */ + copy(ctx: context.Context, dstKey: string, srcKey: string, opts: any): void + } + interface bucket { + /** + * Delete implements driver.Delete. + */ + delete(ctx: context.Context, key: string): void + } + interface bucket { + signedURL(ctx: context.Context, key: string, opts: any): string } } @@ -3714,14 +3902,14 @@ namespace dbx { /** * MssqlBuilder is the builder for SQL Server databases. */ - type _subrFKDD = BaseBuilder - interface MssqlBuilder extends _subrFKDD { + type _subHvLxi = BaseBuilder + interface MssqlBuilder extends _subHvLxi { } /** * MssqlQueryBuilder is the query builder for SQL Server databases. */ - type _submHtvV = BaseQueryBuilder - interface MssqlQueryBuilder extends _submHtvV { + type _subVtFMi = BaseQueryBuilder + interface MssqlQueryBuilder extends _subVtFMi { } interface newMssqlBuilder { /** @@ -3792,8 +3980,8 @@ namespace dbx { /** * MysqlBuilder is the builder for MySQL databases. 
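 * 
 * It is normally selected automatically from the driver name; a hedged
 * sketch (Go, with an illustrative DSN):
 * 
 * ```
 * db, err := dbx.Open("mysql", "user:pass@/example")
 * ```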
*/ - type _subIVLoN = BaseBuilder - interface MysqlBuilder extends _subIVLoN { + type _subcGgwD = BaseBuilder + interface MysqlBuilder extends _subcGgwD { } interface newMysqlBuilder { /** @@ -3868,14 +4056,14 @@ namespace dbx { /** * OciBuilder is the builder for Oracle databases. */ - type _subfiTVV = BaseBuilder - interface OciBuilder extends _subfiTVV { + type _subPThpx = BaseBuilder + interface OciBuilder extends _subPThpx { } /** * OciQueryBuilder is the query builder for Oracle databases. */ - type _subrSBRI = BaseQueryBuilder - interface OciQueryBuilder extends _subrSBRI { + type _subNOiLF = BaseQueryBuilder + interface OciQueryBuilder extends _subNOiLF { } interface newOciBuilder { /** @@ -3938,8 +4126,8 @@ namespace dbx { /** * PgsqlBuilder is the builder for PostgreSQL databases. */ - type _subtFJti = BaseBuilder - interface PgsqlBuilder extends _subtFJti { + type _subwdQYB = BaseBuilder + interface PgsqlBuilder extends _subwdQYB { } interface newPgsqlBuilder { /** @@ -4006,8 +4194,8 @@ namespace dbx { /** * SqliteBuilder is the builder for SQLite databases. */ - type _subrBNop = BaseBuilder - interface SqliteBuilder extends _subrBNop { + type _subuwoLq = BaseBuilder + interface SqliteBuilder extends _subuwoLq { } interface newSqliteBuilder { /** @@ -4106,8 +4294,8 @@ namespace dbx { /** * StandardBuilder is the builder that is used by DB for an unknown driver. */ - type _subesQFA = BaseBuilder - interface StandardBuilder extends _subesQFA { + type _subQKHka = BaseBuilder + interface StandardBuilder extends _subQKHka { } interface newStandardBuilder { /** @@ -4173,8 +4361,8 @@ namespace dbx { * DB enhances sql.DB by providing a set of DB-agnostic query building methods. * DB allows easier query building and population of data into Go variables. */ - type _subkWabA = Builder - interface DB extends _subkWabA { + type _subDKzjR = Builder + interface DB extends _subDKzjR { /** * FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc. */ @@ -4978,8 +5166,8 @@ namespace dbx { * Rows enhances sql.Rows by providing additional data query methods. * Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row. */ - type _subrMzGi = sql.Rows - interface Rows extends _subrMzGi { + type _subnAbdX = sql.Rows + interface Rows extends _subnAbdX { } interface Rows { /** @@ -5337,8 +5525,8 @@ namespace dbx { }): string } interface structInfo { } - type _subSCEkW = structInfo - interface structValue extends _subSCEkW { + type _subxLLPJ = structInfo + interface structValue extends _subxLLPJ { } interface fieldInfo { } @@ -5377,8 +5565,8 @@ namespace dbx { /** * Tx enhances sql.Tx with additional querying methods. */ - type _subXvIkD = Builder - interface Tx extends _subXvIkD { + type _subqhOLL = Builder + interface Tx extends _subqhOLL { } interface Tx { /** @@ -5459,6 +5647,14 @@ namespace mails { */ (app: CoreApp, admin: models.Admin): void } + interface sendRecordPasswordLoginAlert { + /** + * @todo remove after the refactoring + * + * SendRecordPasswordLoginAlert sends a OAuth2 password login alert to the specified auth record. + */ + (app: CoreApp, authRecord: models.Record, ...providerNames: string[]): void + } interface sendRecordPasswordReset { /** * SendRecordPasswordReset sends a password reset request email to the specified user. @@ -6396,8 +6592,8 @@ namespace forms { /** * SettingsUpsert is a [settings.Settings] upsert (create/update) form. 
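 * 
 * A hedged sketch of the usual flow (Go); app and the Meta.AppName field are
 * assumed from the embedded [settings.Settings]:
 * 
 * ```
 * form := forms.NewSettingsUpsert(app)
 * form.Meta.AppName = "Acme" // load the desired changes
 * if err := form.Submit(); err != nil {
 * 	log.Fatal(err)
 * }
 * ```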
*/ - type _subxZPsK = settings.Settings - interface SettingsUpsert extends _subxZPsK { + type _subIwjtv = settings.Settings + interface SettingsUpsert extends _subIwjtv { } interface newSettingsUpsert { /** @@ -6719,7 +6915,7 @@ namespace apis { codeChallenge: string codeChallengeMethod: string } - interface oauth2EventMessage { + interface oauth2RedirectData { state: string code: string error: string @@ -6828,8 +7024,8 @@ namespace pocketbase { /** * appWrapper serves as a private CoreApp instance wrapper. */ - type _subXbotK = CoreApp - interface appWrapper extends _subXbotK { + type _subRaHzg = CoreApp + interface appWrapper extends _subRaHzg { } /** * PocketBase defines a PocketBase app launcher. @@ -6837,8 +7033,8 @@ namespace pocketbase { * It implements [CoreApp] via embedding and all of the app interface methods * could be accessed directly through the instance (eg. PocketBase.DataDir()). */ - type _subYRHBu = appWrapper - interface PocketBase extends _subYRHBu { + type _subNtXmf = appWrapper + interface PocketBase extends _subNtXmf { /** * RootCmd is the main console command */ @@ -6921,6 +7117,111 @@ namespace pocketbase { } } +/** + * Package template is a thin wrapper around the standard html/template + * and text/template packages that implements a convenient registry to + * load and cache templates on the fly concurrently. + * + * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code. + * + * Example: + * + * ``` + * registry := template.NewRegistry() + * + * html1, err := registry.LoadFiles( + * // the files set wil be parsed only once and then cached + * "layout.html", + * "content.html", + * ).Render(map[string]any{"name": "John"}) + * + * html2, err := registry.LoadFiles( + * // reuse the already parsed and cached files set + * "layout.html", + * "content.html", + * ).Render(map[string]any{"name": "Jane"}) + * ``` + */ +namespace template { + interface newRegistry { + /** + * NewRegistry creates and initializes a new templates registry with + * some defaults (eg. global "raw" template function for unescaped HTML). + * + * Use the Registry.Load* methods to load templates into the registry. + */ + (): (Registry) + } + /** + * Registry defines a templates registry that is safe to be used by multiple goroutines. + * + * Use the Registry.Load* methods to load templates into the registry. + */ + interface Registry { + } + interface Registry { + /** + * AddFuncs registers new global template functions. + * + * The key of each map entry is the function name that will be used in the templates. + * If a function with the map entry name already exists it will be replaced with the new one. + * + * The value of each map entry is a function that must have either a + * single return value, or two return values of which the second has type error. + * + * Example: + * + * r.AddFuncs(map[string]any{ + * ``` + * "toUpper": func(str string) string { + * return strings.ToUppser(str) + * }, + * ... + * ``` + * }) + */ + addFuncs(funcs: _TygojaDict): (Registry) + } + interface Registry { + /** + * LoadFiles caches (if not already) the specified filenames set as a + * single template and returns a ready to use Renderer instance. + * + * There must be at least 1 filename specified. + */ + loadFiles(...filenames: string[]): (Renderer) + } + interface Registry { + /** + * LoadString caches (if not already) the specified inline string as a + * single template and returns a ready to use Renderer instance. 
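 * 
 * Example, following the package-level example above:
 * 
 * ```
 * html, err := registry.LoadString("Hello {{.name}}!").Render(map[string]any{"name": "John"})
 * ```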
+ */ + loadString(text: string): (Renderer) + } + interface Registry { + /** + * LoadFS caches (if not already) the specified fs and globPatterns + * pair as single template and returns a ready to use Renderer instance. + * + * There must be at least 1 file matching the provided globPattern(s) + * (note that most file names serves as glob patterns matching themselves). + */ + loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer) + } + /** + * Renderer defines a single parsed template. + */ + interface Renderer { + } + interface Renderer { + /** + * Render executes the template with the specified data as the dot object + * and returns the result as plain string. + */ + render(data: any): string + } +} + /** * Package io provides basic interfaces to I/O primitives. * Its primary job is to wrap existing implementations of such primitives, @@ -6999,8 +7300,8 @@ namespace io { */ namespace bytes { /** - * A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker, - * io.ByteScanner, and io.RuneScanner interfaces by reading from + * A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker], + * [io.ByteScanner], and [io.RuneScanner] interfaces by reading from * a byte slice. * Unlike a [Buffer], a Reader is read-only and supports seeking. * The zero value for Reader operates like a Reader of an empty slice. @@ -7072,7 +7373,7 @@ namespace bytes { } interface Reader { /** - * Reset resets the [Reader.Reader] to be reading from b. + * Reset resets the [Reader] to be reading from b. */ reset(b: string|Array): void } @@ -7093,7 +7394,7 @@ namespace bytes { * the manuals for the appropriate operating system. * These calls return err == nil to indicate success; otherwise * err is an operating system error describing the failure. - * On most systems, that error has type syscall.Errno. + * On most systems, that error has type [Errno]. * * NOTE: Most of the functions, types, and constants defined in * this package are also available in the [golang.org/x/sys] package. @@ -7193,6 +7494,8 @@ namespace syscall { */ write(f: (fd: number) => boolean): void } + // @ts-ignore + import runtimesyscall = syscall /** * An Errno is an unsigned number describing an error condition. * It implements the error interface. The zero Errno is by convention @@ -7205,7 +7508,7 @@ namespace syscall { * } * ``` * - * Errno values can be tested against error values using errors.Is. + * Errno values can be tested against error values using [errors.Is]. * For example: * * ``` @@ -7240,7 +7543,7 @@ namespace syscall { * changes for clock synchronization, and a “monotonic clock,” which is * not. The general rule is that the wall clock is for telling time and * the monotonic clock is for measuring time. Rather than split the API, - * in this package the Time returned by time.Now contains both a wall + * in this package the Time returned by [time.Now] contains both a wall * clock reading and a monotonic clock reading; later time-telling * operations use the wall clock reading, but later time-measuring * operations, specifically comparisons and subtractions, use the @@ -7257,7 +7560,7 @@ namespace syscall { * elapsed := t.Sub(start) * ``` * - * Other idioms, such as time.Since(start), time.Until(deadline), and + * Other idioms, such as [time.Since](start), [time.Until](deadline), and * time.Now().Before(deadline), are similarly robust against wall clock * resets. * @@ -7282,23 +7585,26 @@ namespace syscall { * * On some systems the monotonic clock will stop if the computer goes to sleep. 
* On such a system, t.Sub(u) may not accurately reflect the actual - * time that passed between t and u. + * time that passed between t and u. The same applies to other functions and + * methods that subtract times, such as [Since], [Until], [Before], [After], + * [Add], [Sub], [Equal] and [Compare]. In some cases, you may need to strip + * the monotonic clock to get accurate results. * * Because the monotonic clock reading has no meaning outside * the current process, the serialized forms generated by t.GobEncode, * t.MarshalBinary, t.MarshalJSON, and t.MarshalText omit the monotonic * clock reading, and t.Format provides no format for it. Similarly, the - * constructors time.Date, time.Parse, time.ParseInLocation, and time.Unix, + * constructors [time.Date], [time.Parse], [time.ParseInLocation], and [time.Unix], * as well as the unmarshalers t.GobDecode, t.UnmarshalBinary. * t.UnmarshalJSON, and t.UnmarshalText always create times with * no monotonic clock reading. * - * The monotonic clock reading exists only in Time values. It is not - * a part of Duration values or the Unix times returned by t.Unix and + * The monotonic clock reading exists only in [Time] values. It is not + * a part of [Duration] values or the Unix times returned by t.Unix and * friends. * * Note that the Go == operator compares not just the time instant but - * also the Location and the monotonic clock reading. See the + * also the [Location] and the monotonic clock reading. See the * documentation for the Time type for a discussion of equality * testing for Time values. * @@ -7308,10 +7614,11 @@ namespace syscall { * * # Timer Resolution * - * Timer resolution varies depending on the Go runtime, the operating system + * [Timer] resolution varies depending on the Go runtime, the operating system * and the underlying hardware. - * On Unix, the resolution is approximately 1ms. - * On Windows, the default resolution is approximately 16ms, but + * On Unix, the resolution is ~1ms. + * On Windows version 1803 and newer, the resolution is ~0.5ms. + * On older Windows versions, the default resolution is ~16ms, but * a higher resolution may be requested using [golang.org/x/sys/windows.TimeBeginPeriod]. */ namespace time { @@ -7335,7 +7642,7 @@ namespace time { } interface Time { /** - * GoString implements fmt.GoStringer and formats t to be printed in Go source + * GoString implements [fmt.GoStringer] and formats t to be printed in Go source * code. */ goString(): string @@ -7344,16 +7651,16 @@ namespace time { /** * Format returns a textual representation of the time value formatted according * to the layout defined by the argument. See the documentation for the - * constant called Layout to see how to represent the layout format. + * constant called [Layout] to see how to represent the layout format. * - * The executable example for Time.Format demonstrates the working + * The executable example for [Time.Format] demonstrates the working * of the layout string in detail and is a good reference. */ format(layout: string): string } interface Time { /** - * AppendFormat is like Format but appends the textual + * AppendFormat is like [Time.Format] but appends the textual * representation to b and returns the extended buffer. */ appendFormat(b: string|Array, layout: string): string|Array @@ -7363,27 +7670,27 @@ namespace time { * * Programs using times should typically store and pass them as values, * not pointers. That is, time variables and struct fields should be of - * type time.Time, not *time.Time. 
+ * type [time.Time], not *time.Time. * * A Time value can be used by multiple goroutines simultaneously except - * that the methods GobDecode, UnmarshalBinary, UnmarshalJSON and - * UnmarshalText are not concurrency-safe. + * that the methods [Time.GobDecode], [Time.UnmarshalBinary], [Time.UnmarshalJSON] and + * [Time.UnmarshalText] are not concurrency-safe. * - * Time instants can be compared using the Before, After, and Equal methods. - * The Sub method subtracts two instants, producing a Duration. - * The Add method adds a Time and a Duration, producing a Time. + * Time instants can be compared using the [Time.Before], [Time.After], and [Time.Equal] methods. + * The [Time.Sub] method subtracts two instants, producing a [Duration]. + * The [Time.Add] method adds a Time and a Duration, producing a Time. * * The zero value of type Time is January 1, year 1, 00:00:00.000000000 UTC. - * As this time is unlikely to come up in practice, the IsZero method gives + * As this time is unlikely to come up in practice, the [Time.IsZero] method gives * a simple way of detecting a time that has not been initialized explicitly. * - * Each time has an associated Location. The methods Local, UTC, and In return a + * Each time has an associated [Location]. The methods [Time.Local], [Time.UTC], and Time.In return a * Time with a specific Location. Changing the Location of a Time value with * these methods does not change the actual instant it represents, only the time * zone in which to interpret it. * - * Representations of a Time value saved by the GobEncode, MarshalBinary, - * MarshalJSON, and MarshalText methods store the Time.Location's offset, but not + * Representations of a Time value saved by the [Time.GobEncode], [Time.MarshalBinary], + * [Time.MarshalJSON], and [Time.MarshalText] methods store the [Time.Location]'s offset, but not * the location name. They therefore lose information about Daylight Saving Time. * * In addition to the required “wall clock” reading, a Time may contain an optional @@ -7476,13 +7783,13 @@ namespace time { * week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1 * of year n+1. */ - isoWeek(): number + isoWeek(): [number, number] } interface Time { /** * Clock returns the hour, minute, and second within the day specified by t. */ - clock(): number + clock(): [number, number, number] } interface Time { /** @@ -7579,7 +7886,7 @@ namespace time { * Round returns the result of rounding d to the nearest multiple of m. * The rounding behavior for halfway values is to round away from zero. * If the result exceeds the maximum (or minimum) - * value that can be stored in a Duration, + * value that can be stored in a [Duration], * Round returns the maximum (or minimum) duration. * If m <= 0, Round returns d unchanged. */ @@ -7588,7 +7895,7 @@ namespace time { interface Duration { /** * Abs returns the absolute value of d. - * As a special case, math.MinInt64 is converted to math.MaxInt64. + * As a special case, [math.MinInt64] is converted to [math.MaxInt64]. */ abs(): Duration } @@ -7601,7 +7908,7 @@ namespace time { interface Time { /** * Sub returns the duration t-u. If the result exceeds the maximum (or minimum) - * value that can be stored in a Duration, the maximum (or minimum) duration + * value that can be stored in a [Duration], the maximum (or minimum) duration * will be returned. * To compute t-d for a duration d, use t.Add(-d). */ @@ -7672,7 +7979,7 @@ namespace time { * If the zone goes on forever, end will be returned as a zero Time. 
* The Location of the returned times will be the same as t. */ - zoneBounds(): Time + zoneBounds(): [Time, Time] } interface Time { /** @@ -7742,7 +8049,7 @@ namespace time { } interface Time { /** - * MarshalJSON implements the json.Marshaler interface. + * MarshalJSON implements the [json.Marshaler] interface. * The time is a quoted string in the RFC 3339 format with sub-second precision. * If the timestamp cannot be represented as valid RFC 3339 * (e.g., the year is out of range), then an error is reported. @@ -7751,14 +8058,14 @@ namespace time { } interface Time { /** - * UnmarshalJSON implements the json.Unmarshaler interface. + * UnmarshalJSON implements the [json.Unmarshaler] interface. * The time must be a quoted string in the RFC 3339 format. */ unmarshalJSON(data: string|Array): void } interface Time { /** - * MarshalText implements the encoding.TextMarshaler interface. + * MarshalText implements the [encoding.TextMarshaler] interface. * The time is formatted in RFC 3339 format with sub-second precision. * If the timestamp cannot be represented as valid RFC 3339 * (e.g., the year is out of range), then an error is reported. @@ -7767,7 +8074,7 @@ namespace time { } interface Time { /** - * UnmarshalText implements the encoding.TextUnmarshaler interface. + * UnmarshalText implements the [encoding.TextUnmarshaler] interface. * The time must be in the RFC 3339 format. */ unmarshalText(data: string|Array): void @@ -8168,6 +8475,244 @@ namespace context { } } +/** + * Package url parses URLs and implements query escaping. + */ +namespace url { + /** + * A URL represents a parsed URL (technically, a URI reference). + * + * The general form represented is: + * + * ``` + * [scheme:][//[userinfo@]host][/]path[?query][#fragment] + * ``` + * + * URLs that do not start with a slash after the scheme are interpreted as: + * + * ``` + * scheme:opaque[?query][#fragment] + * ``` + * + * The Host field contains the host and port subcomponents of the URL. + * When the port is present, it is separated from the host with a colon. + * When the host is an IPv6 address, it must be enclosed in square brackets: + * "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port + * into a string suitable for the Host field, adding square brackets to + * the host when necessary. + * + * Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/. + * A consequence is that it is impossible to tell which slashes in the Path were + * slashes in the raw URL and which were %2f. This distinction is rarely important, + * but when it is, the code should use the [URL.EscapedPath] method, which preserves + * the original encoding of Path. + * + * The RawPath field is an optional field which is only set when the default + * encoding of Path is different from the escaped path. See the EscapedPath method + * for more details. + * + * URL's String method uses the EscapedPath method to obtain the path. + */ + interface URL { + scheme: string + opaque: string // encoded opaque data + user?: Userinfo // username and password information + host: string // host or host:port (see Hostname and Port methods) + path: string // path (relative paths may omit leading slash) + rawPath: string // encoded path hint (see EscapedPath method) + omitHost: boolean // do not emit empty host (authority) + forceQuery: boolean // append a query ('?') even if RawQuery is empty + rawQuery: string // encoded query values, without '?' 
+ fragment: string // fragment for references, without '#' + rawFragment: string // encoded fragment hint (see EscapedFragment method) + } + interface URL { + /** + * EscapedPath returns the escaped form of u.Path. + * In general there are multiple possible escaped forms of any path. + * EscapedPath returns u.RawPath when it is a valid escaping of u.Path. + * Otherwise EscapedPath ignores u.RawPath and computes an escaped + * form on its own. + * The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct + * their results. + * In general, code should call EscapedPath instead of + * reading u.RawPath directly. + */ + escapedPath(): string + } + interface URL { + /** + * EscapedFragment returns the escaped form of u.Fragment. + * In general there are multiple possible escaped forms of any fragment. + * EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment. + * Otherwise EscapedFragment ignores u.RawFragment and computes an escaped + * form on its own. + * The [URL.String] method uses EscapedFragment to construct its result. + * In general, code should call EscapedFragment instead of + * reading u.RawFragment directly. + */ + escapedFragment(): string + } + interface URL { + /** + * String reassembles the [URL] into a valid URL string. + * The general form of the result is one of: + * + * ``` + * scheme:opaque?query#fragment + * scheme://userinfo@host/path?query#fragment + * ``` + * + * If u.Opaque is non-empty, String uses the first form; + * otherwise it uses the second form. + * Any non-ASCII characters in host are escaped. + * To obtain the path, String uses u.EscapedPath(). + * + * In the second form, the following rules apply: + * ``` + * - if u.Scheme is empty, scheme: is omitted. + * - if u.User is nil, userinfo@ is omitted. + * - if u.Host is empty, host/ is omitted. + * - if u.Scheme and u.Host are empty and u.User is nil, + * the entire scheme://userinfo@host/ is omitted. + * - if u.Host is non-empty and u.Path begins with a /, + * the form host/path does not add its own /. + * - if u.RawQuery is empty, ?query is omitted. + * - if u.Fragment is empty, #fragment is omitted. + * ``` + */ + string(): string + } + interface URL { + /** + * Redacted is like [URL.String] but replaces any password with "xxxxx". + * Only the password in u.User is redacted. + */ + redacted(): string + } + /** + * Values maps a string key to a list of values. + * It is typically used for query parameters and form values. + * Unlike in the http.Header map, the keys in a Values map + * are case-sensitive. + */ + interface Values extends _TygojaDict{} + interface Values { + /** + * Get gets the first value associated with the given key. + * If there are no values associated with the key, Get returns + * the empty string. To access multiple values, use the map + * directly. + */ + get(key: string): string + } + interface Values { + /** + * Set sets the key to value. It replaces any existing + * values. + */ + set(key: string, value: string): void + } + interface Values { + /** + * Add adds the value to key. It appends to any existing + * values associated with key. + */ + add(key: string, value: string): void + } + interface Values { + /** + * Del deletes the values associated with key. + */ + del(key: string): void + } + interface Values { + /** + * Has checks whether a given key is set. + */ + has(key: string): boolean + } + interface Values { + /** + * Encode encodes the values into “URL encoded” form + * ("bar=baz&foo=quux") sorted by key. 
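+ *
+ * Example (a minimal sketch, assuming v is an existing Values instance,
+ * e.g. obtained from u.query()):
+ *
+ * ```
+ * v.set("foo", "quux")
+ * v.add("bar", "baz")
+ * v.encode() // "bar=baz&foo=quux"
+ * ```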
+ */ + encode(): string + } + interface URL { + /** + * IsAbs reports whether the [URL] is absolute. + * Absolute means that it has a non-empty scheme. + */ + isAbs(): boolean + } + interface URL { + /** + * Parse parses a [URL] in the context of the receiver. The provided URL + * may be relative or absolute. Parse returns nil, err on parse + * failure, otherwise its return value is the same as [URL.ResolveReference]. + */ + parse(ref: string): (URL) + } + interface URL { + /** + * ResolveReference resolves a URI reference to an absolute URI from + * an absolute base URI u, per RFC 3986 Section 5.2. The URI reference + * may be relative or absolute. ResolveReference always returns a new + * [URL] instance, even if the returned URL is identical to either the + * base or reference. If ref is an absolute URL, then ResolveReference + * ignores base and returns a copy of ref. + */ + resolveReference(ref: URL): (URL) + } + interface URL { + /** + * Query parses RawQuery and returns the corresponding values. + * It silently discards malformed value pairs. + * To check errors use [ParseQuery]. + */ + query(): Values + } + interface URL { + /** + * RequestURI returns the encoded path?query or opaque?query + * string that would be used in an HTTP request for u. + */ + requestURI(): string + } + interface URL { + /** + * Hostname returns u.Host, stripping any valid port number if present. + * + * If the result is enclosed in square brackets, as literal IPv6 addresses are, + * the square brackets are removed from the result. + */ + hostname(): string + } + interface URL { + /** + * Port returns the port part of u.Host, without the leading colon. + * + * If u.Host doesn't contain a valid numeric port, Port returns an empty string. + */ + port(): string + } + interface URL { + marshalBinary(): string|Array + } + interface URL { + unmarshalBinary(text: string|Array): void + } + interface URL { + /** + * JoinPath returns a new [URL] with the provided path elements joined to + * any existing path and the resulting path cleaned of any ./ or ../ elements. + * Any sequences of multiple / characters will be reduced to a single /. + */ + joinPath(...elem: string[]): (URL) + } +} + /** * Package sql provides a generic interface around SQL (or SQL-like) * databases. @@ -8823,8 +9368,8 @@ namespace sql { * To protect against malicious inputs, this package sets limits on the size * of the MIME data it processes. * - * Reader.NextPart and Reader.NextRawPart limit the number of headers in a - * part to 10000 and Reader.ReadForm limits the total number of headers in all + * [Reader.NextPart] and [Reader.NextRawPart] limit the number of headers in a + * part to 10000 and [Reader.ReadForm] limits the total number of headers in all * FileHeaders to 10000. * These limits may be adjusted with the GODEBUG=multipartmaxheaders= * setting. @@ -8833,11 +9378,6 @@ namespace sql { * This limit may be adjusted with the GODEBUG=multipartmaxparts= * setting. */ -/** - * Copyright 2023 The Go Authors. All rights reserved. - * Use of this source code is governed by a BSD-style - * license that can be found in the LICENSE file. - */ namespace multipart { /** * A FileHeader describes a file part of a multipart request. @@ -8849,7 +9389,7 @@ namespace multipart { } interface FileHeader { /** - * Open opens and returns the FileHeader's associated File. + * Open opens and returns the [FileHeader]'s associated File. */ open(): File } @@ -9222,6 +9762,11 @@ namespace http { * redirects. 
*/ response?: Response + /** + * Pattern is the [ServeMux] pattern that matched the request. + * It is empty if the request was not matched against a pattern. + */ + pattern: string } interface Request { /** @@ -9258,6 +9803,8 @@ namespace http { * Clone returns a deep copy of r with its context changed to ctx. * The provided ctx must be non-nil. * + * Clone only makes a shallow copy of the Body field. + * * For an outgoing client request, the context controls the entire * lifetime of a request and its response: obtaining a connection, * sending the request, and reading the response headers and body. @@ -9283,6 +9830,13 @@ namespace http { */ cookies(): Array<(Cookie | undefined)> } + interface Request { + /** + * CookiesNamed parses and returns the named HTTP cookies sent with the request + * or an empty slice if none matched. + */ + cookiesNamed(name: string): Array<(Cookie | undefined)> + } interface Request { /** * Cookie returns the named cookie provided in the request or @@ -9363,7 +9917,7 @@ namespace http { * Authorization header, if the request uses HTTP Basic Authentication. * See RFC 2617, Section 2. */ - basicAuth(): [string, boolean] + basicAuth(): [string, string, boolean] } interface Request { /** @@ -9586,9 +10140,9 @@ namespace http { * ReadHeaderTimeout is the amount of time allowed to read * request headers. The connection's read deadline is reset * after reading the headers and the Handler can decide what - * is considered too slow for the body. If ReadHeaderTimeout - * is zero, the value of ReadTimeout is used. If both are - * zero, there is no timeout. + * is considered too slow for the body. If zero, the value of + * ReadTimeout is used. If negative, or if zero and ReadTimeout + * is zero or negative, there is no timeout. */ readHeaderTimeout: time.Duration /** @@ -9601,9 +10155,9 @@ namespace http { writeTimeout: time.Duration /** * IdleTimeout is the maximum amount of time to wait for the - * next request when keep-alives are enabled. If IdleTimeout - * is zero, the value of ReadTimeout is used. If both are - * zero, there is no timeout. + * next request when keep-alives are enabled. If zero, the value + * of ReadTimeout is used. If negative, or if zero and ReadTimeout + * is zero or negative, there is no timeout. */ idleTimeout: time.Duration /** @@ -9741,7 +10295,8 @@ namespace http { * * Files containing a certificate and matching private key for the * server must be provided if neither the [Server]'s - * TLSConfig.Certificates nor TLSConfig.GetCertificate are populated. + * TLSConfig.Certificates, TLSConfig.GetCertificate nor + * config.GetConfigForClient are populated. * If the certificate is signed by a certificate authority, the * certFile should be the concatenation of the server's certificate, * any intermediates, and the CA's certificate. @@ -9782,6 +10337,258 @@ namespace http { } } +/** + * Package aws provides the core SDK's utilities and shared types. Use this package's + * utilities to simplify setting and reading API operations parameters. + * + * # Value and Pointer Conversion Utilities + * + * This package includes a helper conversion utility for each scalar type the SDK's + * API use. These utilities make getting a pointer of the scalar, and dereferencing + * a pointer easier. + * + * Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. + * The Pointer to value will safely dereference the pointer and return its value. + * If the pointer was nil, the scalar's zero value will be returned. 
+ * + * The value to pointer functions will be named after the scalar type. So get a + * *string from a string value use the "String" function. This makes it easy to + * to get pointer of a literal string value, because getting the address of a + * literal requires assigning the value to a variable first. + * + * ``` + * var strPtr *string + * + * // Without the SDK's conversion functions + * str := "my string" + * strPtr = &str + * + * // With the SDK's conversion functions + * strPtr = aws.String("my string") + * + * // Convert *string to string value + * str = aws.ToString(strPtr) + * ``` + * + * In addition to scalars the aws package also includes conversion utilities for + * map and slice for commonly types used in API parameters. The map and slice + * conversion functions use similar naming pattern as the scalar conversion + * functions. + * + * ``` + * var strPtrs []*string + * var strs []string = []string{"Go", "Gophers", "Go"} + * + * // Convert []string to []*string + * strPtrs = aws.StringSlice(strs) + * + * // Convert []*string to []string + * strs = aws.ToStringSlice(strPtrs) + * ``` + * + * # SDK Default HTTP Client + * + * The SDK will use the http.DefaultClient if a HTTP client is not provided to + * the SDK's Session, or service client constructor. This means that if the + * http.DefaultClient is modified by other components of your application the + * modifications will be picked up by the SDK as well. + * + * In some cases this might be intended, but it is a better practice to create + * a custom HTTP Client to share explicitly through your application. You can + * configure the SDK to use the custom HTTP Client by setting the HTTPClient + * value of the SDK's Config type when creating a Session or service client. + */ +/** + * Package aws provides core functionality for making requests to AWS services. + */ +namespace aws { + // @ts-ignore + import smithybearer = bearer + /** + * A Config provides service configuration for service clients. + */ + interface Config { + /** + * The region to send requests to. This parameter is required and must + * be configured globally or on a per-client basis unless otherwise + * noted. A full list of regions is found in the "Regions and Endpoints" + * document. + * + * See http://docs.aws.amazon.com/general/latest/gr/rande.html for + * information on AWS regions. + */ + region: string + /** + * The credentials object to use when signing requests. + * Use the LoadDefaultConfig to load configuration from all the SDK's supported + * sources, and resolve credentials using the SDK's default credential chain. + */ + credentials: CredentialsProvider + /** + * The Bearer Authentication token provider to use for authenticating API + * operation calls with a Bearer Authentication token. The API clients and + * operation must support Bearer Authentication scheme in order for the + * token provider to be used. API clients created with NewFromConfig will + * automatically be configured with this option, if the API client support + * Bearer Authentication. + * + * The SDK's config.LoadDefaultConfig can automatically populate this + * option for external configuration options such as SSO session. + * https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html + */ + bearerAuthTokenProvider: smithybearer.TokenProvider + /** + * The HTTP Client the SDK's API clients will use to invoke HTTP requests. + * The SDK defaults to a BuildableClient allowing API clients to create + * copies of the HTTP Client for service specific customizations. 
+ * + * Use a (*http.Client) for custom behavior. Using a custom http.Client + * will prevent the SDK from modifying the HTTP client. + */ + httpClient: HTTPClient + /** + * An endpoint resolver that can be used to provide or override an endpoint + * for the given service and region. + * + * See the `aws.EndpointResolver` documentation for additional usage + * information. + * + * Deprecated: See Config.EndpointResolverWithOptions + */ + endpointResolver: EndpointResolver + /** + * An endpoint resolver that can be used to provide or override an endpoint + * for the given service and region. + * + * When EndpointResolverWithOptions is specified, it will be used by a + * service client rather than using EndpointResolver if also specified. + * + * See the `aws.EndpointResolverWithOptions` documentation for additional + * usage information. + * + * Deprecated: with the release of endpoint resolution v2 in API clients, + * EndpointResolver and EndpointResolverWithOptions are deprecated. + * Providing a value for this field will likely prevent you from using + * newer endpoint-related service features. See API client options + * EndpointResolverV2 and BaseEndpoint. + */ + endpointResolverWithOptions: EndpointResolverWithOptions + /** + * RetryMaxAttempts specifies the maximum number attempts an API client + * will call an operation that fails with a retryable error. + * + * API Clients will only use this value to construct a retryer if the + * Config.Retryer member is not nil. This value will be ignored if + * Retryer is not nil. + */ + retryMaxAttempts: number + /** + * RetryMode specifies the retry model the API client will be created with. + * + * API Clients will only use this value to construct a retryer if the + * Config.Retryer member is not nil. This value will be ignored if + * Retryer is not nil. + */ + retryMode: RetryMode + /** + * Retryer is a function that provides a Retryer implementation. A Retryer + * guides how HTTP requests should be retried in case of recoverable + * failures. When nil the API client will use a default retryer. + * + * In general, the provider function should return a new instance of a + * Retryer if you are attempting to provide a consistent Retryer + * configuration across all clients. This will ensure that each client will + * be provided a new instance of the Retryer implementation, and will avoid + * issues such as sharing the same retry token bucket across services. + * + * If not nil, RetryMaxAttempts, and RetryMode will be ignored by API + * clients. + */ + retryer: () => Retryer + /** + * ConfigSources are the sources that were used to construct the Config. + * Allows for additional configuration to be loaded by clients. + */ + configSources: Array<{ + }> + /** + * APIOptions provides the set of middleware mutations modify how the API + * client requests will be handled. This is useful for adding additional + * tracing data to a request, or changing behavior of the SDK's client. + */ + apiOptions: Array<(_arg0: middleware.Stack) => void> + /** + * The logger writer interface to write logging messages to. Defaults to + * standard error. + */ + logger: logging.Logger + /** + * Configures the events that will be sent to the configured logger. This + * can be used to configure the logging of signing, retries, request, and + * responses of the SDK clients. + * + * See the ClientLogMode type documentation for the complete set of logging + * modes and available configuration. + */ + clientLogMode: ClientLogMode + /** + * The configured DefaultsMode. 
If not specified, service clients will + * default to legacy. + * + * Supported modes are: auto, cross-region, in-region, legacy, mobile, + * standard + */ + defaultsMode: DefaultsMode + /** + * The RuntimeEnvironment configuration, only populated if the DefaultsMode + * is set to DefaultsModeAuto and is initialized by + * `config.LoadDefaultConfig`. You should not populate this structure + * programmatically, or rely on the values here within your applications. + */ + runtimeEnvironment: RuntimeEnvironment + /** + * AppId is an optional application specific identifier that can be set. + * When set it will be appended to the User-Agent header of every request + * in the form of App/{AppId}. This variable is sourced from environment + * variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id. + * See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for + * more information on environment variables and shared config settings. + */ + appID: string + /** + * BaseEndpoint is an intermediary transfer location to a service specific + * BaseEndpoint on a service's Options. + */ + baseEndpoint?: string + /** + * DisableRequestCompression toggles if an operation request could be + * compressed or not. Will be set to false by default. This variable is sourced from + * environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute + * disable_request_compression + */ + disableRequestCompression: boolean + /** + * RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be + * compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively. + * This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or + * the shared config profile attribute request_min_compression_size_bytes + */ + requestMinCompressSizeBytes: number + /** + * Controls how a resolved AWS account ID is handled for endpoint routing. + */ + accountIDEndpointMode: AccountIDEndpointMode + } + interface Config { + /** + * Copy will return a shallow copy of the Config object. + */ + copy(): Config + } + // @ts-ignore + import sdkrand = rand +} + /** * Package exec runs external commands. It wraps os.StartProcess to make it * easier to remap stdin and stdout, connect I/O with pipes, and do other @@ -9793,7 +10600,7 @@ namespace http { * pipelines, or redirections typically done by shells. The package * behaves more like C's "exec" family of functions. To expand glob * patterns, either call the shell directly, taking care to escape any - * dangerous input, or use the path/filepath package's Glob function. + * dangerous input, or use the [path/filepath] package's Glob function. * To expand environment variables, use package os's ExpandEnv. * * Note that the examples in this package assume a Unix system. @@ -9802,7 +10609,7 @@ namespace http { * * # Executables in the current directory * - * The functions Command and LookPath look for a program + * The functions [Command] and [LookPath] look for a program * in the directories listed in the current path, following the * conventions of the host operating system. * Operating systems have for decades included the current @@ -9813,10 +10620,10 @@ namespace http { * * To avoid those security problems, as of Go 1.19, this package will not resolve a program * using an implicit or explicit path entry relative to the current directory. 
- * That is, if you run exec.LookPath("go"), it will not successfully return + * That is, if you run [LookPath]("go"), it will not successfully return * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. * Instead, if the usual path algorithms would result in that answer, - * these functions return an error err satisfying errors.Is(err, ErrDot). + * these functions return an error err satisfying [errors.Is](err, [ErrDot]). * * For example, consider these two program snippets: * @@ -9882,7 +10689,7 @@ namespace exec { /** * Cmd represents an external command being prepared or run. * - * A Cmd cannot be reused after calling its Run, Output or CombinedOutput + * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] * methods. */ interface Cmd { @@ -10053,10 +10860,10 @@ namespace exec { * status. * * If the command starts but does not complete successfully, the error is of - * type *ExitError. Other error types may be returned for other situations. + * type [*ExitError]. Other error types may be returned for other situations. * * If the calling goroutine has locked the operating system thread - * with runtime.LockOSThread and modified any inheritable OS-level + * with [runtime.LockOSThread] and modified any inheritable OS-level * thread state (for example, Linux or Plan 9 name spaces), the new * process will inherit the caller's thread state. */ @@ -10068,7 +10875,7 @@ namespace exec { * * If Start returns successfully, the c.Process field will be set. * - * After a successful call to Start the Wait method must be called in + * After a successful call to Start the [Cmd.Wait] method must be called in * order to release associated system resources. */ start(): void @@ -10078,28 +10885,28 @@ namespace exec { * Wait waits for the command to exit and waits for any copying to * stdin or copying from stdout or stderr to complete. * - * The command must have been started by Start. + * The command must have been started by [Cmd.Start]. * * The returned error is nil if the command runs, has no problems * copying stdin, stdout, and stderr, and exits with a zero exit * status. * * If the command fails to run or doesn't complete successfully, the - * error is of type *ExitError. Other error types may be + * error is of type [*ExitError]. Other error types may be * returned for I/O problems. * - * If any of c.Stdin, c.Stdout or c.Stderr are not an *os.File, Wait also waits + * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits * for the respective I/O loop copying to or from the process to complete. * - * Wait releases any resources associated with the Cmd. + * Wait releases any resources associated with the [Cmd]. */ wait(): void } interface Cmd { /** * Output runs the command and returns its standard output. - * Any returned error will usually be of type *ExitError. - * If c.Stderr was nil, Output populates ExitError.Stderr. + * Any returned error will usually be of type [*ExitError]. + * If c.Stderr was nil, Output populates [ExitError.Stderr]. */ output(): string|Array } @@ -10114,7 +10921,7 @@ namespace exec { /** * StdinPipe returns a pipe that will be connected to the command's * standard input when the command starts. - * The pipe will be closed automatically after Wait sees the command exit. + * The pipe will be closed automatically after [Cmd.Wait] sees the command exit. * A caller need only call Close to force the pipe to close sooner. 
* For example, if the command being run will not exit until standard input * is closed, the caller must close the pipe. @@ -10126,10 +10933,10 @@ namespace exec { * StdoutPipe returns a pipe that will be connected to the command's * standard output when the command starts. * - * Wait will close the pipe after seeing the command exit, so most callers + * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers * need not close the pipe themselves. It is thus incorrect to call Wait * before all reads from the pipe have completed. - * For the same reason, it is incorrect to call Run when using StdoutPipe. + * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe. * See the example for idiomatic usage. */ stdoutPipe(): io.ReadCloser @@ -10139,10 +10946,10 @@ namespace exec { * StderrPipe returns a pipe that will be connected to the command's * standard error when the command starts. * - * Wait will close the pipe after seeing the command exit, so most callers + * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers * need not close the pipe themselves. It is thus incorrect to call Wait * before all reads from the pipe have completed. - * For the same reason, it is incorrect to use Run when using StderrPipe. + * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe. * See the StdoutPipe example for idiomatic usage. */ stderrPipe(): io.ReadCloser @@ -10156,6 +10963,5892 @@ namespace exec { } } +/** + * Package s3 provides the API client, operations, and parameter types for Amazon + * Simple Storage Service. + */ +namespace s3 { + // @ts-ignore + import awsmiddleware = middleware + // @ts-ignore + import awshttp = http + // @ts-ignore + import internalauth = auth + // @ts-ignore + import internalauthsmithy = smithy + // @ts-ignore + import internalConfig = configsources + // @ts-ignore + import internalmiddleware = middleware + // @ts-ignore + import acceptencodingcust = accept_encoding + // @ts-ignore + import internalChecksum = checksum + // @ts-ignore + import presignedurlcust = presigned_url + // @ts-ignore + import s3sharedconfig = config + // @ts-ignore + import s3cust = customizations + // @ts-ignore + import smithy = smithy_go + // @ts-ignore + import smithyauth = auth + // @ts-ignore + import smithydocument = document + // @ts-ignore + import smithyhttp = http + /** + * Client provides the API client to make operations call for Amazon Simple + * Storage Service. + */ + interface Client { + } + interface Client { + /** + * Options returns a copy of the client configuration. + * + * Callers SHOULD NOT perform mutations on any inner structures within client + * config. Config overrides should instead be made on a per-operation basis through + * functional options. + */ + options(): Options + } + interface Client { + /** + * This operation aborts a multipart upload. After a multipart upload is aborted, + * no additional parts can be uploaded using that upload ID. The storage consumed + * by any previously uploaded parts will be freed. However, if any part uploads are + * currently in progress, those part uploads might or might not succeed. As a + * result, it might be necessary to abort a given multipart upload multiple times + * in order to completely free all storage consumed by all parts. + * + * To verify that all parts have been removed and prevent getting charged for the + * part storage, you should call the [ListParts]API operation and ensure that the parts list + * is empty. 
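+ *
+ * Example (a rough sketch, not an official snippet; it assumes an initialized
+ * Client and a ctx value, and that the AbortMultipartUploadInput fields follow
+ * this file's lowercased Go field mapping):
+ *
+ * ```
+ * client.abortMultipartUpload(ctx, {
+ * bucket: "my-bucket",
+ * key: "my-object-key",
+ * uploadId: uploadId, // the ID returned when the upload was initiated
+ * })
+ * ```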
+ * + * ``` + * - Directory buckets - If multipart uploads in a directory bucket are in + * progress, you can't delete the bucket until all the in-progress multipart + * uploads are aborted or completed. To delete these in-progress multipart uploads, + * use the ListMultipartUploads operation to list the in-progress multipart + * uploads in the bucket and use the AbortMultupartUpload operation to abort all + * the in-progress multipart uploads. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Zonal endpoint. These endpoints support + * virtual-hosted-style requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . + * Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the + * Amazon S3 User Guide. + * ``` + * + * Permissions + * + * ``` + * - General purpose bucket permissions - For information about permissions + * required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. + * + * - Directory bucket permissions - To grant access to this API operation on a + * directory bucket, we recommend that you use the [CreateSession]CreateSession API operation + * for session-based authorization. Specifically, you grant the + * s3express:CreateSession permission to the directory bucket in a bucket policy + * or an IAM identity-based policy. Then, you make the CreateSession API call on + * the bucket to obtain a session token. With the session token in your request + * header, you can make API requests to this operation. After the session token + * expires, you make another CreateSession API call to generate a new session + * token for use. Amazon Web Services CLI or SDKs create session and refresh the + * session token automatically to avoid service interruptions when a session + * expires. For more information about authorization, see [CreateSession]CreateSession . + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . + * + * The following operations are related to AbortMultipartUpload : + * + * [CreateMultipartUpload] + * + * [UploadPart] + * + * [CompleteMultipartUpload] + * + * [ListParts] + * + * [ListMultipartUploads] + * + * [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + * [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + * [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html + * [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + * [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + */ + abortMultipartUpload(ctx: context.Context, params: AbortMultipartUploadInput, ...optFns: ((_arg0: Options) => void)[]): (AbortMultipartUploadOutput) + } + interface Client { + /** + * Completes a multipart upload by assembling previously uploaded parts. 
+ * + * You first initiate the multipart upload and then upload all parts using the [UploadPart] + * operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of + * an upload, you call this CompleteMultipartUpload operation to complete the + * upload. Upon receiving this request, Amazon S3 concatenates all the parts in + * ascending order by part number to create a new object. In the + * CompleteMultipartUpload request, you must provide the parts list and ensure that + * the parts list is complete. The CompleteMultipartUpload API operation + * concatenates the parts that you provide in the list. For each part in the list, + * you must provide the PartNumber value and the ETag value that are returned + * after that part was uploaded. + * + * The processing of a CompleteMultipartUpload request could take several minutes + * to finalize. After Amazon S3 begins processing the request, it sends an HTTP + * response header that specifies a 200 OK response. While processing is in + * progress, Amazon S3 periodically sends white space characters to keep the + * connection from timing out. A request could fail after the initial 200 OK + * response has been sent. This means that a 200 OK response can contain either a + * success or an error. The error response might be embedded in the 200 OK + * response. If you call this API operation directly, make sure to design your + * application to parse the contents of the response and handle it appropriately. + * If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect + * the embedded error and apply error handling per your configuration settings + * (including automatically retrying the request as appropriate). If the condition + * persists, the SDKs throw an exception (or, for the SDKs that don't use + * exceptions, they return an error). + * + * Note that if CompleteMultipartUpload fails, applications should be prepared to + * retry any failed requests (including 500 error responses). For more information, + * see [Amazon S3 Error Best Practices]. + * + * You can't use Content-Type: application/x-www-form-urlencoded for the + * CompleteMultipartUpload requests. Also, if you don't provide a Content-Type + * header, CompleteMultipartUpload can still return a 200 OK response. + * + * For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. + * + * Directory buckets - For directory buckets, you must make requests for this API + * operation to the Zonal endpoint. These endpoints support virtual-hosted-style + * requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style + * requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User + * Guide. + * + * Permissions + * ``` + * - General purpose bucket permissions - For information about permissions + * required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. + * ``` + * + * If you provide an [additional checksum value]in your MultipartUpload requests and the object is encrypted + * + * ``` + * with Key Management Service, you must have permission to use the kms:Decrypt + * action for the CompleteMultipartUpload request to succeed. + * + * - Directory bucket permissions - To grant access to this API operation on a + * directory bucket, we recommend that you use the [CreateSession]CreateSession API operation + * for session-based authorization. 
Specifically, you grant the + * s3express:CreateSession permission to the directory bucket in a bucket policy + * or an IAM identity-based policy. Then, you make the CreateSession API call on + * the bucket to obtain a session token. With the session token in your request + * header, you can make API requests to this operation. After the session token + * expires, you make another CreateSession API call to generate a new session + * token for use. Amazon Web Services CLI or SDKs create session and refresh the + * session token automatically to avoid service interruptions when a session + * expires. For more information about authorization, see [CreateSession]CreateSession . + * ``` + * + * If the object is encrypted with SSE-KMS, you must also have the + * + * ``` + * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies + * and KMS key policies for the KMS key. + * ``` + * + * Special errors + * + * ``` + * - Error Code: EntityTooSmall + * + * - Description: Your proposed upload is smaller than the minimum allowed + * object size. Each part must be at least 5 MB in size, except the last part. + * + * - HTTP Status Code: 400 Bad Request + * + * - Error Code: InvalidPart + * + * - Description: One or more of the specified parts could not be found. The + * part might not have been uploaded, or the specified ETag might not have matched + * the uploaded part's ETag. + * + * - HTTP Status Code: 400 Bad Request + * + * - Error Code: InvalidPartOrder + * + * - Description: The list of parts was not in ascending order. The parts list + * must be specified in order by part number. + * + * - HTTP Status Code: 400 Bad Request + * + * - Error Code: NoSuchUpload + * + * - Description: The specified multipart upload does not exist. The upload ID + * might be invalid, or the multipart upload might have been aborted or completed. + * + * - HTTP Status Code: 404 Not Found + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . 
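+ *
+ * Example (a rough sketch, not an official snippet; it assumes an initialized
+ * Client and a ctx value, that the CompleteMultipartUploadInput fields follow
+ * this file's lowercased Go field mapping, and the parts list shape is elided):
+ *
+ * ```
+ * const out = client.completeMultipartUpload(ctx, {
+ * bucket: "my-bucket",
+ * key: "my-object-key",
+ * uploadId: uploadId,
+ * // multipartUpload: { parts: [...] } // the previously uploaded parts
+ * })
+ * ```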
+ * + * The following operations are related to CompleteMultipartUpload : + * + * [CreateMultipartUpload] + * + * [UploadPart] + * + * [AbortMultipartUpload] + * + * [ListParts] + * + * [ListMultipartUploads] + * + * [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html + * [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html + * [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + * [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + * [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [additional checksum value]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html + * [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html + * [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + * + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + */ + completeMultipartUpload(ctx: context.Context, params: CompleteMultipartUploadInput, ...optFns: ((_arg0: Options) => void)[]): (CompleteMultipartUploadOutput) + } + interface Client { + /** + * Creates a copy of an object that is already stored in Amazon S3. + * + * You can store individual objects of up to 5 TB in Amazon S3. You create a copy + * of your object up to 5 GB in size in a single atomic action using this API. + * However, to copy an object greater than 5 GB, you must use the multipart upload + * Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API]. + * + * You can copy individual objects between general purpose buckets, between + * directory buckets, and between general purpose buckets and directory buckets. + * + * ``` + * - Amazon S3 supports copy operations using Multi-Region Access Points only as + * a destination when using the Multi-Region Access Point ARN. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Zonal endpoint. These endpoints support + * virtual-hosted-style requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . + * Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the + * Amazon S3 User Guide. + * + * - VPC endpoints don't support cross-Region requests (including copies). If + * you're using VPC endpoints, your source and destination buckets should be in the + * same Amazon Web Services Region as your VPC endpoint. + * ``` + * + * Both the Region that you want to copy the object from and the Region that you + * want to copy the object to must be enabled for your account. For more + * information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon + * Web Services Account Management Guide. + * + * Amazon S3 transfer acceleration does not support cross-Region copies. 
If you + * request a cross-Region copy using a transfer acceleration endpoint, you get a + * 400 Bad Request error. For more information, see [Transfer Acceleration]. + * + * Authentication and authorization All CopyObject requests must be authenticated + * and signed by using IAM credentials (access key ID and secret access key for the + * IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source + * , must be signed. For more information, see [REST Authentication]. + * + * Directory buckets - You must use the IAM credentials to authenticate and + * authorize your access to the CopyObject API operation, instead of using the + * temporary security credentials through the CreateSession API operation. + * + * Amazon Web Services CLI or SDKs handles authentication and authorization on + * your behalf. + * + * Permissions You must have read access to the source object and write access to + * the destination bucket. + * + * ``` + * - General purpose bucket permissions - You must have permissions in an IAM + * policy based on the source and destination bucket types in a CopyObject + * operation. + * + * - If the source object is in a general purpose bucket, you must have + * s3:GetObject permission to read the source object that is being copied. + * + * - If the destination bucket is a general purpose bucket, you must have + * s3:PutObject permission to write the object copy to the destination bucket. + * + * - Directory bucket permissions - You must have permissions in a bucket policy + * or an IAM identity-based policy based on the source and destination bucket types + * in a CopyObject operation. + * + * - If the source object that you want to copy is in a directory bucket, you + * must have the s3express:CreateSession permission in the Action element of a + * policy to read the object. By default, the session is in the ReadWrite mode. + * If you want to restrict the access, you can explicitly set the + * s3express:SessionMode condition key to ReadOnly on the copy source bucket. + * + * - If the copy destination is a directory bucket, you must have the + * s3express:CreateSession permission in the Action element of a policy to write + * the object to the destination. The s3express:SessionMode condition key can't + * be set to ReadOnly on the copy destination bucket. + * ``` + * + * If the object is encrypted with SSE-KMS, you must also have the + * + * ``` + * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies + * and KMS key policies for the KMS key. + * ``` + * + * For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. + * + * Response and special errors When the request is an HTTP 1.1 request, the + * response is chunk encoded. When the request is not an HTTP 1.1 request, the + * response would not contain the Content-Length . You always need to read the + * entire response body to check if the copy succeeds. + * + * ``` + * - If the copy is successful, you receive a response with information about + * the copied object. + * + * - A copy request might return an error when Amazon S3 receives the copy + * request or while Amazon S3 is copying the files. A 200 OK response can contain + * either a success or an error. + * + * - If the error occurs before the copy action starts, you receive a standard + * Amazon S3 error. 
+ * + * - If the error occurs during the copy operation, the error response is + * embedded in the 200 OK response. For example, in a cross-region copy, you may + * encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3] + * . The 200 OK status code means the copy was accepted, but it doesn't mean the + * copy is complete. Another example is when you disconnect from Amazon S3 before + * the copy is complete, Amazon S3 might cancel the copy and you may receive a + * 200 OK response. You must stay connected to Amazon S3 until the entire + * response is successfully received and processed. + * ``` + * + * If you call this API operation directly, make sure to design your application + * + * ``` + * to parse the content of the response and handle it appropriately. If you use + * Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the + * embedded error and apply error handling per your configuration settings + * (including automatically retrying the request as appropriate). If the condition + * persists, the SDKs throw an exception (or, for the SDKs that don't use + * exceptions, they return an error). + * ``` + * + * Charge The copy request charge is based on the storage class and Region that + * you specify for the destination object. The request can also result in a data + * retrieval charge for the source if the source storage class bills for data + * retrieval. If the copy source is in a different region, the data transfer is + * billed to the copy source account. For pricing information, see [Amazon S3 pricing]. + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . + * + * The following operations are related to CopyObject : + * + * [PutObject] + * + * [GetObject] + * + * [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html + * [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror + * [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + * [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + * [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone + * [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + * [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/ + */ + copyObject(ctx: context.Context, params: CopyObjectInput, ...optFns: ((_arg0: Options) => void)[]): (CopyObjectOutput) + } + interface Client { + /** + * This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts + * bucket, see [CreateBucket]CreateBucket . 
+ * + * Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have + * a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous + * requests are never allowed to create buckets. By creating the bucket, you become + * the bucket owner. + * + * There are two types of buckets: general purpose buckets and directory buckets. + * For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets]in the Amazon S3 User Guide. + * + * ``` + * - General purpose buckets - If you send your CreateBucket request to the + * s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So + * the signature calculations in Signature Version 4 must use us-east-1 as the + * Region, even if the location constraint in the request specifies another Region + * where the bucket is to be created. If you create a bucket in a Region other than + * US East (N. Virginia), your application must be able to handle 307 redirect. For + * more information, see [Virtual hosting of buckets]in the Amazon S3 User Guide. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Regional endpoint. These endpoints support path-style + * requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in + * the Amazon S3 User Guide. + * ``` + * + * Permissions + * + * ``` + * - General purpose bucket permissions - In addition to the s3:CreateBucket + * permission, the following permissions are required in a policy when your + * CreateBucket request includes specific headers: + * + * - Access control lists (ACLs) - In your CreateBucket request, if you specify + * an access control list (ACL) and set it to public-read , public-read-write , + * authenticated-read , or if you explicitly specify any other custom ACLs, both + * s3:CreateBucket and s3:PutBucketAcl permissions are required. In your + * CreateBucket request, if you set the ACL to private , or if you don't specify + * any ACLs, only the s3:CreateBucket permission is required. + * + * - Object Lock - In your CreateBucket request, if you set + * x-amz-bucket-object-lock-enabled to true, the + * s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are + * required. + * + * - S3 Object Ownership - If your CreateBucket request includes the + * x-amz-object-ownership header, then the s3:PutBucketOwnershipControls + * permission is required. + * ``` + * + * To set an ACL on a bucket as part of a CreateBucket request, you must explicitly + * + * ``` + * set S3 Object Ownership for the bucket to a different value than the default, + * BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public + * access, you must first create the bucket (without the bucket ACL) and then + * explicitly disable Block Public Access on the bucket before using PutBucketAcl + * to set the ACL. If you try to create a bucket with a public ACL, the request + * will fail. + * ``` + * + * For the majority of modern use cases in S3, we recommend that you keep all + * + * ``` + * Block Public Access settings enabled and keep ACLs disabled. If you would like + * to share data with users outside of your account, you can use bucket policies as + * needed. 
For more information, see [Controlling ownership of objects and disabling ACLs for your bucket]and [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. + * + * - S3 Block Public Access - If your specific use case requires granting public + * access to your S3 resources, you can disable Block Public Access. Specifically, + * you can create a new bucket with Block Public Access enabled, then separately + * call the [DeletePublicAccessBlock]DeletePublicAccessBlock API. To use this operation, you must have the + * s3:PutBucketPublicAccessBlock permission. For more information about S3 Block + * Public Access, see [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. + * + * - Directory bucket permissions - You must have the s3express:CreateBucket + * permission in an IAM identity-based policy instead of a bucket policy. + * Cross-account access to this API operation isn't supported. This operation can + * only be performed by the Amazon Web Services account that owns the resource. For + * more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the + * Amazon S3 User Guide. + * ``` + * + * The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public + * + * ``` + * Access are not supported for directory buckets. For directory buckets, all Block + * Public Access settings are enabled at the bucket level and S3 Object Ownership + * is set to Bucket owner enforced (ACLs disabled). These settings can't be + * modified. + * ``` + * + * For more information about permissions for creating and working with directory + * + * ``` + * buckets, see [Directory buckets]in the Amazon S3 User Guide. For more information about + * supported S3 features for directory buckets, see [Features of S3 Express One Zone]in the Amazon S3 User Guide. + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * s3express-control.region.amazonaws.com . 
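+ *
+ * A minimal usage sketch (illustrative only): `client` and `ctx` below are
+ * assumed to be an already constructed Client and context.Context value, and
+ * the bucket name and Region are placeholders. The field names mirror the
+ * CreateBucketInput type.
+ *
+ * ```
+ * const out = client.createBucket(ctx, {
+ *     Bucket: "example-bucket",
+ *     CreateBucketConfiguration: { LocationConstraint: "eu-central-1" },
+ * })
+ * ```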
+ * + * The following operations are related to CreateBucket : + * + * [PutObject] + * + * [DeleteBucket] + * + * [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html + * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html + * [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + * + * [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html + * [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html + * [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features + * [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + * [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html + * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html + */ + createBucket(ctx: context.Context, params: CreateBucketInput, ...optFns: ((_arg0: Options) => void)[]): (CreateBucketOutput) + } + interface Client { + /** + * This action initiates a multipart upload and returns an upload ID. This upload + * ID is used to associate all of the parts in the specific multipart upload. You + * specify this upload ID in each of your subsequent upload part requests (see [UploadPart]). + * You also include this upload ID in the final request to either complete or abort + * the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview] + * in the Amazon S3 User Guide. + * + * After you initiate a multipart upload and upload one or more parts, to stop + * being charged for storing the uploaded parts, you must either complete or abort + * the multipart upload. Amazon S3 frees up the space used to store the parts and + * stops charging you for storing them only after you either complete or abort a + * multipart upload. + * + * If you have configured a lifecycle rule to abort incomplete multipart uploads, + * the created multipart upload must be completed within the number of days + * specified in the bucket lifecycle configuration. Otherwise, the incomplete + * multipart upload becomes eligible for an abort action and Amazon S3 aborts the + * multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. + * + * ``` + * - Directory buckets - S3 Lifecycle is not supported by directory buckets. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Zonal endpoint. These endpoints support + * virtual-hosted-style requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . + * Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the + * Amazon S3 User Guide. 
+ * ```
+ *
+ * Request signing For request signing, multipart upload is just a series of
+ * regular requests. You initiate a multipart upload, send one or more requests to
+ * upload parts, and then complete the multipart upload process. You sign each
+ * request individually. There is nothing special about signing multipart upload
+ * requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)] in the Amazon S3 User Guide.
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - To perform a multipart upload with
+ * encryption using a Key Management Service (KMS) KMS key, the requester must
+ * have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key.
+ * The requester must also have permissions for the kms:GenerateDataKey action
+ * for the CreateMultipartUpload API. Then, the requester needs permissions for
+ * the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These
+ * permissions are required because Amazon S3 must decrypt and read data from the
+ * encrypted file parts before it completes the multipart upload. For more
+ * information, see [Multipart upload API and permissions] and [Protecting data using server-side encryption with Amazon Web Services KMS] in the Amazon S3 User Guide.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. The Amazon Web Services CLI and SDKs create the session and
+ * refresh the session token automatically to avoid service interruptions when a
+ * session expires. For more information about authorization, see [CreateSession]CreateSession .
+ * ```
+ *
+ * Encryption
+ *
+ * ```
+ * - General purpose buckets - Server-side encryption is for data encryption at
+ * rest. Amazon S3 encrypts your data as it writes it to disks in its data centers
+ * and decrypts it when you access it. Amazon S3 automatically encrypts all new
+ * objects that are uploaded to an S3 bucket. When doing a multipart upload, if you
+ * don't specify encryption information in your request, the encryption setting of
+ * the uploaded parts is set to the default encryption configuration of the
+ * destination bucket. By default, all buckets have a base level of encryption
+ * configuration that uses server-side encryption with Amazon S3 managed keys
+ * (SSE-S3). If the destination bucket has a default encryption configuration that
+ * uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS)
+ * or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding
+ * KMS key or customer-provided key to encrypt the uploaded parts. When you
+ * perform a CreateMultipartUpload operation, if you want to use a different type
+ * of encryption setting for the uploaded parts, you can request that Amazon S3
+ * encrypt the object with a different encryption key (such as an Amazon S3
+ * managed key, a KMS key, or a customer-provided key).
When the encryption setting + * in your request is different from the default encryption configuration of the + * destination bucket, the encryption setting in your request takes precedence. If + * you choose to provide your own encryption key, the request headers you provide + * in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload + * request. + * + * - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( + * aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) + * – If you want Amazon Web Services to manage the keys used to encrypt data, + * specify the following headers in the request. + * + * - x-amz-server-side-encryption + * + * - x-amz-server-side-encryption-aws-kms-key-id + * + * - x-amz-server-side-encryption-context + * + * - If you specify x-amz-server-side-encryption:aws:kms , but don't provide + * x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web + * Services managed key ( aws/s3 key) in KMS to protect the data. + * + * - To perform a multipart upload with encryption by using an Amazon Web + * Services KMS key, the requester must have permission to the kms:Decrypt and + * kms:GenerateDataKey* actions on the key. These permissions are required + * because Amazon S3 must decrypt and read data from the encrypted file parts + * before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in + * the Amazon S3 User Guide. + * + * - If your Identity and Access Management (IAM) user or role is in the same + * Amazon Web Services account as the KMS key, then you must have these permissions + * on the key policy. If your IAM user or role is in a different account from the + * key, then you must have the permissions on both the key policy and your IAM user + * or role. + * + * - All GET and PUT requests for an object protected by KMS fail if you don't + * make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), + * or Signature Version 4. For information about configuring any of the officially + * supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the + * Amazon S3 User Guide. + * ``` + * + * For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys] + * + * ``` + * in the Amazon S3 User Guide. + * + * - Use customer-provided encryption keys (SSE-C) – If you want to manage your + * own encryption keys, provide all the following headers in the request. + * + * - x-amz-server-side-encryption-customer-algorithm + * + * - x-amz-server-side-encryption-customer-key + * + * - x-amz-server-side-encryption-customer-key-MD5 + * ``` + * + * For more information about server-side encryption with customer-provided + * + * ``` + * encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide. + * + * - Directory buckets - For directory buckets, there are only two supported + * options for server-side encryption: server-side encryption with Amazon S3 + * managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + * (SSE-KMS) ( aws:kms ). 
We recommend that the bucket's default encryption uses + * the desired encryption configuration and you don't override the bucket default + * encryption in your CreateSession requests or PUT object requests. Then, new + * objects are automatically encrypted with the desired encryption settings. For + * more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + * the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + * ``` + * + * In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + * + * ``` + * encryption request headers must match the encryption settings that are specified + * in the CreateSession request. You can't override the values of the encryption + * settings ( x-amz-server-side-encryption , + * x-amz-server-side-encryption-aws-kms-key-id , + * x-amz-server-side-encryption-context , and + * x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + * CreateSession request. You don't need to explicitly specify these encryption + * settings values in Zonal endpoint API calls, and Amazon S3 will use the + * encryption settings values from the CreateSession request to protect new + * objects in the directory bucket. + * ``` + * + * When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + * + * ``` + * session token refreshes automatically to avoid service interruptions when a + * session expires. The CLI or the Amazon Web Services SDKs use the bucket's + * default encryption configuration for the CreateSession request. It's not + * supported to override the encryption settings values in the CreateSession + * request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + * request headers must match the default encryption configuration of the directory + * bucket. + * ``` + * + * For directory buckets, when you perform a CreateMultipartUpload operation and an + * + * ``` + * UploadPartCopy operation, the request headers you provide in the + * CreateMultipartUpload request must match the default encryption configuration + * of the destination bucket. + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . 
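+ *
+ * A minimal usage sketch (illustrative only): `client` and `ctx` are assumed
+ * to be an already configured Client and context.Context value, and the bucket
+ * and key names are placeholders based on the CreateMultipartUploadInput type.
+ *
+ * ```
+ * const mp = client.createMultipartUpload(ctx, {
+ *     Bucket: "example-bucket",
+ *     Key: "backups/archive.zip",
+ * })
+ * // mp.UploadId is then passed to the subsequent uploadPart and
+ * // completeMultipartUpload (or abortMultipartUpload) calls.
+ * ```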
+ * + * The following operations are related to CreateMultipartUpload : + * + * [UploadPart] + * + * [CompleteMultipartUpload] + * + * [AbortMultipartUpload] + * + * [ListParts] + * + * [ListMultipartUploads] + * + * [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + * [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + * [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + * [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + * [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + * [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html + * [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + * [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html + * [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html + * [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + * + * [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + * [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + */ + createMultipartUpload(ctx: context.Context, params: CreateMultipartUploadInput, ...optFns: ((_arg0: Options) => void)[]): (CreateMultipartUploadOutput) + } + interface Client { + /** + * Creates a session that establishes temporary security credentials to support + * fast authentication and authorization for the Zonal endpoint API operations on + * directory buckets. For more information about Zonal endpoint API operations that + * include the Availability Zone in the request endpoint, see [S3 Express One Zone APIs]in the Amazon S3 + * User Guide. + * + * To make Zonal endpoint API requests on a directory bucket, use the CreateSession + * API operation. Specifically, you grant s3express:CreateSession permission to a + * bucket in a bucket policy or an IAM identity-based policy. 
Then, you use IAM
+ * credentials to make the CreateSession API request on the bucket, which returns
+ * temporary security credentials that include the access key ID, secret access
+ * key, session token, and expiration. These credentials have associated
+ * permissions to access the Zonal endpoint API operations. After the session is
+ * created, you don't need to use other policies to grant permissions to each Zonal
+ * endpoint API individually. Instead, in your Zonal endpoint API requests, you
+ * sign your requests by applying the temporary security credentials of the session
+ * to the request headers and following the SigV4 protocol for authentication. You
+ * also apply the session token to the x-amz-s3session-token request header for
+ * authorization. Temporary security credentials are scoped to the bucket and
+ * expire after 5 minutes. After the expiration time, any calls that you make with
+ * those credentials will fail. You must use IAM credentials again to make a
+ * CreateSession API request that generates a new set of temporary credentials for
+ * use. Temporary credentials cannot be extended or refreshed beyond the original
+ * specified interval.
+ *
+ * If you use the Amazon Web Services SDKs, the SDKs refresh the session token
+ * automatically to avoid service interruptions when a session expires. We
+ * recommend that you use the Amazon Web Services SDKs to initiate and manage
+ * requests to the CreateSession API. For more information, see [Performance guidelines and design patterns] in the Amazon S3
+ * User Guide.
+ *
+ * ```
+ * - You must make requests for this API operation to the Zonal endpoint. These
+ * endpoints support virtual-hosted-style requests in the format
+ * https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests
+ * are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User Guide.
+ *
+ * - CopyObject API operation - Unlike other Zonal endpoint API operations, the
+ * CopyObject API operation doesn't use the temporary security credentials
+ * returned from the CreateSession API operation for authentication and
+ * authorization. For information about authentication and authorization of the
+ * CopyObject API operation on directory buckets, see [CopyObject].
+ *
+ * - HeadBucket API operation - Unlike other Zonal endpoint API operations, the
+ * HeadBucket API operation doesn't use the temporary security credentials
+ * returned from the CreateSession API operation for authentication and
+ * authorization. For information about authentication and authorization of the
+ * HeadBucket API operation on directory buckets, see [HeadBucket].
+ * ```
+ *
+ * Permissions To obtain temporary security credentials, you must create a bucket
+ * policy or an IAM identity-based policy that grants s3express:CreateSession
+ * permission to the bucket. In a policy, you can use the s3express:SessionMode
+ * condition key to control who can create a ReadWrite or ReadOnly session. For
+ * more information about ReadWrite or ReadOnly sessions, see [x-amz-create-session-mode]
+ * x-amz-create-session-mode . For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3
+ * User Guide.
+ *
+ * To grant cross-account access to Zonal endpoint API operations, the bucket
+ * policy should also grant both accounts the s3express:CreateSession permission.
+ * + * If you want to encrypt objects with SSE-KMS, you must also have the + * kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based + * policies and KMS key policies for the target KMS key. + * + * Encryption For directory buckets, there are only two supported options for + * server-side encryption: server-side encryption with Amazon S3 managed keys + * (SSE-S3) ( AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms + * ). We recommend that the bucket's default encryption uses the desired encryption + * configuration and you don't override the bucket default encryption in your + * CreateSession requests or PUT object requests. Then, new objects are + * automatically encrypted with the desired encryption settings. For more + * information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the + * encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + * + * For [Zonal endpoint (object-level) API operations] except [CopyObject] and [UploadPartCopy], you authenticate and authorize requests through [CreateSession] for low + * latency. To encrypt new objects in a directory bucket with SSE-KMS, you must + * specify SSE-KMS as the directory bucket's default encryption configuration with + * a KMS key (specifically, a [customer managed key]). Then, when a session is created for Zonal + * endpoint API operations, new objects are automatically encrypted and decrypted + * with SSE-KMS and S3 Bucket Keys during the session. + * + * Only 1 [customer managed key] is supported per directory bucket for the lifetime of the bucket. [Amazon Web Services managed key] ( + * aws/s3 ) isn't supported. After you specify SSE-KMS as your bucket's default + * encryption configuration with a customer managed key, you can't change the + * customer managed key for the bucket's SSE-KMS configuration. + * + * In the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]) using the REST API, you can't + * override the values of the encryption settings ( x-amz-server-side-encryption , + * x-amz-server-side-encryption-aws-kms-key-id , + * x-amz-server-side-encryption-context , and + * x-amz-server-side-encryption-bucket-key-enabled ) from the CreateSession + * request. You don't need to explicitly specify these encryption settings values + * in Zonal endpoint API calls, and Amazon S3 will use the encryption settings + * values from the CreateSession request to protect new objects in the directory + * bucket. + * + * When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + * session token refreshes automatically to avoid service interruptions when a + * session expires. The CLI or the Amazon Web Services SDKs use the bucket's + * default encryption configuration for the CreateSession request. It's not + * supported to override the encryption settings values in the CreateSession + * request. Also, in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), it's not + * supported to override the values of the encryption settings from the + * CreateSession request. + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . 
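+ *
+ * A minimal usage sketch (illustrative only): `client` and `ctx` are assumed,
+ * the directory bucket name is a placeholder, and SessionMode is optional and
+ * shown here only to illustrate the ReadOnly variant described above.
+ *
+ * ```
+ * const session = client.createSession(ctx, {
+ *     Bucket: "example-bucket--usw2-az1--x-s3",
+ *     SessionMode: "ReadOnly",
+ * })
+ * // session.Credentials carries the temporary access key ID, secret access
+ * // key, session token, and expiration for subsequent Zonal endpoint calls.
+ * ```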
+ * + * [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + * [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + * [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html + * [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + * [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html + * [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html + * [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + * [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters + * [Zonal endpoint (object-level) API operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-differences.html#s3-express-differences-api-operations + */ + createSession(ctx: context.Context, params: CreateSessionInput, ...optFns: ((_arg0: Options) => void)[]): (CreateSessionOutput) + } + interface Client { + /** + * Deletes the S3 bucket. All objects (including all object versions and delete + * markers) in the bucket must be deleted before the bucket itself can be deleted. + * + * ``` + * - Directory buckets - If multipart uploads in a directory bucket are in + * progress, you can't delete the bucket until all the in-progress multipart + * uploads are aborted or completed. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Regional endpoint. These endpoints support path-style + * requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in + * the Amazon S3 User Guide. + * ``` + * + * Permissions + * + * ``` + * - General purpose bucket permissions - You must have the s3:DeleteBucket + * permission on the specified bucket in a policy. + * + * - Directory bucket permissions - You must have the s3express:DeleteBucket + * permission in an IAM identity-based policy instead of a bucket policy. + * Cross-account access to this API operation isn't supported. This operation can + * only be performed by the Amazon Web Services account that owns the resource. 
For + * more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the + * Amazon S3 User Guide. + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * s3express-control.region.amazonaws.com . + * + * The following operations are related to DeleteBucket : + * + * [CreateBucket] + * + * [DeleteObject] + * + * [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html + */ + deleteBucket(ctx: context.Context, params: DeleteBucketInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Deletes an analytics configuration for the bucket (specified by the analytics + * configuration ID). + * + * To use this operation, you must have permissions to perform the + * s3:PutAnalyticsConfiguration action. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. For more + * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. + * + * The following operations are related to DeleteBucketAnalyticsConfiguration : + * + * [GetBucketAnalyticsConfiguration] + * + * [ListBucketAnalyticsConfigurations] + * + * [PutBucketAnalyticsConfiguration] + * + * [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html + * [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html + * [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + */ + deleteBucketAnalyticsConfiguration(ctx: context.Context, params: DeleteBucketAnalyticsConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketAnalyticsConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Deletes the cors configuration information set for the bucket. + * + * To use this operation, you must have permission to perform the s3:PutBucketCORS + * action. The bucket owner has this permission by default and can grant this + * permission to others. + * + * For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. 
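+ *
+ * A minimal usage sketch (illustrative only; `client`, `ctx`, and the bucket
+ * name are assumed placeholders):
+ *
+ * ```
+ * client.deleteBucketCors(ctx, { Bucket: "example-bucket" })
+ * ```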
+ *
+ * The following operations are related to DeleteBucketCors :
+ *
+ * [PutBucketCors]
+ *
+ * [RESTOPTIONSobject]
+ *
+ * [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html
+ * [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+ * [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html
+ */
+ deleteBucketCors(ctx: context.Context, params: DeleteBucketCorsInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketCorsOutput)
+ }
+ interface Client {
+ /**
+ * This implementation of the DELETE action resets the default encryption for the
+ * bucket as server-side encryption with Amazon S3 managed keys (SSE-S3).
+ *
+ * ```
+ * - General purpose buckets - For information about the bucket default
+ * encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide.
+ *
+ * - Directory buckets - For directory buckets, there are only two supported
+ * options for server-side encryption: SSE-S3 and SSE-KMS. For information about
+ * the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets].
+ * ```
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - The s3:PutEncryptionConfiguration
+ * permission is required in a policy. The bucket owner has this permission by
+ * default. The bucket owner can grant this permission to others. For more
+ * information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources].
+ *
+ * - Directory bucket permissions - To grant access to this API operation, you
+ * must have the s3express:PutEncryptionConfiguration permission in an IAM
+ * identity-based policy instead of a bucket policy. Cross-account access to this
+ * API operation isn't supported. This operation can only be performed by the
+ * Amazon Web Services account that owns the resource. For more information about
+ * directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
+ * ```
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * s3express-control.region.amazonaws.com .
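+ *
+ * A minimal usage sketch (illustrative only; `client`, `ctx`, and the bucket
+ * name are assumed placeholders). This resets the bucket back to SSE-S3:
+ *
+ * ```
+ * client.deleteBucketEncryption(ctx, { Bucket: "example-bucket" })
+ * ```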
+ * + * The following operations are related to DeleteBucketEncryption : + * + * [PutBucketEncryption] + * + * [GetBucketEncryption] + * + * [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html + * [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html + * [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html + * [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html + */ + deleteBucketEncryption(ctx: context.Context, params: DeleteBucketEncryptionInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketEncryptionOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Deletes the S3 Intelligent-Tiering configuration from the specified bucket. + * + * The S3 Intelligent-Tiering storage class is designed to optimize storage costs + * by automatically moving data to the most cost-effective storage access tier, + * without performance impact or operational overhead. S3 Intelligent-Tiering + * delivers automatic cost savings in three low latency and high throughput access + * tiers. To get the lowest storage cost on data that can be accessed in minutes to + * hours, you can choose to activate additional archiving capabilities. + * + * The S3 Intelligent-Tiering storage class is the ideal storage class for data + * with unknown, changing, or unpredictable access patterns, independent of object + * size or retention period. If the size of an object is less than 128 KB, it is + * not monitored and not eligible for auto-tiering. Smaller objects can be stored, + * but they are always charged at the Frequent Access tier rates in the S3 + * Intelligent-Tiering storage class. + * + * For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
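+ *
+ * A minimal usage sketch (illustrative only; `client`, `ctx`, the bucket name,
+ * and the configuration ID are assumed placeholders):
+ *
+ * ```
+ * client.deleteBucketIntelligentTieringConfiguration(ctx, {
+ *     Bucket: "example-bucket",
+ *     Id: "ExampleTieringConfig",
+ * })
+ * ```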
+ * + * Operations related to DeleteBucketIntelligentTieringConfiguration include: + * + * [GetBucketIntelligentTieringConfiguration] + * + * [PutBucketIntelligentTieringConfiguration] + * + * [ListBucketIntelligentTieringConfigurations] + * + * [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html + * [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html + * [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html + * [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + */ + deleteBucketIntelligentTieringConfiguration(ctx: context.Context, params: DeleteBucketIntelligentTieringConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketIntelligentTieringConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Deletes an inventory configuration (identified by the inventory ID) from the + * bucket. + * + * To use this operation, you must have permissions to perform the + * s3:PutInventoryConfiguration action. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. For more + * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. + * + * Operations related to DeleteBucketInventoryConfiguration include: + * + * [GetBucketInventoryConfiguration] + * + * [PutBucketInventoryConfiguration] + * + * [ListBucketInventoryConfigurations] + * + * [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html + * [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html + * [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html + */ + deleteBucketInventoryConfiguration(ctx: context.Context, params: DeleteBucketInventoryConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketInventoryConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Deletes the lifecycle configuration from the specified bucket. Amazon S3 + * removes all the lifecycle configuration rules in the lifecycle subresource + * associated with the bucket. Your objects never expire, and Amazon S3 no longer + * automatically deletes any objects on the basis of rules contained in the deleted + * lifecycle configuration. + * + * To use this operation, you must have permission to perform the + * s3:PutLifecycleConfiguration action. 
By default, the bucket owner has this
+ * permission and can grant it to others.
+ *
+ * There is usually some time lag before lifecycle configuration deletion is fully
+ * propagated to all the Amazon S3 systems.
+ *
+ * For more information about object expiration, see [Elements to Describe Lifecycle Actions].
+ *
+ * Related actions include:
+ *
+ * [PutBucketLifecycleConfiguration]
+ *
+ * [GetBucketLifecycleConfiguration]
+ *
+ * [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+ * [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions
+ * [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+ */
+ deleteBucketLifecycle(ctx: context.Context, params: DeleteBucketLifecycleInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketLifecycleOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Deletes a metrics configuration for the Amazon CloudWatch request metrics
+ * (specified by the metrics configuration ID) from the bucket. Note that this
+ * doesn't include the daily storage metrics.
+ *
+ * To use this operation, you must have permissions to perform the
+ * s3:PutMetricsConfiguration action. The bucket owner has this permission by
+ * default. The bucket owner can grant this permission to others. For more
+ * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources].
+ *
+ * For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch].
+ *
+ * The following operations are related to DeleteBucketMetricsConfiguration :
+ *
+ * [GetBucketMetricsConfiguration]
+ *
+ * [PutBucketMetricsConfiguration]
+ *
+ * [ListBucketMetricsConfigurations]
+ *
+ * [Monitoring Metrics with Amazon CloudWatch]
+ *
+ * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ * [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+ * [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
+ * [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
+ * [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
+ * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ */
+ deleteBucketMetricsConfiguration(ctx: context.Context, params: DeleteBucketMetricsConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketMetricsConfigurationOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you
+ * must have the s3:PutBucketOwnershipControls permission. For more information
+ * about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+ *
+ * For information about Amazon S3 Object Ownership, see [Using Object Ownership].
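+ *
+ * A minimal usage sketch (illustrative only; `client`, `ctx`, and the bucket
+ * name are assumed placeholders):
+ *
+ * ```
+ * client.deleteBucketOwnershipControls(ctx, { Bucket: "example-bucket" })
+ * ```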
+ *
+ * The following operations are related to DeleteBucketOwnershipControls :
+ *
+ * GetBucketOwnershipControls
+ *
+ * PutBucketOwnershipControls
+ *
+ * [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+ * [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+ */
+ deleteBucketOwnershipControls(ctx: context.Context, params: DeleteBucketOwnershipControlsInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketOwnershipControlsOutput)
+ }
+ interface Client {
+ /**
+ * Deletes the policy of a specified bucket.
+ *
+ * Directory buckets - For directory buckets, you must make requests for this API
+ * operation to the Regional endpoint. These endpoints support path-style requests
+ * in the format https://s3express-control.region_code.amazonaws.com/bucket-name .
+ * Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in
+ * the Amazon S3 User Guide.
+ *
+ * Permissions If you are using an identity other than the root user of the Amazon
+ * Web Services account that owns the bucket, the calling identity must both have
+ * the DeleteBucketPolicy permissions on the specified bucket and belong to the
+ * bucket owner's account in order to use this operation.
+ *
+ * If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403
+ * Access Denied error. If you have the correct permissions, but you're not using
+ * an identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+ * Method Not Allowed error.
+ *
+ * To ensure that bucket owners don't inadvertently lock themselves out of their
+ * own buckets, the root principal in a bucket owner's Amazon Web Services account
+ * can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API
+ * actions, even if their bucket policy explicitly denies the root principal's
+ * access. Bucket owner root principals can only be blocked from performing these
+ * API actions by VPC endpoint policies and Amazon Web Services Organizations
+ * policies.
+ *
+ * ```
+ * - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is
+ * required in a policy. For more information about bucket policies for general
+ * purpose buckets, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide.
+ *
+ * - Directory bucket permissions - To grant access to this API operation, you
+ * must have the s3express:DeleteBucketPolicy permission in an IAM identity-based
+ * policy instead of a bucket policy. Cross-account access to this API operation
+ * isn't supported. This operation can only be performed by the Amazon Web Services
+ * account that owns the resource. For more information about directory bucket
+ * policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
+ * ```
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * s3express-control.region.amazonaws.com .
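+ *
+ * A minimal usage sketch (illustrative only; `client`, `ctx`, and the bucket
+ * name are assumed placeholders):
+ *
+ * ```
+ * client.deleteBucketPolicy(ctx, { Bucket: "example-bucket" })
+ * ```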
+ *
+ * The following operations are related to DeleteBucketPolicy :
+ *
+ * [CreateBucket]
+ *
+ * [DeleteObject]
+ *
+ * [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+ * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+ * [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+ * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+ * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+ */
+ deleteBucketPolicy(ctx: context.Context, params: DeleteBucketPolicyInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketPolicyOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Deletes the replication configuration from the bucket.
+ *
+ * To use this operation, you must have permissions to perform the
+ * s3:PutReplicationConfiguration action. The bucket owner has these permissions by
+ * default and can grant them to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+ * and [Managing Access Permissions to Your Amazon S3 Resources].
+ *
+ * It can take a while for the deletion of a replication configuration to fully
+ * propagate.
+ *
+ * For information about replication configuration, see [Replication] in the Amazon S3 User
+ * Guide.
+ *
+ * The following operations are related to DeleteBucketReplication :
+ *
+ * [PutBucketReplication]
+ *
+ * [GetBucketReplication]
+ *
+ * [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html
+ * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ * [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
+ * [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+ * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ */
+ deleteBucketReplication(ctx: context.Context, params: DeleteBucketReplicationInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketReplicationOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Deletes the tags from the bucket.
+ *
+ * To use this operation, you must have permission to perform the
+ * s3:PutBucketTagging action. By default, the bucket owner has this permission and
+ * can grant this permission to others.
+ *
+ * The following operations are related to DeleteBucketTagging :
+ *
+ * [GetBucketTagging]
+ *
+ * [PutBucketTagging]
+ *
+ * [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html
+ * [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html
+ */
+ deleteBucketTagging(ctx: context.Context, params: DeleteBucketTaggingInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketTaggingOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * This action removes the website configuration for a bucket.
Amazon S3 returns a + * 200 OK response upon successfully deleting a website configuration on the + * specified bucket. You will get a 200 OK response if the website configuration + * you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 + * response if the bucket specified in the request does not exist. + * + * This DELETE action requires the S3:DeleteBucketWebsite permission. By default, + * only the bucket owner can delete the website configuration attached to a bucket. + * However, bucket owners can grant other users permission to delete the website + * configuration by writing a bucket policy granting them the + * S3:DeleteBucketWebsite permission. + * + * For more information about hosting websites, see [Hosting Websites on Amazon S3]. + * + * The following operations are related to DeleteBucketWebsite : + * + * [GetBucketWebsite] + * + * [PutBucketWebsite] + * + * [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html + * [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html + * [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + */ + deleteBucketWebsite(ctx: context.Context, params: DeleteBucketWebsiteInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteBucketWebsiteOutput) + } + interface Client { + /** + * Removes an object from a bucket. The behavior depends on the bucket's + * versioning state: + * + * ``` + * - If bucket versioning is not enabled, the operation permanently deletes the + * object. + * + * - If bucket versioning is enabled, the operation inserts a delete marker, + * which becomes the current version of the object. To permanently delete an object + * in a versioned bucket, you must include the object’s versionId in the request. + * For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket]. + * + * - If bucket versioning is suspended, the operation removes the object that + * has a null versionId , if there is one, and inserts a delete marker that + * becomes the current version of the object. If there isn't an object with a null + * versionId , and all versions of the object have a versionId , Amazon S3 does + * not remove the object and only inserts a delete marker. To permanently delete an + * object that has a versionId , you must include the object’s versionId in the + * request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets]. + * + * - Directory buckets - S3 Versioning isn't enabled and supported for directory + * buckets. For this API operation, only the null value of the version ID is + * supported by directory buckets. You can only specify null to the versionId + * query parameter in the request. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Zonal endpoint. These endpoints support + * virtual-hosted-style requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . + * Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the + * Amazon S3 User Guide. + * ``` + * + * To remove a specific version, you must use the versionId query parameter. Using + * this query parameter permanently deletes the version. If the object deleted is a + * delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. 
+ *
+ * If the object you want to delete is in a bucket where the bucket versioning
+ * configuration is MFA Delete enabled, you must include the x-amz-mfa request
+ * header in the DELETE versionId request. Requests that include x-amz-mfa must
+ * use HTTPS. For more information about MFA Delete, see [Using MFA Delete]in the Amazon S3 User
+ * Guide. To see sample requests that use versioning, see [Sample Request].
+ *
+ * Directory buckets - MFA delete is not supported by directory buckets.
+ *
+ * You can delete objects by explicitly calling DELETE Object or by calling
+ * [PutBucketLifecycle] to enable Amazon S3 to remove them for you. If you want to
+ * block users or accounts from removing or deleting objects from your bucket, you
+ * must deny them the s3:DeleteObject , s3:DeleteObjectVersion , and
+ * s3:PutLifeCycleConfiguration actions.
+ *
+ * Directory buckets - S3 Lifecycle is not supported by directory buckets.
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - The following permissions are required
+ * in your policies when your DeleteObject request includes specific headers.
+ *
+ * - s3:DeleteObject - To delete an object from a bucket, you must always have
+ * the s3:DeleteObject permission.
+ *
+ * - s3:DeleteObjectVersion - To delete a specific version of an object from a
+ * versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. Amazon Web Services CLI or SDKs create session and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession]CreateSession .
+ * ```
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * Bucket_name.s3express-az_id.region.amazonaws.com .
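+ *
+ * A minimal usage sketch (not part of the AWS docs; assumes an already
+ * constructed client and ctx , and that the input fields mirror the Go SDK's
+ * DeleteObjectInput struct; bucket, key, and version ID are placeholders):
+ *
+ * ```
+ * // permanently remove one specific version from a versioning-enabled bucket
+ * const out = client.deleteObject(ctx, {
+ *     Bucket:    "example-bucket",
+ *     Key:       "photos/2006/February/sample.jpg",
+ *     VersionId: "example-version-id",
+ * })
+ * // out.DeleteMarker and out.VersionId describe what was deleted
+ * ```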
+ * + * The following action is related to DeleteObject : + * + * [PutObject] + * + * [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html + * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + * [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + * [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html + * [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html + */ + deleteObject(ctx: context.Context, params: DeleteObjectInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteObjectOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Removes the entire tag set from the specified object. For more information + * about managing object tags, see [Object Tagging]. + * + * To use this operation, you must have permission to perform the + * s3:DeleteObjectTagging action. + * + * To delete tags of a specific object version, add the versionId query parameter + * in the request. You will need permission for the s3:DeleteObjectVersionTagging + * action. + * + * The following operations are related to DeleteObjectTagging : + * + * [PutObjectTagging] + * + * [GetObjectTagging] + * + * [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html + * [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html + * [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html + */ + deleteObjectTagging(ctx: context.Context, params: DeleteObjectTaggingInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteObjectTaggingOutput) + } + interface Client { + /** + * This operation enables you to delete multiple objects from a bucket using a + * single HTTP request. If you know the object keys that you want to delete, then + * this operation provides a suitable alternative to sending individual delete + * requests, reducing per-request overhead. + * + * The request can contain a list of up to 1000 keys that you want to delete. In + * the XML, you provide the object key names, and optionally, version IDs if you + * want to delete a specific version of the object from a versioning-enabled + * bucket. For each key, Amazon S3 performs a delete operation and returns the + * result of that delete, success or failure, in the response. Note that if the + * object specified in the request is not found, Amazon S3 returns the result as + * deleted. + * + * ``` + * - Directory buckets - S3 Versioning isn't enabled and supported for directory + * buckets. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Zonal endpoint. These endpoints support + * virtual-hosted-style requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . + * Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the + * Amazon S3 User Guide. 
+ * ```
+ *
+ * The operation supports two modes for the response: verbose and quiet. By
+ * default, the operation uses verbose mode in which the response includes the
+ * result of deletion of each key in your request. In quiet mode the response
+ * includes only keys where the delete operation encountered an error. For a
+ * successful deletion in quiet mode, the operation does not return any
+ * information about the delete in the response body.
+ *
+ * When performing this action on an MFA Delete enabled bucket that attempts to
+ * delete any versioned objects, you must include an MFA token. If you do not
+ * provide one, the entire request will fail, even if there are non-versioned
+ * objects you are trying to delete. If you provide an invalid token, whether there
+ * are versioned keys in the request or not, the entire Multi-Object Delete request
+ * will fail. For information about MFA Delete, see [MFA Delete]in the Amazon S3 User Guide.
+ *
+ * Directory buckets - MFA delete is not supported by directory buckets.
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - The following permissions are required
+ * in your policies when your DeleteObjects request includes specific headers.
+ *
+ * - s3:DeleteObject - To delete an object from a bucket, you must always specify
+ * the s3:DeleteObject permission.
+ *
+ * - s3:DeleteObjectVersion - To delete a specific version of an object from a
+ * versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
+ * permission.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. Amazon Web Services CLI or SDKs create session and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession]CreateSession .
+ * ```
+ *
+ * Content-MD5 request header
+ *
+ * ```
+ * - General purpose bucket - The Content-MD5 request header is required for all
+ * Multi-Object Delete requests. Amazon S3 uses the header value to ensure that
+ * your request body has not been altered in transit.
+ *
+ * - Directory bucket - The Content-MD5 request header or an additional checksum
+ * request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c ,
+ * x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all
+ * Multi-Object Delete requests.
+ * ```
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * Bucket_name.s3express-az_id.region.amazonaws.com .
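+ *
+ * A short usage sketch (illustrative only, not part of the AWS docs; assumes an
+ * initialized client and ctx , with input fields mirroring the Go SDK's
+ * DeleteObjectsInput; bucket and keys are placeholders):
+ *
+ * ```
+ * // batch-delete two keys in quiet mode (only failed deletes are reported)
+ * const out = client.deleteObjects(ctx, {
+ *     Bucket: "example-bucket",
+ *     Delete: {
+ *         Objects: [{ Key: "logs/2024/01.txt" }, { Key: "logs/2024/02.txt" }],
+ *         Quiet:   true,
+ *     },
+ * })
+ * // out.Errors lists any keys that could not be deleted
+ * ```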
+ * + * The following operations are related to DeleteObjects : + * + * [CreateMultipartUpload] + * + * [UploadPart] + * + * [CompleteMultipartUpload] + * + * [ListParts] + * + * [AbortMultipartUpload] + * + * [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + * [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + * [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + * [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + * [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete + * [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + */ + deleteObjects(ctx: context.Context, params: DeleteObjectsInput, ...optFns: ((_arg0: Options) => void)[]): (DeleteObjectsOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use + * this operation, you must have the s3:PutBucketPublicAccessBlock permission. For + * more information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * The following operations are related to DeletePublicAccessBlock : + * + * [Using Amazon S3 Block Public Access] + * + * [GetPublicAccessBlock] + * + * [PutPublicAccessBlock] + * + * [GetBucketPolicyStatus] + * + * [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html + * [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html + */ + deletePublicAccessBlock(ctx: context.Context, params: DeletePublicAccessBlockInput, ...optFns: ((_arg0: Options) => void)[]): (DeletePublicAccessBlockOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * This implementation of the GET action uses the accelerate subresource to return + * the Transfer Acceleration state of a bucket, which is either Enabled or + * Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that + * enables you to perform faster data transfers to and from Amazon S3. + * + * To use this operation, you must have permission to perform the + * s3:GetAccelerateConfiguration action. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. 
For more + * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide. + * + * You set the Transfer Acceleration state of an existing bucket to Enabled or + * Suspended by using the [PutBucketAccelerateConfiguration] operation. + * + * A GET accelerate request does not return a state value for a bucket that has no + * transfer acceleration state. A bucket has no Transfer Acceleration state if a + * state has never been set on the bucket. + * + * For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User + * Guide. + * + * The following operations are related to GetBucketAccelerateConfiguration : + * + * [PutBucketAccelerateConfiguration] + * + * [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + */ + getBucketAccelerateConfiguration(ctx: context.Context, params: GetBucketAccelerateConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketAccelerateConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * This implementation of the GET action uses the acl subresource to return the + * access control list (ACL) of a bucket. To use GET to return the ACL of the + * bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission + * is granted to the anonymous user, you can return the ACL of the bucket without + * using an authorization header. + * + * When you use this API operation with an access point, provide the alias of the + * access point in place of the bucket name. + * + * When you use this API operation with an Object Lambda access point, provide the + * alias of the Object Lambda access point in place of the bucket name. If the + * Object Lambda access point alias in a request is not valid, the error code + * InvalidAccessPointAliasError is returned. For more information about + * InvalidAccessPointAliasError , see [List of Error Codes]. + * + * If your bucket uses the bucket owner enforced setting for S3 Object Ownership, + * requests to read ACLs are still supported and return the + * bucket-owner-full-control ACL with the owner being the account that created the + * bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. + * + * The following operations are related to GetBucketAcl : + * + * [ListObjects] + * + * [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + */ + getBucketAcl(ctx: context.Context, params: GetBucketAclInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketAclOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. 
+ * + * This implementation of the GET action returns an analytics configuration + * (identified by the analytics configuration ID) from the bucket. + * + * To use this operation, you must have permissions to perform the + * s3:GetAnalyticsConfiguration action. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. For more + * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide. + * + * For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User + * Guide. + * + * The following operations are related to GetBucketAnalyticsConfiguration : + * + * [DeleteBucketAnalyticsConfiguration] + * + * [ListBucketAnalyticsConfigurations] + * + * [PutBucketAnalyticsConfiguration] + * + * [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html + * [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html + * [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + */ + getBucketAnalyticsConfiguration(ctx: context.Context, params: GetBucketAnalyticsConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketAnalyticsConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Returns the Cross-Origin Resource Sharing (CORS) configuration information set + * for the bucket. + * + * To use this operation, you must have permission to perform the s3:GetBucketCORS + * action. By default, the bucket owner has this permission and can grant it to + * others. + * + * When you use this API operation with an access point, provide the alias of the + * access point in place of the bucket name. + * + * When you use this API operation with an Object Lambda access point, provide the + * alias of the Object Lambda access point in place of the bucket name. If the + * Object Lambda access point alias in a request is not valid, the error code + * InvalidAccessPointAliasError is returned. For more information about + * InvalidAccessPointAliasError , see [List of Error Codes]. + * + * For more information about CORS, see [Enabling Cross-Origin Resource Sharing]. 
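+ *
+ * An illustrative call (not part of the AWS docs; assumes an initialized client
+ * and ctx , with fields mirroring the Go SDK's GetBucketCorsInput):
+ *
+ * ```
+ * const out = client.getBucketCors(ctx, { Bucket: "example-bucket" })
+ * // out.CORSRules holds the bucket's CORS rules, if a configuration is set
+ * ```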
+ * + * The following operations are related to GetBucketCors : + * + * [PutBucketCors] + * + * [DeleteBucketCors] + * + * [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html + * [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html + */ + getBucketCors(ctx: context.Context, params: GetBucketCorsInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketCorsOutput) + } + interface Client { + /** + * Returns the default encryption configuration for an Amazon S3 bucket. By + * default, all buckets have a default encryption configuration that uses + * server-side encryption with Amazon S3 managed keys (SSE-S3). + * + * ``` + * - General purpose buckets - For information about the bucket default + * encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. + * + * - Directory buckets - For directory buckets, there are only two supported + * options for server-side encryption: SSE-S3 and SSE-KMS. For information about + * the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. + * ``` + * + * Permissions + * + * ``` + * - General purpose bucket permissions - The s3:GetEncryptionConfiguration + * permission is required in a policy. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. For more + * information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * - Directory bucket permissions - To grant access to this API operation, you + * must have the s3express:GetEncryptionConfiguration permission in an IAM + * identity-based policy instead of a bucket policy. Cross-account access to this + * API operation isn't supported. This operation can only be performed by the + * Amazon Web Services account that owns the resource. For more information about + * directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * s3express-control.region.amazonaws.com . 
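+ *
+ * A minimal usage sketch (not part of the AWS docs; assumes an initialized
+ * client and ctx ; field names mirror the Go SDK's GetBucketEncryptionInput):
+ *
+ * ```
+ * const out = client.getBucketEncryption(ctx, { Bucket: "example-bucket" })
+ * // out.ServerSideEncryptionConfiguration.Rules describes the default
+ * // encryption applied to new objects (e.g. SSE-S3 or SSE-KMS)
+ * ```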
+ * + * The following operations are related to GetBucketEncryption : + * + * [PutBucketEncryption] + * + * [DeleteBucketEncryption] + * + * [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html + * [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html + * [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html + * [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html + */ + getBucketEncryption(ctx: context.Context, params: GetBucketEncryptionInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketEncryptionOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Gets the S3 Intelligent-Tiering configuration from the specified bucket. + * + * The S3 Intelligent-Tiering storage class is designed to optimize storage costs + * by automatically moving data to the most cost-effective storage access tier, + * without performance impact or operational overhead. S3 Intelligent-Tiering + * delivers automatic cost savings in three low latency and high throughput access + * tiers. To get the lowest storage cost on data that can be accessed in minutes to + * hours, you can choose to activate additional archiving capabilities. + * + * The S3 Intelligent-Tiering storage class is the ideal storage class for data + * with unknown, changing, or unpredictable access patterns, independent of object + * size or retention period. If the size of an object is less than 128 KB, it is + * not monitored and not eligible for auto-tiering. Smaller objects can be stored, + * but they are always charged at the Frequent Access tier rates in the S3 + * Intelligent-Tiering storage class. + * + * For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
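+ *
+ * A short usage sketch (illustrative only, not part of the AWS docs; assumes an
+ * initialized client and ctx ; the Id value is the placeholder name of an
+ * existing S3 Intelligent-Tiering configuration):
+ *
+ * ```
+ * const out = client.getBucketIntelligentTieringConfiguration(ctx, {
+ *     Bucket: "example-bucket",
+ *     Id:     "ExampleConfig",
+ * })
+ * // out.IntelligentTieringConfiguration holds the matching configuration
+ * ```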
+ * + * Operations related to GetBucketIntelligentTieringConfiguration include: + * + * [DeleteBucketIntelligentTieringConfiguration] + * + * [PutBucketIntelligentTieringConfiguration] + * + * [ListBucketIntelligentTieringConfigurations] + * + * [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html + * [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html + * [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + * [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html + */ + getBucketIntelligentTieringConfiguration(ctx: context.Context, params: GetBucketIntelligentTieringConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketIntelligentTieringConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Returns an inventory configuration (identified by the inventory configuration + * ID) from the bucket. + * + * To use this operation, you must have permissions to perform the + * s3:GetInventoryConfiguration action. The bucket owner has this permission by + * default and can grant this permission to others. For more information about + * permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. + * + * The following operations are related to GetBucketInventoryConfiguration : + * + * [DeleteBucketInventoryConfiguration] + * + * [ListBucketInventoryConfigurations] + * + * [PutBucketInventoryConfiguration] + * + * [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html + * [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html + */ + getBucketInventoryConfiguration(ctx: context.Context, params: GetBucketInventoryConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketInventoryConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Bucket lifecycle configuration now supports specifying a lifecycle rule using + * an object key name prefix, one or more object tags, object size, or any + * combination of these. Accordingly, this section describes the latest API. The + * previous version of the API supported filtering based only on an object key name + * prefix, which is supported for backward compatibility. For the related API + * description, see [GetBucketLifecycle]. 
The
+ * response describes the new filter element that you can use to specify a filter
+ * to select a subset of objects to which the rule applies. If you are using a
+ * previous version of the lifecycle configuration, it still works. For the earlier
+ * action, see [GetBucketLifecycle].
+ *
+ * Returns the lifecycle configuration information set on the bucket. For
+ * information about lifecycle configuration, see [Object Lifecycle Management].
+ *
+ * To use this operation, you must have permission to perform the
+ * s3:GetLifecycleConfiguration action. The bucket owner has this permission by
+ * default. The bucket owner can grant this permission to others. For more
+ * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources].
+ *
+ * GetBucketLifecycleConfiguration has the following special error:
+ *
+ * ```
+ * - Error code: NoSuchLifecycleConfiguration
+ *
+ * - Description: The lifecycle configuration does not exist.
+ *
+ * - HTTP Status Code: 404 Not Found
+ *
+ * - SOAP Fault Code Prefix: Client
+ * ```
+ *
+ * The following operations are related to GetBucketLifecycleConfiguration :
+ *
+ * [GetBucketLifecycle]
+ *
+ * [PutBucketLifecycle]
+ *
+ * [DeleteBucketLifecycle]
+ *
+ * [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html
+ * [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+ * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ * [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+ * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ * [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
+ */
+ getBucketLifecycleConfiguration(ctx: context.Context, params: GetBucketLifecycleConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketLifecycleConfigurationOutput)
+ }
+ // @ts-ignore
+ import smithyxml = xml
+ // @ts-ignore
+ import smithyio = io
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns the Region the bucket resides in. You set the bucket's Region using the
+ * LocationConstraint request parameter in a CreateBucket request. For more
+ * information, see [CreateBucket].
+ *
+ * When you use this API operation with an access point, provide the alias of the
+ * access point in place of the bucket name.
+ *
+ * When you use this API operation with an Object Lambda access point, provide the
+ * alias of the Object Lambda access point in place of the bucket name. If the
+ * Object Lambda access point alias in a request is not valid, the error code
+ * InvalidAccessPointAliasError is returned. For more information about
+ * InvalidAccessPointAliasError , see [List of Error Codes].
+ *
+ * We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For
+ * backward compatibility, Amazon S3 continues to support GetBucketLocation.
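+ *
+ * A minimal usage sketch (not part of the AWS docs; assumes an initialized
+ * client and ctx ):
+ *
+ * ```
+ * const out = client.getBucketLocation(ctx, { Bucket: "example-bucket" })
+ * // out.LocationConstraint is the bucket Region, e.g. "eu-west-1";
+ * // buckets in us-east-1 return an empty value
+ * ```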
+ * + * The following operations are related to GetBucketLocation : + * + * [GetObject] + * + * [CreateBucket] + * + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + * [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html + */ + getBucketLocation(ctx: context.Context, params: GetBucketLocationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketLocationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Returns the logging status of a bucket and the permissions users have to view + * and modify that status. + * + * The following operations are related to GetBucketLogging : + * + * [CreateBucket] + * + * [PutBucketLogging] + * + * [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + */ + getBucketLogging(ctx: context.Context, params: GetBucketLoggingInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketLoggingOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Gets a metrics configuration (specified by the metrics configuration ID) from + * the bucket. Note that this doesn't include the daily storage metrics. + * + * To use this operation, you must have permissions to perform the + * s3:GetMetricsConfiguration action. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. For more + * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. + * + * The following operations are related to GetBucketMetricsConfiguration : + * + * [PutBucketMetricsConfiguration] + * + * [DeleteBucketMetricsConfiguration] + * + * [ListBucketMetricsConfigurations] + * + * [Monitoring Metrics with Amazon CloudWatch] + * + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html + * [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html + * [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html + * [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + */ + getBucketMetricsConfiguration(ctx: context.Context, params: GetBucketMetricsConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketMetricsConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Returns the notification configuration of a bucket. 
+ *
+ * If notifications are not enabled on the bucket, the action returns an empty
+ * NotificationConfiguration element.
+ *
+ * By default, you must be the bucket owner to read the notification configuration
+ * of a bucket. However, the bucket owner can use a bucket policy to grant
+ * permission to other users to read this configuration with the
+ * s3:GetBucketNotification permission.
+ *
+ * When you use this API operation with an access point, provide the alias of the
+ * access point in place of the bucket name.
+ *
+ * When you use this API operation with an Object Lambda access point, provide the
+ * alias of the Object Lambda access point in place of the bucket name. If the
+ * Object Lambda access point alias in a request is not valid, the error code
+ * InvalidAccessPointAliasError is returned. For more information about
+ * InvalidAccessPointAliasError , see [List of Error Codes].
+ *
+ * For more information about setting and reading the notification configuration
+ * on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies].
+ *
+ * The following action is related to GetBucketNotification :
+ *
+ * [PutBucketNotification]
+ *
+ * [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+ * [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+ * [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html
+ */
+ getBucketNotificationConfiguration(ctx: context.Context, params: GetBucketNotificationConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketNotificationConfigurationOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you
+ * must have the s3:GetBucketOwnershipControls permission. For more information
+ * about Amazon S3 permissions, see [Specifying permissions in a policy].
+ *
+ * For information about Amazon S3 Object Ownership, see [Using Object Ownership].
+ *
+ * The following operations are related to GetBucketOwnershipControls :
+ *
+ * [PutBucketOwnershipControls]
+ *
+ * [DeleteBucketOwnershipControls]
+ *
+ * [PutBucketOwnershipControls]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketOwnershipControls.html
+ * [DeleteBucketOwnershipControls]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketOwnershipControls.html
+ * [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ * [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html
+ */
+ getBucketOwnershipControls(ctx: context.Context, params: GetBucketOwnershipControlsInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketOwnershipControlsOutput)
+ }
+ interface Client {
+ /**
+ * Returns the policy of a specified bucket.
+ *
+ * Directory buckets - For directory buckets, you must make requests for this API
+ * operation to the Regional endpoint. These endpoints support path-style requests
+ * in the format https://s3express-control.region_code.amazonaws.com/bucket-name .
+ * Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in
+ * the Amazon S3 User Guide.
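+ *
+ * An illustrative call (not part of the AWS docs; assumes an initialized client
+ * and ctx ):
+ *
+ * ```
+ * const out = client.getBucketPolicy(ctx, { Bucket: "example-bucket" })
+ * // out.Policy is the bucket policy as a JSON document string
+ * ```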
+ * + * Permissions If you are using an identity other than the root user of the Amazon + * Web Services account that owns the bucket, the calling identity must both have + * the GetBucketPolicy permissions on the specified bucket and belong to the + * bucket owner's account in order to use this operation. + * + * If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access + * Denied error. If you have the correct permissions, but you're not using an + * identity that belongs to the bucket owner's account, Amazon S3 returns a 405 + * Method Not Allowed error. + * + * To ensure that bucket owners don't inadvertently lock themselves out of their + * own buckets, the root principal in a bucket owner's Amazon Web Services account + * can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API + * actions, even if their bucket policy explicitly denies the root principal's + * access. Bucket owner root principals can only be blocked from performing these + * API actions by VPC endpoint policies and Amazon Web Services Organizations + * policies. + * + * ``` + * - General purpose bucket permissions - The s3:GetBucketPolicy permission is + * required in a policy. For more information about general purpose buckets bucket + * policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. + * + * - Directory bucket permissions - To grant access to this API operation, you + * must have the s3express:GetBucketPolicy permission in an IAM identity-based + * policy instead of a bucket policy. Cross-account access to this API operation + * isn't supported. This operation can only be performed by the Amazon Web Services + * account that owns the resource. For more information about directory bucket + * policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. + * ``` + * + * Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] + * in the Amazon S3 User Guide. + * + * Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * s3express-control.region.amazonaws.com . + * + * The following action is related to GetBucketPolicy : + * + * [GetObject] + * + * [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html + * [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html + */ + getBucketPolicy(ctx: context.Context, params: GetBucketPolicyInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketPolicyOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Retrieves the policy status for an Amazon S3 bucket, indicating whether the + * bucket is public. 
To use this operation, you must have the
+ * s3:GetBucketPolicyStatus permission. For more information about Amazon S3
+ * permissions, see [Specifying Permissions in a Policy].
+ *
+ * For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"].
+ *
+ * The following operations are related to GetBucketPolicyStatus :
+ *
+ * [Using Amazon S3 Block Public Access]
+ *
+ * [GetPublicAccessBlock]
+ *
+ * [PutPublicAccessBlock]
+ *
+ * [DeletePublicAccessBlock]
+ *
+ * [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+ * [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html
+ * [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+ * [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+ * [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+ * [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
+ */
+ getBucketPolicyStatus(ctx: context.Context, params: GetBucketPolicyStatusInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketPolicyStatusOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns the replication configuration of a bucket.
+ *
+ * It can take a while for a replication configuration put or delete to propagate
+ * to all Amazon S3 systems. Therefore, a get request soon after put or delete can
+ * return a wrong result.
+ *
+ * For information about replication configuration, see [Replication] in the Amazon S3 User
+ * Guide.
+ *
+ * This action requires permissions for the s3:GetReplicationConfiguration action.
+ * For more information about permissions, see [Using Bucket Policies and User Policies].
+ *
+ * If you include the Filter element in a replication configuration, you must also
+ * include the DeleteMarkerReplication and Priority elements. The response also
+ * returns those elements.
+ *
+ * For information about GetBucketReplication errors, see [List of replication-related error codes].
+ *
+ * The following operations are related to GetBucketReplication :
+ *
+ * [PutBucketReplication]
+ *
+ * [DeleteBucketReplication]
+ *
+ * [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
+ * [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+ * [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+ * [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList
+ * [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html
+ */
+ getBucketReplication(ctx: context.Context, params: GetBucketReplicationInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketReplicationOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns the request payment configuration of a bucket. To use this version of
+ * the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets].
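+ *
+ * A minimal usage sketch (not part of the AWS docs; assumes an initialized
+ * client and ctx ):
+ *
+ * ```
+ * const out = client.getBucketRequestPayment(ctx, { Bucket: "example-bucket" })
+ * // out.Payer is either "BucketOwner" or "Requester"
+ * ```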
+ *
+ * The following operations are related to GetBucketRequestPayment :
+ *
+ * [ListObjects]
+ *
+ * [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+ * [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+ */
+ getBucketRequestPayment(ctx: context.Context, params: GetBucketRequestPaymentInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketRequestPaymentOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns the tag set associated with the bucket.
+ *
+ * To use this operation, you must have permission to perform the
+ * s3:GetBucketTagging action. By default, the bucket owner has this permission and
+ * can grant this permission to others.
+ *
+ * GetBucketTagging has the following special error:
+ *
+ * ```
+ * - Error code: NoSuchTagSet
+ *
+ * - Description: There is no tag set associated with the bucket.
+ * ```
+ *
+ * The following operations are related to GetBucketTagging :
+ *
+ * [PutBucketTagging]
+ *
+ * [DeleteBucketTagging]
+ *
+ * [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html
+ * [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html
+ */
+ getBucketTagging(ctx: context.Context, params: GetBucketTaggingInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketTaggingOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns the versioning state of a bucket.
+ *
+ * To retrieve the versioning state of a bucket, you must be the bucket owner.
+ *
+ * This implementation also returns the MFA Delete status of the versioning state.
+ * If the MFA Delete status is enabled , the bucket owner must use an
+ * authentication device to change the versioning state of the bucket.
+ *
+ * The following operations are related to GetBucketVersioning :
+ *
+ * [GetObject]
+ *
+ * [PutObject]
+ *
+ * [DeleteObject]
+ *
+ * [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+ * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ */
+ getBucketVersioning(ctx: context.Context, params: GetBucketVersioningInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketVersioningOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns the website configuration for a bucket. To host a website on Amazon S3,
+ * you can configure a bucket as a website by adding a website configuration. For
+ * more information about hosting websites, see [Hosting Websites on Amazon S3].
+ *
+ * This GET action requires the S3:GetBucketWebsite permission. By default, only
+ * the bucket owner can read the bucket website configuration. However, bucket
+ * owners can allow other users to read the website configuration by writing a
+ * bucket policy granting them the S3:GetBucketWebsite permission.
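+ *
+ * A short usage sketch (illustrative only, not part of the AWS docs; assumes an
+ * initialized client and ctx ):
+ *
+ * ```
+ * const out = client.getBucketWebsite(ctx, { Bucket: "example-bucket" })
+ * // out.IndexDocument and out.ErrorDocument describe the static site setup
+ * ```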
+ *
+ * The following operations are related to GetBucketWebsite :
+ *
+ * [DeleteBucketWebsite]
+ *
+ * [PutBucketWebsite]
+ *
+ * [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html
+ * [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+ * [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html
+ */
+ getBucketWebsite(ctx: context.Context, params: GetBucketWebsiteInput, ...optFns: ((_arg0: Options) => void)[]): (GetBucketWebsiteOutput)
+ }
+ interface Client {
+ /**
+ * Retrieves an object from Amazon S3.
+ *
+ * In the GetObject request, specify the full key name for the object.
+ *
+ * General purpose buckets - Both the virtual-hosted-style requests and the
+ * path-style requests are supported. For a virtual hosted-style request example,
+ * if you have the object photos/2006/February/sample.jpg , specify the object key
+ * name as /photos/2006/February/sample.jpg . For a path-style request example, if
+ * you have the object photos/2006/February/sample.jpg in the bucket named
+ * examplebucket , specify the object key name as
+ * /examplebucket/photos/2006/February/sample.jpg . For more information about
+ * request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide.
+ *
+ * Directory buckets - Only virtual-hosted-style requests are supported. For a
+ * virtual hosted-style request example, if you have the object
+ * photos/2006/February/sample.jpg in the bucket named
+ * examplebucket--use1-az5--x-s3 , specify the object key name as
+ * /photos/2006/February/sample.jpg . Also, when you make requests to this API
+ * operation, your requests are sent to the Zonal endpoint. These endpoints support
+ * virtual-hosted-style requests in the format
+ * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
+ * requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User
+ * Guide.
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - You must have the required permissions
+ * in a policy. To use GetObject , you must have the READ access to the object
+ * (or version). If you grant READ access to the anonymous user, the GetObject
+ * operation returns the object without using an authorization header. For more
+ * information, see [Specifying permissions in a policy]in the Amazon S3 User Guide.
+ *
+ * If you include a versionId in your request header, you must have the
+ * s3:GetObjectVersion permission to access a specific version of an object. The
+ * s3:GetObject permission is not required in this scenario.
+ *
+ * If you request the current version of an object without a specific versionId
+ * in the request header, only the s3:GetObject permission is required. The
+ * s3:GetObjectVersion permission is not required in this scenario.
+ *
+ * If the object that you request doesn’t exist, the error that Amazon S3
+ * returns depends on whether you also have the s3:ListBucket permission.
+ *
+ * - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
+ * HTTP status code 404 Not Found error.
+ *
+ * - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
+ * status code 403 Access Denied error.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. Amazon Web Services CLI or SDKs create session and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession]CreateSession .
+ * ```
+ *
+ * If the object is encrypted using SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+ * and KMS key policies for the KMS key.
+ *
+ * Storage classes If the object you are retrieving is stored in the S3 Glacier
+ * Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the
+ * S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep
+ * Archive Access tier, before you can retrieve the object you must first restore a
+ * copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For
+ * information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide.
+ *
+ * Directory buckets - For directory buckets, only the S3 Express One Zone storage
+ * class is supported to store newly created objects. Unsupported storage class
+ * values won't write a destination object and will respond with the HTTP status
+ * code 400 Bad Request .
+ *
+ * Encryption Encryption request headers, like x-amz-server-side-encryption ,
+ * should not be sent for GetObject requests if your object uses server-side
+ * encryption with Amazon S3 managed encryption keys (SSE-S3), server-side
+ * encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer
+ * server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you
+ * include the header in your GetObject requests for the object that uses these
+ * types of keys, you’ll get an HTTP 400 Bad Request error.
+ *
+ * Directory buckets - For directory buckets, there are only two supported options
+ * for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more
+ * information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide.
+ *
+ * Overriding response header values through the request There are times when you
+ * want to override certain response header values of a GetObject response. For
+ * example, you might override the Content-Disposition response header value
+ * through your GetObject request.
+ *
+ * You can override values for a set of response headers. These modified response
+ * header values are included only in a successful response, that is, when the HTTP
+ * status code 200 OK is returned. The headers you can override using the
+ * following query parameters in the request are a subset of the headers that
+ * Amazon S3 accepts when you create an object.
+ * + * The response headers that you can override for the GetObject response are + * Cache-Control , Content-Disposition , Content-Encoding , Content-Language , + * Content-Type , and Expires . + * + * To override values for a set of response headers in the GetObject response, you + * can use the following query parameters in the request. + * + * ``` + * - response-cache-control + * + * - response-content-disposition + * + * - response-content-encoding + * + * - response-content-language + * + * - response-content-type + * + * - response-expires + * ``` + * + * When you use these parameters, you must sign the request by using either an + * Authorization header or a presigned URL. These parameters cannot be used with an + * unsigned (anonymous) request. + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . + * + * The following operations are related to GetObject : + * + * [ListBuckets] + * + * [GetObjectAcl] + * + * [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + * [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html + * [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket + * [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html + * [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html + * [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + * + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + */ + getObject(ctx: context.Context, params: GetObjectInput, ...optFns: ((_arg0: Options) => void)[]): (GetObjectOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Returns the access control list (ACL) of an object. To use this operation, you + * must have s3:GetObjectAcl permissions or READ_ACP access to the object. For + * more information, see [Mapping of ACL permissions and access policy permissions]in the Amazon S3 User Guide + * + * This functionality is not supported for Amazon S3 on Outposts. + * + * By default, GET returns ACL information about the current version of an object. + * To return ACL information about a different version, use the versionId + * subresource. + * + * If your bucket uses the bucket owner enforced setting for S3 Object Ownership, + * requests to read ACLs are still supported and return the + * bucket-owner-full-control ACL with the owner being the account that created the + * bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. 
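+ *
+ * A minimal usage sketch (not part of the AWS docs; assumes an initialized
+ * client and ctx ; fields mirror the Go SDK's GetObjectAclInput):
+ *
+ * ```
+ * const out = client.getObjectAcl(ctx, {
+ *     Bucket: "example-bucket",
+ *     Key:    "photos/2006/February/sample.jpg",
+ * })
+ * // out.Owner identifies the object owner; out.Grants lists the ACL grants
+ * ```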
+ *
+ * The following operations are related to GetObjectAcl :
+ *
+ * [GetObject]
+ *
+ * [GetObjectAttributes]
+ *
+ * [DeleteObject]
+ *
+ * [PutObject]
+ *
+ * [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+ * [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping
+ * [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+ * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ * [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ */
+ getObjectAcl(ctx: context.Context, params: GetObjectAclInput, ...optFns: ((_arg0: Options) => void)[]): (GetObjectAclOutput)
+ }
+ interface Client {
+ /**
+ * Retrieves all the metadata from an object without returning the object itself.
+ * This operation is useful if you're interested only in an object's metadata.
+ *
+ * GetObjectAttributes combines the functionality of HeadObject and ListParts . All
+ * of the data returned with each of those individual calls can be returned with a
+ * single call to GetObjectAttributes .
+ *
+ * Directory buckets - For directory buckets, you must make requests for this API
+ * operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+ * requests in the format
+ * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
+ * requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+ * Guide.
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - To use GetObjectAttributes , you must
+ * have READ access to the object. The permissions that you need to use this
+ * operation depend on whether the bucket is versioned. If the bucket is versioned,
+ * you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes
+ * permissions for this operation. If the bucket is not versioned, you need the
+ * s3:GetObject and s3:GetObjectAttributes permissions. For more information, see [Specifying Permissions in a Policy]
+ * in the Amazon S3 User Guide. If the object that you request does not exist, the
+ * error Amazon S3 returns depends on whether you also have the s3:ListBucket
+ * permission.
+ *
+ * - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
+ * HTTP status code 404 Not Found ("no such key") error.
+ *
+ * - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP
+ * status code 403 Forbidden ("access denied") error.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession] API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. The Amazon Web Services CLI and SDKs create and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession].
+ * ```
+ *
+ * If the object is encrypted with SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+ * and KMS key policies for the KMS key.
+ *
+ * Encryption Encryption request headers, like x-amz-server-side-encryption ,
+ * should not be sent for HEAD requests if your object uses server-side encryption
+ * with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side
+ * encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side
+ * encryption with Amazon S3 managed encryption keys (SSE-S3). The
+ * x-amz-server-side-encryption header is used when you PUT an object to S3 and
+ * want to specify the encryption method. If you include this header in a GET
+ * request for an object that uses these types of keys, you’ll get an HTTP 400 Bad
+ * Request error. This is because the encryption method can't be changed when you
+ * retrieve the object.
+ *
+ * If you encrypt an object by using server-side encryption with customer-provided
+ * encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+ * retrieve the metadata from the object, you must use the following headers to
+ * provide the encryption key for the server to be able to retrieve the object's
+ * metadata. The headers are:
+ *
+ * ```
+ * - x-amz-server-side-encryption-customer-algorithm
+ *
+ * - x-amz-server-side-encryption-customer-key
+ *
+ * - x-amz-server-side-encryption-customer-key-MD5
+ * ```
+ *
+ * For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+ *
+ * Directory bucket permissions - For directory buckets, there are only two
+ * supported options for server-side encryption: server-side encryption with Amazon
+ * S3 managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys
+ * (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses
+ * the desired encryption configuration and you don't override the bucket default
+ * encryption in your CreateSession requests or PUT object requests. Then, new
+ * objects are automatically encrypted with the desired encryption settings. For
+ * more information, see [Protecting data with server-side encryption] in the Amazon S3 User Guide. For more information about
+ * the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
+ *
+ * Versioning Directory buckets - S3 Versioning isn't enabled and supported for
+ * directory buckets. For this API operation, only the null value of the version
+ * ID is supported by directory buckets. You can only specify null to the versionId
+ * query parameter in the request.
+ *
+ * Conditional request headers Consider the following when using request headers:
+ *
+ * ```
+ * - If both of the If-Match and If-Unmodified-Since headers are present in the
+ * request as follows, then Amazon S3 returns the HTTP status code 200 OK and the
+ * data requested:
+ *
+ * - If-Match condition evaluates to true .
+ *
+ * - If-Unmodified-Since condition evaluates to false .
+ * ```
+ *
+ * For more information about conditional requests, see [RFC 7232].
+ * + * ``` + * - If both of the If-None-Match and If-Modified-Since headers are present in + * the request as follows, then Amazon S3 returns the HTTP status code 304 Not + * Modified : + * + * - If-None-Match condition evaluates to false . + * + * - If-Modified-Since condition evaluates to true . + * ``` + * + * For more information about conditional requests, see [RFC 7232]. + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . + * + * The following actions are related to GetObjectAttributes : + * + * [GetObject] + * + * [GetObjectAcl] + * + * [GetObjectLegalHold] + * + * [GetObjectLockConfiguration] + * + * [GetObjectRetention] + * + * [GetObjectTagging] + * + * [HeadObject] + * + * [ListParts] + * + * [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + * [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html + * [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + * [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + * [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html + * [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + * [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html + * [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + * [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html + * [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + */ + getObjectAttributes(ctx: context.Context, params: GetObjectAttributesInput, ...optFns: ((_arg0: Options) => void)[]): (GetObjectAttributesOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Gets an object's current legal hold status. For more information, see [Locking Objects]. + * + * This functionality is not supported for Amazon S3 on Outposts. + * + * The following action is related to GetObjectLegalHold : + * + * [GetObjectAttributes] + * + * [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html + * [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + */ + getObjectLegalHold(ctx: context.Context, params: GetObjectLegalHoldInput, ...optFns: ((_arg0: Options) => void)[]): (GetObjectLegalHoldOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Gets the Object Lock configuration for a bucket. The rule specified in the + * Object Lock configuration will be applied by default to every new object placed + * in the specified bucket. 
For more information, see [Locking Objects].
+ *
+ * The following action is related to GetObjectLockConfiguration :
+ *
+ * [GetObjectAttributes]
+ *
+ * [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+ * [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+ */
+ getObjectLockConfiguration(ctx: context.Context, params: GetObjectLockConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (GetObjectLockConfigurationOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Retrieves an object's retention settings. For more information, see [Locking Objects].
+ *
+ * This functionality is not supported for Amazon S3 on Outposts.
+ *
+ * The following action is related to GetObjectRetention :
+ *
+ * [GetObjectAttributes]
+ *
+ * [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+ * [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+ */
+ getObjectRetention(ctx: context.Context, params: GetObjectRetentionInput, ...optFns: ((_arg0: Options) => void)[]): (GetObjectRetentionOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns the tag-set of an object. You send the GET request against the tagging
+ * subresource associated with the object.
+ *
+ * To use this operation, you must have permission to perform the
+ * s3:GetObjectTagging action. By default, the GET action returns information about
+ * the current version of an object. For a versioned bucket, you can have multiple
+ * versions of an object in your bucket. To retrieve tags of any other version, use
+ * the versionId query parameter. You also need permission for the
+ * s3:GetObjectVersionTagging action.
+ *
+ * By default, the bucket owner has this permission and can grant this permission
+ * to others.
+ *
+ * For information about the Amazon S3 object tagging feature, see [Object Tagging].
+ *
+ * The following actions are related to GetObjectTagging :
+ *
+ * [DeleteObjectTagging]
+ *
+ * [GetObjectAttributes]
+ *
+ * [PutObjectTagging]
+ *
+ * [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+ * [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
+ * [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+ * [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html
+ */
+ getObjectTagging(ctx: context.Context, params: GetObjectTaggingInput, ...optFns: ((_arg0: Options) => void)[]): (GetObjectTaggingOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns torrent files from a bucket. BitTorrent can save you bandwidth when
+ * you're distributing large files.
+ *
+ * You can get a torrent only for objects that are less than 5 GB in size, and that
+ * are not encrypted using server-side encryption with a customer-provided
+ * encryption key.
+ *
+ * To use GET, you must have READ access to the object.
+ *
+ * This functionality is not supported for Amazon S3 on Outposts.
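+ *
+ * A minimal usage sketch (hypothetical; `client` is assumed to be an
+ * initialized Client and `ctx` a context.Context):
+ *
+ * ```
+ * // Only works for unencrypted (non-SSE-C) objects smaller than 5 GB.
+ * const torrent = client.getObjectTorrent(ctx, {
+ *     Bucket: "example-bucket",
+ *     Key:    "example-key",
+ * })
+ * ```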
+ *
+ * The following action is related to GetObjectTorrent :
+ *
+ * [GetObject]
+ *
+ * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ */
+ getObjectTorrent(ctx: context.Context, params: GetObjectTorrentInput, ...optFns: ((_arg0: Options) => void)[]): (GetObjectTorrentOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+ * this operation, you must have the s3:GetBucketPublicAccessBlock permission. For
+ * more information about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+ *
+ * When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an
+ * object, it checks the PublicAccessBlock configuration for both the bucket (or
+ * the bucket that contains the object) and the bucket owner's account. If the
+ * PublicAccessBlock settings are different between the bucket and the account,
+ * Amazon S3 uses the most restrictive combination of the bucket-level and
+ * account-level settings.
+ *
+ * For more information about when Amazon S3 considers a bucket or an object
+ * public, see [The Meaning of "Public"].
+ *
+ * The following operations are related to GetPublicAccessBlock :
+ *
+ * [Using Amazon S3 Block Public Access]
+ *
+ * [PutPublicAccessBlock]
+ *
+ * [GetPublicAccessBlock]
+ *
+ * [DeletePublicAccessBlock]
+ *
+ * [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+ * [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html
+ * [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+ * [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+ * [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+ * [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
+ */
+ getPublicAccessBlock(ctx: context.Context, params: GetPublicAccessBlockInput, ...optFns: ((_arg0: Options) => void)[]): (GetPublicAccessBlockOutput)
+ }
+ // @ts-ignore
+ import smithytime = time
+ // @ts-ignore
+ import smithywaiter = waiter
+ interface Client {
+ /**
+ * You can use this operation to determine if a bucket exists and if you have
+ * permission to access it. The action returns a 200 OK if the bucket exists and
+ * you have permission to access it.
+ *
+ * If the bucket does not exist or you do not have permission to access it, the
+ * HEAD request returns a generic 400 Bad Request , 403 Forbidden , or 404 Not Found
+ * code. A message body is not included, so you cannot determine the exception
+ * beyond these HTTP response codes.
+ *
+ * Authentication and authorization General purpose buckets - Requests to public
+ * buckets that grant the s3:ListBucket permission publicly do not need to be
+ * signed. All other HeadBucket requests must be authenticated and signed by using
+ * IAM credentials (access key ID and secret access key for the IAM identities).
+ * All headers with the x-amz- prefix, including x-amz-copy-source , must be
+ * signed. For more information, see [REST Authentication].
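+ *
+ * A minimal existence/permission probe (hypothetical sketch; `client` and
+ * `ctx` are assumed to be an initialized Client and a context.Context, and
+ * the error-throwing behavior is an assumption of the JS bindings):
+ *
+ * ```
+ * // headBucket returns normally on 200 OK; the generic 400/403/404
+ * // responses surface as a thrown error here.
+ * try {
+ *     client.headBucket(ctx, { Bucket: "example-bucket" })
+ *     console.log("bucket exists and is accessible")
+ * } catch (err) {
+ *     console.log("bucket is missing or inaccessible")
+ * }
+ * ```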
+ *
+ * Directory buckets - You must use IAM credentials to authenticate and authorize
+ * your access to the HeadBucket API operation, instead of using the temporary
+ * security credentials through the CreateSession API operation.
+ *
+ * The Amazon Web Services CLI and SDKs handle authentication and authorization on
+ * your behalf.
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - To use this operation, you must have
+ * permissions to perform the s3:ListBucket action. The bucket owner has this
+ * permission by default and can grant this permission to others. For more
+ * information about permissions, see [Managing access permissions to your Amazon S3 resources] in the Amazon S3 User Guide.
+ *
+ * - Directory bucket permissions - You must have the s3express:CreateSession
+ * permission in the Action element of a policy. By default, the session is in
+ * the ReadWrite mode. If you want to restrict the access, you can explicitly set
+ * the s3express:SessionMode condition key to ReadOnly on the bucket.
+ * ```
+ *
+ * For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3
+ * User Guide.
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * Bucket_name.s3express-az_id.region.amazonaws.com .
+ *
+ * You must make requests for this API operation to the Zonal endpoint. These
+ * endpoints support virtual-hosted-style requests in the format
+ * https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests
+ * are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User Guide.
+ *
+ * [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+ * [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+ * [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+ * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+ * [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ */
+ headBucket(ctx: context.Context, params: HeadBucketInput, ...optFns: ((_arg0: Options) => void)[]): (HeadBucketOutput)
+ }
+ interface Client {
+ /**
+ * The HEAD operation retrieves metadata from an object without returning the
+ * object itself. This operation is useful if you're interested only in an object's
+ * metadata.
+ *
+ * A HEAD request has the same options as a GET operation on an object. The
+ * response is identical to the GET response except that there is no response
+ * body. Because of this, if the HEAD request generates an error, it returns a
+ * generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405
+ * Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not
+ * possible to retrieve the exact exception of these error codes.
+ *
+ * Request headers are limited to 8 KB in size. For more information, see [Common Request Headers].
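+ *
+ * A minimal metadata lookup (hypothetical sketch; `client` is assumed to be
+ * an initialized Client and `ctx` a context.Context):
+ *
+ * ```
+ * // Fetch only the metadata; the object body is never downloaded.
+ * const head = client.headObject(ctx, {
+ *     Bucket: "example-bucket",
+ *     Key:    "example-key",
+ * })
+ * console.log(head.ContentLength, head.ContentType, head.ETag)
+ * ```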
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - To use HEAD , you must have the
+ * s3:GetObject permission. You need the relevant read object (or version)
+ * permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3] in the Amazon S3
+ * User Guide. For more information about the permissions to S3 API operations by
+ * S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.
+ * ```
+ *
+ * If the object you request doesn't exist, the error that Amazon S3 returns
+ * depends on whether you also have the s3:ListBucket permission.
+ *
+ * ```
+ * - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
+ * HTTP status code 404 Not Found error.
+ *
+ * - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
+ * status code 403 Forbidden error.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession] API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. The Amazon Web Services CLI and SDKs create and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession].
+ * ```
+ *
+ * If you enable x-amz-checksum-mode in the request and the object is encrypted
+ * with Amazon Web Services Key Management Service (Amazon Web Services KMS), you
+ * must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM
+ * identity-based policies and KMS key policies for the KMS key to retrieve the
+ * checksum of the object.
+ *
+ * Encryption Encryption request headers, like x-amz-server-side-encryption ,
+ * should not be sent for HEAD requests if your object uses server-side encryption
+ * with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side
+ * encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side
+ * encryption with Amazon S3 managed encryption keys (SSE-S3). The
+ * x-amz-server-side-encryption header is used when you PUT an object to S3 and
+ * want to specify the encryption method. If you include this header in a HEAD
+ * request for an object that uses these types of keys, you’ll get an HTTP 400 Bad
+ * Request error. This is because the encryption method can't be changed when you
+ * retrieve the object.
+ *
+ * If you encrypt an object by using server-side encryption with customer-provided
+ * encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+ * retrieve the metadata from the object, you must use the following headers to
+ * provide the encryption key for the server to be able to retrieve the object's
+ * metadata.
The headers are: + * + * ``` + * - x-amz-server-side-encryption-customer-algorithm + * + * - x-amz-server-side-encryption-customer-key + * + * - x-amz-server-side-encryption-customer-key-MD5 + * ``` + * + * For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + * + * Directory bucket - For directory buckets, there are only two supported options + * for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more + * information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. + * + * Versioning + * + * ``` + * - If the current version of the object is a delete marker, Amazon S3 behaves + * as if the object was deleted and includes x-amz-delete-marker: true in the + * response. + * + * - If the specified version is a delete marker, the response returns a 405 + * Method Not Allowed error and the Last-Modified: timestamp response header. + * + * - Directory buckets - Delete marker is not supported by directory buckets. + * + * - Directory buckets - S3 Versioning isn't enabled and supported for directory + * buckets. For this API operation, only the null value of the version ID is + * supported by directory buckets. You can only specify null to the versionId + * query parameter in the request. + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . + * + * For directory buckets, you must make requests for this API operation to the + * Zonal endpoint. These endpoints support virtual-hosted-style requests in the + * format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . + * Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon + * S3 User Guide. + * + * The following actions are related to HeadObject : + * + * [GetObject] + * + * [GetObjectAttributes] + * + * [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + * [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + * [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html + * + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + */ + headObject(ctx: context.Context, params: HeadObjectInput, ...optFns: ((_arg0: Options) => void)[]): (HeadObjectOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Lists the analytics configurations for the bucket. You can have up to 1,000 + * analytics configurations per bucket. + * + * This action supports list pagination and does not return more than 100 + * configurations at a time. You should always check the IsTruncated element in + * the response. If there are no more configurations to list, IsTruncated is set + * to false. 
If there are more configurations to list, IsTruncated is set to true,
+ * and there will be a value in NextContinuationToken . You use the
+ * NextContinuationToken value to continue the pagination of the list by passing
+ * the value in continuation-token in the request to GET the next page.
+ *
+ * To use this operation, you must have permissions to perform the
+ * s3:GetAnalyticsConfiguration action. The bucket owner has this permission by
+ * default. The bucket owner can grant this permission to others. For more
+ * information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+ *
+ * For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis].
+ *
+ * The following operations are related to ListBucketAnalyticsConfigurations :
+ *
+ * [GetBucketAnalyticsConfiguration]
+ *
+ * [DeleteBucketAnalyticsConfiguration]
+ *
+ * [PutBucketAnalyticsConfiguration]
+ *
+ * [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+ * [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+ * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ * [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html
+ * [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+ * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ */
+ listBucketAnalyticsConfigurations(ctx: context.Context, params: ListBucketAnalyticsConfigurationsInput, ...optFns: ((_arg0: Options) => void)[]): (ListBucketAnalyticsConfigurationsOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Lists the S3 Intelligent-Tiering configuration from the specified bucket.
+ *
+ * The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+ * by automatically moving data to the most cost-effective storage access tier,
+ * without performance impact or operational overhead. S3 Intelligent-Tiering
+ * delivers automatic cost savings in three low latency and high throughput access
+ * tiers. To get the lowest storage cost on data that can be accessed in minutes to
+ * hours, you can choose to activate additional archiving capabilities.
+ *
+ * The S3 Intelligent-Tiering storage class is the ideal storage class for data
+ * with unknown, changing, or unpredictable access patterns, independent of object
+ * size or retention period. If the size of an object is less than 128 KB, it is
+ * not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+ * but they are always charged at the Frequent Access tier rates in the S3
+ * Intelligent-Tiering storage class.
+ *
+ * For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+ *
+ * Operations related to ListBucketIntelligentTieringConfigurations include:
+ *
+ * [DeleteBucketIntelligentTieringConfiguration]
+ *
+ * [PutBucketIntelligentTieringConfiguration]
+ *
+ * [GetBucketIntelligentTieringConfiguration]
+ *
+ * [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html
+ * [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html
+ * [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+ * [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html
+ */
+ listBucketIntelligentTieringConfigurations(ctx: context.Context, params: ListBucketIntelligentTieringConfigurationsInput, ...optFns: ((_arg0: Options) => void)[]): (ListBucketIntelligentTieringConfigurationsOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns a list of inventory configurations for the bucket. You can have up to
+ * 1,000 inventory configurations per bucket.
+ *
+ * This action supports list pagination and does not return more than 100
+ * configurations at a time. Always check the IsTruncated element in the response.
+ * If there are no more configurations to list, IsTruncated is set to false. If
+ * there are more configurations to list, IsTruncated is set to true, and there is
+ * a value in NextContinuationToken . You use the NextContinuationToken value to
+ * continue the pagination of the list by passing the value in continuation-token
+ * in the request to GET the next page.
+ *
+ * To use this operation, you must have permissions to perform the
+ * s3:GetInventoryConfiguration action. The bucket owner has this permission by
+ * default. The bucket owner can grant this permission to others. For more
+ * information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
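+ *
+ * A pagination sketch using the IsTruncated / NextContinuationToken flow
+ * described above (hypothetical; `client` and `ctx` are assumed to be an
+ * initialized Client and a context.Context):
+ *
+ * ```
+ * let token = undefined
+ * do {
+ *     const page = client.listBucketInventoryConfigurations(ctx, {
+ *         Bucket:            "example-bucket",
+ *         ContinuationToken: token,
+ *     })
+ *     for (const cfg of page.InventoryConfigurationList || []) {
+ *         console.log(cfg.Id)
+ *     }
+ *     token = page.NextContinuationToken
+ * } while (token)
+ * ```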
+ * + * For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory] + * + * The following operations are related to ListBucketInventoryConfigurations : + * + * [GetBucketInventoryConfiguration] + * + * [DeleteBucketInventoryConfiguration] + * + * [PutBucketInventoryConfiguration] + * + * [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html + * [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html + */ + listBucketInventoryConfigurations(ctx: context.Context, params: ListBucketInventoryConfigurationsInput, ...optFns: ((_arg0: Options) => void)[]): (ListBucketInventoryConfigurationsOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Lists the metrics configurations for the bucket. The metrics configurations are + * only for the request metrics of the bucket and do not provide information on + * daily storage metrics. You can have up to 1,000 configurations per bucket. + * + * This action supports list pagination and does not return more than 100 + * configurations at a time. Always check the IsTruncated element in the response. + * If there are no more configurations to list, IsTruncated is set to false. If + * there are more configurations to list, IsTruncated is set to true, and there is + * a value in NextContinuationToken . You use the NextContinuationToken value to + * continue the pagination of the list by passing the value in continuation-token + * in the request to GET the next page. + * + * To use this operation, you must have permissions to perform the + * s3:GetMetricsConfiguration action. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. For more + * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * For more information about metrics configurations and CloudWatch request + * metrics, see [Monitoring Metrics with Amazon CloudWatch]. 
+ * + * The following operations are related to ListBucketMetricsConfigurations : + * + * [PutBucketMetricsConfiguration] + * + * [GetBucketMetricsConfiguration] + * + * [DeleteBucketMetricsConfiguration] + * + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html + * [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html + * [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html + * [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + */ + listBucketMetricsConfigurations(ctx: context.Context, params: ListBucketMetricsConfigurationsInput, ...optFns: ((_arg0: Options) => void)[]): (ListBucketMetricsConfigurationsOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Returns a list of all buckets owned by the authenticated sender of the request. + * To use this operation, you must have the s3:ListAllMyBuckets permission. + * + * For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets]. + * + * [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html + */ + listBuckets(ctx: context.Context, params: ListBucketsInput, ...optFns: ((_arg0: Options) => void)[]): (ListBucketsOutput) + } + interface Client { + /** + * Returns a list of all Amazon S3 directory buckets owned by the authenticated + * sender of the request. For more information about directory buckets, see [Directory buckets]in the + * Amazon S3 User Guide. + * + * Directory buckets - For directory buckets, you must make requests for this API + * operation to the Regional endpoint. These endpoints support path-style requests + * in the format https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in + * the Amazon S3 User Guide. + * + * Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in + * an IAM identity-based policy instead of a bucket policy. Cross-account access to + * this API operation isn't supported. This operation can only be performed by the + * Amazon Web Services account that owns the resource. For more information about + * directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * s3express-control.region.amazonaws.com . 
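+ *
+ * A minimal usage sketch (hypothetical; `client` is assumed to be an
+ * initialized Client and `ctx` a context.Context):
+ *
+ * ```
+ * // List the directory buckets owned by the requester's account.
+ * const res = client.listDirectoryBuckets(ctx, {})
+ * for (const bucket of res.Buckets || []) {
+ *     console.log(bucket.Name)
+ * }
+ * ```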
+ *
+ * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+ * [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
+ * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+ */
+ listDirectoryBuckets(ctx: context.Context, params: ListDirectoryBucketsInput, ...optFns: ((_arg0: Options) => void)[]): (ListDirectoryBucketsOutput)
+ }
+ interface Client {
+ /**
+ * This operation lists in-progress multipart uploads in a bucket. An in-progress
+ * multipart upload is a multipart upload that has been initiated by the
+ * CreateMultipartUpload request, but has not yet been completed or aborted.
+ *
+ * Directory buckets - If multipart uploads in a directory bucket are in progress,
+ * you can't delete the bucket until all the in-progress multipart uploads are
+ * aborted or completed. To delete these in-progress multipart uploads, use the
+ * ListMultipartUploads operation to list the in-progress multipart uploads in the
+ * bucket and use the AbortMultipartUpload operation to abort all the in-progress
+ * multipart uploads.
+ *
+ * The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads
+ * in the response. The limit of 1,000 multipart uploads is also the default value.
+ * You can further limit the number of uploads in a response by specifying the
+ * max-uploads request parameter. If there are more than 1,000 multipart uploads
+ * that satisfy your ListMultipartUploads request, the response returns an
+ * IsTruncated element with the value of true , a NextKeyMarker element, and a
+ * NextUploadIdMarker element. To list the remaining multipart uploads, you need to
+ * make subsequent ListMultipartUploads requests. In these requests, include two
+ * query parameters: key-marker and upload-id-marker . Set the value of key-marker
+ * to the NextKeyMarker value from the previous response. Similarly, set the value
+ * of upload-id-marker to the NextUploadIdMarker value from the previous response.
+ *
+ * Directory buckets - The upload-id-marker element and the NextUploadIdMarker
+ * element aren't supported by directory buckets. To list the additional multipart
+ * uploads, you only need to set the value of key-marker to the NextKeyMarker
+ * value from the previous response.
+ *
+ * For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+ *
+ * Directory buckets - For directory buckets, you must make requests for this API
+ * operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+ * requests in the format
+ * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
+ * requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+ * Guide.
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - For information about permissions
+ * required to use the multipart upload API, see [Multipart Upload and Permissions] in the Amazon S3 User Guide.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession] API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. The Amazon Web Services CLI and SDKs create and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession].
+ * ```
+ *
+ * Sorting of multipart uploads in response
+ *
+ * ```
+ * - General purpose bucket - In the ListMultipartUploads response, the multipart
+ * uploads are sorted based on two criteria:
+ *
+ * - Key-based sorting - Multipart uploads are initially sorted in ascending
+ * order based on their object keys.
+ *
+ * - Time-based sorting - For uploads that share the same object key, they are
+ * further sorted in ascending order based on the upload initiation time. Among
+ * uploads with the same key, the one that was initiated first will appear before
+ * the ones that were initiated later.
+ *
+ * - Directory bucket - In the ListMultipartUploads response, the multipart
+ * uploads aren't sorted lexicographically based on the object keys.
+ * ```
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * Bucket_name.s3express-az_id.region.amazonaws.com .
+ *
+ * The following operations are related to ListMultipartUploads :
+ *
+ * [CreateMultipartUpload]
+ *
+ * [UploadPart]
+ *
+ * [CompleteMultipartUpload]
+ *
+ * [ListParts]
+ *
+ * [AbortMultipartUpload]
+ *
+ * [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+ * [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+ * [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+ * [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+ * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+ * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+ * [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+ * [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+ * [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+ */
+ listMultipartUploads(ctx: context.Context, params: ListMultipartUploadsInput, ...optFns: ((_arg0: Options) => void)[]): (ListMultipartUploadsOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Returns metadata about all versions of the objects in a bucket. You can also
+ * use request parameters as selection criteria to return metadata about a subset
+ * of all the object versions.
+ *
+ * To use this operation, you must have permission to perform the
+ * s3:ListBucketVersions action. Be aware of the name difference.
+ *
+ * A 200 OK response can contain valid or invalid XML. Make sure to design your
+ * application to parse the contents of the response and handle it appropriately.
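+ *
+ * A minimal usage sketch (hypothetical; `client` and `ctx` are assumed to be
+ * an initialized Client and a context.Context):
+ *
+ * ```
+ * // List object versions (and delete markers) under a prefix.
+ * const res = client.listObjectVersions(ctx, {
+ *     Bucket: "example-bucket",
+ *     Prefix: "reports/",
+ * })
+ * for (const v of res.Versions || []) {
+ *     console.log(v.Key, v.VersionId, v.IsLatest)
+ * }
+ * ```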
+ * + * To use this operation, you must have READ access to the bucket. + * + * The following operations are related to ListObjectVersions : + * + * [ListObjectsV2] + * + * [GetObject] + * + * [PutObject] + * + * [DeleteObject] + * + * [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + * [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html + */ + listObjectVersions(ctx: context.Context, params: ListObjectVersionsInput, ...optFns: ((_arg0: Options) => void)[]): (ListObjectVersionsOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Returns some or all (up to 1,000) of the objects in a bucket. You can use the + * request parameters as selection criteria to return a subset of the objects in a + * bucket. A 200 OK response can contain valid or invalid XML. Be sure to design + * your application to parse the contents of the response and handle it + * appropriately. + * + * This action has been revised. We recommend that you use the newer version, [ListObjectsV2], + * when developing applications. For backward compatibility, Amazon S3 continues to + * support ListObjects . + * + * The following operations are related to ListObjects : + * + * [ListObjectsV2] + * + * [GetObject] + * + * [PutObject] + * + * [CreateBucket] + * + * [ListBuckets] + * + * [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html + * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + * [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html + */ + listObjects(ctx: context.Context, params: ListObjectsInput, ...optFns: ((_arg0: Options) => void)[]): (ListObjectsOutput) + } + interface Client { + /** + * Returns some or all (up to 1,000) of the objects in a bucket with each request. + * You can use the request parameters as selection criteria to return a subset of + * the objects in a bucket. A 200 OK response can contain valid or invalid XML. + * Make sure to design your application to parse the contents of the response and + * handle it appropriately. + * + * For more information about listing objects, see [Listing object keys programmatically] in the Amazon S3 User Guide. + * To get a list of your buckets, see [ListBuckets]. + * + * ``` + * - General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't + * return prefixes that are related only to in-progress multipart uploads. + * + * - Directory buckets - For directory buckets, ListObjectsV2 response includes + * the prefixes that are related only to in-progress multipart uploads. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Zonal endpoint. These endpoints support + * virtual-hosted-style requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . + * Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the + * Amazon S3 User Guide. 
+ * ```
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - To use this operation, you must have
+ * READ access to the bucket. You must have permission to perform the
+ * s3:ListBucket action. The bucket owner has this permission by default and can
+ * grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+ * and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession] API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. The Amazon Web Services CLI and SDKs create and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession].
+ * ```
+ *
+ * Sorting order of returned objects
+ *
+ * ```
+ * - General purpose bucket - For general purpose buckets, ListObjectsV2 returns
+ * objects in lexicographical order based on their key names.
+ *
+ * - Directory bucket - For directory buckets, ListObjectsV2 does not return
+ * objects in lexicographical order.
+ * ```
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * Bucket_name.s3express-az_id.region.amazonaws.com .
+ *
+ * This section describes the latest revision of this action. We recommend that
+ * you use this revised API operation for application development. For backward
+ * compatibility, Amazon S3 continues to support the prior version of this API
+ * operation, [ListObjects].
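+ *
+ * A paginated listing sketch using ContinuationToken (hypothetical; `client`
+ * and `ctx` are assumed to be an initialized Client and a context.Context):
+ *
+ * ```
+ * let token = undefined
+ * do {
+ *     const page = client.listObjectsV2(ctx, {
+ *         Bucket:            "example-bucket",
+ *         Prefix:            "logs/",
+ *         ContinuationToken: token,
+ *     })
+ *     for (const obj of page.Contents || []) {
+ *         console.log(obj.Key, obj.Size)
+ *     }
+ *     token = page.NextContinuationToken
+ * } while (token)
+ * ```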
+ *
+ * The following operations are related to ListObjectsV2 :
+ *
+ * [GetObject]
+ *
+ * [PutObject]
+ *
+ * [CreateBucket]
+ *
+ * [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+ * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ * [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html
+ * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+ * [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html
+ * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+ * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+ */
+ listObjectsV2(ctx: context.Context, params: ListObjectsV2Input, ...optFns: ((_arg0: Options) => void)[]): (ListObjectsV2Output)
+ }
+ interface Client {
+ /**
+ * Lists the parts that have been uploaded for a specific multipart upload.
+ *
+ * To use this operation, you must provide the upload ID in the request. You
+ * obtain this upload ID by sending the initiate multipart upload request through [CreateMultipartUpload].
+ *
+ * The ListParts request returns a maximum of 1,000 uploaded parts. The limit of
+ * 1,000 parts is also the default value. You can restrict the number of parts in a
+ * response by specifying the max-parts request parameter. If your multipart
+ * upload consists of more than 1,000 parts, the response returns an IsTruncated
+ * field with the value of true , and a NextPartNumberMarker element. To list
+ * remaining uploaded parts, in subsequent ListParts requests, include the
+ * part-number-marker query string parameter and set its value to the
+ * NextPartNumberMarker field value from the previous response.
+ *
+ * For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+ *
+ * Directory buckets - For directory buckets, you must make requests for this API
+ * operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+ * requests in the format
+ * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
+ * requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+ * Guide.
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - For information about permissions
+ * required to use the multipart upload API, see [Multipart Upload and Permissions] in the Amazon S3 User Guide.
+ * ```
+ *
+ * If the upload was created using server-side encryption with Key Management
+ * Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon
+ * Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt
+ * action for the ListParts request to succeed.
+ *
+ * ```
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession] API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. The Amazon Web Services CLI and SDKs create and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession].
+ * ```
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * Bucket_name.s3express-az_id.region.amazonaws.com .
+ *
+ * The following operations are related to ListParts :
+ *
+ * [CreateMultipartUpload]
+ *
+ * [UploadPart]
+ *
+ * [CompleteMultipartUpload]
+ *
+ * [AbortMultipartUpload]
+ *
+ * [GetObjectAttributes]
+ *
+ * [ListMultipartUploads]
+ *
+ * [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+ * [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+ * [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+ * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+ * [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+ * [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+ * [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+ * [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+ * [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+ *
+ * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+ */
+ listParts(ctx: context.Context, params: ListPartsInput, ...optFns: ((_arg0: Options) => void)[]): (ListPartsOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
+ * Acceleration is a bucket-level feature that enables you to perform faster data
+ * transfers to Amazon S3.
+ *
+ * To use this operation, you must have permission to perform the
+ * s3:PutAccelerateConfiguration action. The bucket owner has this permission by
+ * default. The bucket owner can grant this permission to others. For more
+ * information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+ *
+ * The Transfer Acceleration state of a bucket can be set to one of the following
+ * two values:
+ *
+ * ```
+ * - Enabled – Enables accelerated data transfers to the bucket.
+ *
+ * - Suspended – Disables accelerated data transfers to the bucket.
+ * ```
+ *
+ * The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket.
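+ *
+ * A minimal sketch toggling the accelerate state (hypothetical; `client` and
+ * `ctx` are assumed to be an initialized Client and a context.Context):
+ *
+ * ```
+ * client.putBucketAccelerateConfiguration(ctx, {
+ *     Bucket: "example-bucket",
+ *     AccelerateConfiguration: { Status: "Enabled" }, // or "Suspended"
+ * })
+ * ```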
+ * + * After setting the Transfer Acceleration state of a bucket to Enabled, it might + * take up to thirty minutes before the data transfer rates to the bucket increase. + * + * The name of the bucket used for Transfer Acceleration must be DNS-compliant and + * must not contain periods ("."). + * + * For more information about transfer acceleration, see [Transfer Acceleration]. + * + * The following operations are related to PutBucketAccelerateConfiguration : + * + * [GetBucketAccelerateConfiguration] + * + * [CreateBucket] + * + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + * [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + */ + putBucketAccelerateConfiguration(ctx: context.Context, params: PutBucketAccelerateConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketAccelerateConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Sets the permissions on an existing bucket using access control lists (ACL). + * For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the + * WRITE_ACP permission. + * + * You can use one of the following two ways to set a bucket's permissions: + * + * ``` + * - Specify the ACL in the request body + * + * - Specify permissions using request headers + * ``` + * + * You cannot specify access permission using both the body and the request + * headers. + * + * Depending on your application needs, you may choose to set the ACL on a bucket + * using either the request body or the headers. For example, if you have an + * existing application that updates a bucket ACL using the request body, then you + * can continue to use that approach. + * + * If your bucket uses the bucket owner enforced setting for S3 Object Ownership, + * ACLs are disabled and no longer affect permissions. You must use policies to + * grant access to your bucket and the objects in it. Requests to set ACLs or + * update ACLs fail and return the AccessControlListNotSupported error code. + * Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the + * Amazon S3 User Guide. + * + * Permissions You can set access permissions by using one of the following + * methods: + * + * ``` + * - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a + * set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined + * set of grantees and permissions. Specify the canned ACL name as the value of + * x-amz-acl . If you use this header, you cannot use other access + * control-specific headers in your request. For more information, see [Canned ACL]. + * + * - Specify access permissions explicitly with the x-amz-grant-read , + * x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control + * headers. 
When using these headers, you specify explicit access permissions and
+	 * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
+	 * permission. If you use these ACL-specific headers, you cannot use the
+	 * x-amz-acl header to set a canned ACL. These parameters map to the set of
+	 * permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview].
+	 * ```
+	 *
+	 * You specify each grantee as a type=value pair, where the type is one of the
+	 * following:
+	 *
+	 * ```
+	 * - id – if the value specified is the canonical user ID of an Amazon Web
+	 * Services account
+	 *
+	 * - uri – if you are granting permissions to a predefined group
+	 *
+	 * - emailAddress – if the value specified is the email address of an Amazon Web
+	 * Services account
+	 * ```
+	 *
+	 * Using email addresses to specify a grantee is only supported in the following
+	 * Amazon Web Services Regions:
+	 *
+	 * ```
+	 * - US East (N. Virginia)
+	 *
+	 * - US West (N. California)
+	 *
+	 * - US West (Oregon)
+	 *
+	 * - Asia Pacific (Singapore)
+	 *
+	 * - Asia Pacific (Sydney)
+	 *
+	 * - Asia Pacific (Tokyo)
+	 *
+	 * - Europe (Ireland)
+	 *
+	 * - South America (São Paulo)
+	 * ```
+	 *
+	 * For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+	 * Amazon Web Services General Reference.
+	 *
+	 * For example, the following x-amz-grant-write header grants create, overwrite,
+	 * and delete objects permission to the LogDelivery group predefined by Amazon S3
+	 * and two Amazon Web Services accounts identified by their email addresses:
+	 *
+	 * ```
+	 * x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+	 * id="111122223333", id="555566667777"
+	 * ```
+	 *
+	 * You can use either a canned ACL or specify access permissions explicitly. You
+	 * cannot do both.
+	 *
+	 * Grantee Values You can specify the person (grantee) to whom you're assigning
+	 * access rights (using request elements) in the following ways:
+	 *
+	 * ```
+	 * - By the person's ID: <>ID<><>GranteesEmail<>
+	 * DisplayName is optional and ignored in the request.
+	 *
+	 * - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+	 *
+	 * - By Email address: <>Grantees@email.com<>&
+	 * ```
+	 *
+	 * The grantee is resolved to the CanonicalUser and, in a response to a GET Object
+	 * acl request, appears as the CanonicalUser.
+	 *
+	 * Using email addresses to specify a grantee is only supported in the Amazon Web
+	 * Services Regions listed above. For a list of all the Amazon S3 supported
+	 * Regions and endpoints, see [Regions and Endpoints] in the Amazon Web Services
+	 * General Reference.
+	 *
+	 * The following operations are related to PutBucketAcl :
+	 *
+	 * [CreateBucket]
+	 *
+	 * [DeleteBucket]
+	 *
+	 * [GetObjectAcl]
+	 *
+	 * [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+	 * [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+	 * [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+	 * [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+	 * [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+	 * [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+	 * [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+	 * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+	 */
+	putBucketAcl(ctx: context.Context, params: PutBucketAclInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketAclOutput)
+	}
+	interface Client {
+	/**
+	 * This operation is not supported by directory buckets.
+	 *
+	 * Sets an analytics configuration for the bucket (specified by the analytics
+	 * configuration ID). You can have up to 1,000 analytics configurations per bucket.
+	 *
+	 * You can choose to have storage class analysis export analysis reports sent to a
+	 * comma-separated values (CSV) flat file. See the DataExport request element.
+	 * Reports are updated daily and are based on the object filters that you
+	 * configure. When selecting data export, you specify a destination bucket and an
+	 * optional destination prefix where the file is written. You can export the data
+	 * to a destination bucket in a different account. However, the destination bucket
+	 * must be in the same Region as the bucket that you are making the PUT analytics
+	 * configuration to. For more information, see [Amazon S3 Analytics – Storage Class Analysis].
+	 *
+	 * You must create a bucket policy on the destination bucket where the exported
+	 * file is written to grant permissions to Amazon S3 to write objects to the
+	 * bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis].
+	 *
+	 * To use this operation, you must have permissions to perform the
+	 * s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
+	 * default. The bucket owner can grant this permission to others. For more
+	 * information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+	 *
+	 * PutBucketAnalyticsConfiguration has the following special errors:
+	 *
+	 * ```
+	 * - HTTP Error: HTTP 400 Bad Request
+	 *
+	 * - Code: InvalidArgument
+	 *
+	 * - Cause: Invalid argument.
+	 *
+	 * - HTTP Error: HTTP 400 Bad Request
+	 *
+	 * - Code: TooManyConfigurations
+	 *
+	 * - Cause: You are attempting to create a new configuration but have already
+	 * reached the 1,000-configuration limit.
+	 *
+	 * - HTTP Error: HTTP 403 Forbidden
+	 *
+	 * - Code: AccessDenied
+	 *
+	 * - Cause: You are not the owner of the specified bucket, or you do not have
+	 * the s3:PutAnalyticsConfiguration bucket permission to set the configuration on
+	 * the bucket.
+ * ``` + * + * The following operations are related to PutBucketAnalyticsConfiguration : + * + * [GetBucketAnalyticsConfiguration] + * + * [DeleteBucketAnalyticsConfiguration] + * + * [ListBucketAnalyticsConfigurations] + * + * [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html + * [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 + * [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html + * [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + */ + putBucketAnalyticsConfiguration(ctx: context.Context, params: PutBucketAnalyticsConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketAnalyticsConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Sets the cors configuration for your bucket. If the configuration exists, + * Amazon S3 replaces it. + * + * To use this operation, you must be allowed to perform the s3:PutBucketCORS + * action. By default, the bucket owner has this permission and can grant it to + * others. + * + * You set this configuration on a bucket so that the bucket can service + * cross-origin requests. For example, you might want to enable a request whose + * origin is http://www.example.com to access your Amazon S3 bucket at + * my.example.bucket.com by using the browser's XMLHttpRequest capability. + * + * To enable cross-origin resource sharing (CORS) on a bucket, you add the cors + * subresource to the bucket. The cors subresource is an XML document in which you + * configure rules that identify origins and the HTTP methods that can be executed + * on your bucket. The document is limited to 64 KB in size. + * + * When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS + * request) against a bucket, it evaluates the cors configuration on the bucket + * and uses the first CORSRule rule that matches the incoming browser request to + * enable a cross-origin request. For a rule to match, the following conditions + * must be met: + * + * ``` + * - The request's Origin header must match AllowedOrigin elements. + * + * - The request method (for example, GET, PUT, HEAD, and so on) or the + * Access-Control-Request-Method header in case of a pre-flight OPTIONS request + * must be one of the AllowedMethod elements. + * + * - Every header specified in the Access-Control-Request-Headers request header + * of a pre-flight request must match an AllowedHeader element. + * ``` + * + * For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. 
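+	 *
+	 * As a sketch, a single-rule cors configuration might be put like this
+	 * (client, ctx, and all names shown are illustrative assumptions):
+	 *
+	 * ```
+	 * client.putBucketCors(ctx, {
+	 *   Bucket: "example-bucket",
+	 *   CORSConfiguration: {
+	 *     CORSRules: [{
+	 *       AllowedOrigins: ["http://www.example.com"],
+	 *       AllowedMethods: ["GET", "PUT"],
+	 *       AllowedHeaders: ["*"],
+	 *       MaxAgeSeconds: 3600,
+	 *     }],
+	 *   },
+	 * })
+	 * ```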
+ * + * The following operations are related to PutBucketCors : + * + * [GetBucketCors] + * + * [DeleteBucketCors] + * + * [RESTOPTIONSobject] + * + * [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html + * [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + * [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html + * [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html + */ + putBucketCors(ctx: context.Context, params: PutBucketCorsInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketCorsOutput) + } + interface Client { + /** + * This operation configures default encryption and Amazon S3 Bucket Keys for an + * existing bucket. + * + * Directory buckets - For directory buckets, you must make requests for this API + * operation to the Regional endpoint. These endpoints support path-style requests + * in the format https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in + * the Amazon S3 User Guide. + * + * By default, all buckets have a default encryption configuration that uses + * server-side encryption with Amazon S3 managed keys (SSE-S3). + * + * ``` + * - General purpose buckets + * + * - You can optionally configure default encryption for a bucket by using + * server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or + * dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). + * If you specify default encryption by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys]. + * For information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption]in the + * Amazon S3 User Guide. + * + * - If you use PutBucketEncryption to set your [default bucket encryption]to SSE-KMS, you should verify + * that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID + * provided in PutBucketEncryption requests. + * + * - Directory buckets - You can optionally configure default encryption for a + * bucket by using server-side encryption with Key Management Service (KMS) keys + * (SSE-KMS). + * + * - We recommend that the bucket's default encryption uses the desired + * encryption configuration and you don't override the bucket default encryption in + * your CreateSession requests or PUT object requests. Then, new objects are + * automatically encrypted with the desired encryption settings. For more + * information about the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads] + * . + * + * - Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket for the + * lifetime of the bucket. [Amazon Web Services managed key]( aws/s3 ) isn't supported. + * + * - S3 Bucket Keys are always enabled for GET and PUT operations in a directory + * bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy + * SSE-KMS encrypted objects from general purpose buckets to directory buckets, + * from directory buckets to general purpose buckets, or between directory buckets, + * through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a + * copy request is made for a KMS-encrypted object. 
+ * + * - When you specify an [KMS customer managed key]for encryption in your directory bucket, only use the + * key ID or key ARN. The key alias format of the KMS key isn't supported. + * + * - For directory buckets, if you use PutBucketEncryption to set your [default bucket encryption]to + * SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption + * requests. + * ``` + * + * If you're specifying a customer managed KMS key, we recommend using a fully + * qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the + * key within the requester’s account. This behavior can result in data that's + * encrypted with a KMS key that belongs to the requester, and not the bucket + * owner. + * + * Also, this action requires Amazon Web Services Signature Version 4. For more + * information, see [Authenticating Requests (Amazon Web Services Signature Version 4)]. + * + * Permissions + * + * ``` + * - General purpose bucket permissions - The s3:PutEncryptionConfiguration + * permission is required in a policy. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. For more + * information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. + * + * - Directory bucket permissions - To grant access to this API operation, you + * must have the s3express:PutEncryptionConfiguration permission in an IAM + * identity-based policy instead of a bucket policy. Cross-account access to this + * API operation isn't supported. This operation can only be performed by the + * Amazon Web Services account that owns the resource. For more information about + * directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. + * ``` + * + * To set a directory bucket default encryption with SSE-KMS, you must also have + * + * ``` + * the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based + * policies and KMS key policies for the target KMS key. + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * s3express-control.region.amazonaws.com . 
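+	 *
+	 * A minimal sketch of setting SSE-KMS default encryption follows (client, ctx,
+	 * the bucket name, and the key ARN placeholder are assumptions):
+	 *
+	 * ```
+	 * client.putBucketEncryption(ctx, {
+	 *   Bucket: "example-bucket",
+	 *   ServerSideEncryptionConfiguration: {
+	 *     Rules: [{
+	 *       ApplyServerSideEncryptionByDefault: {
+	 *         SSEAlgorithm: "aws:kms",
+	 *         KMSMasterKeyID: "arn:aws:kms:...", // a fully qualified key ARN is recommended
+	 *       },
+	 *       BucketKeyEnabled: true,
+	 *     }],
+	 *   },
+	 * })
+	 * ```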
+ * + * The following operations are related to PutBucketEncryption : + * + * [GetBucketEncryption] + * + * [DeleteBucketEncryption] + * + * [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + * [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html + * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html + * [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + * [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html + * [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html + * [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + * [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + * [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops + */ + putBucketEncryption(ctx: context.Context, params: PutBucketEncryptionInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketEncryptionOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can + * have up to 1,000 S3 Intelligent-Tiering configurations per bucket. + * + * The S3 Intelligent-Tiering storage class is designed to optimize storage costs + * by automatically moving data to the most cost-effective storage access tier, + * without performance impact or operational overhead. S3 Intelligent-Tiering + * delivers automatic cost savings in three low latency and high throughput access + * tiers. To get the lowest storage cost on data that can be accessed in minutes to + * hours, you can choose to activate additional archiving capabilities. + * + * The S3 Intelligent-Tiering storage class is the ideal storage class for data + * with unknown, changing, or unpredictable access patterns, independent of object + * size or retention period. If the size of an object is less than 128 KB, it is + * not monitored and not eligible for auto-tiering. 
Smaller objects can be stored, + * but they are always charged at the Frequent Access tier rates in the S3 + * Intelligent-Tiering storage class. + * + * For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. + * + * Operations related to PutBucketIntelligentTieringConfiguration include: + * + * [DeleteBucketIntelligentTieringConfiguration] + * + * [GetBucketIntelligentTieringConfiguration] + * + * [ListBucketIntelligentTieringConfigurations] + * + * You only need S3 Intelligent-Tiering enabled on a bucket if you want to + * automatically move objects stored in the S3 Intelligent-Tiering storage class to + * the Archive Access or Deep Archive Access tier. + * + * PutBucketIntelligentTieringConfiguration has the following special errors: + * + * HTTP 400 Bad Request Error Code: InvalidArgument + * + * Cause: Invalid Argument + * + * HTTP 400 Bad Request Error Code: TooManyConfigurations + * + * Cause: You are attempting to create a new configuration but have already + * reached the 1,000-configuration limit. + * + * HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, + * or you do not have the s3:PutIntelligentTieringConfiguration bucket permission + * to set the configuration on the bucket. + * + * [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html + * [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html + * [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + * [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html + */ + putBucketIntelligentTieringConfiguration(ctx: context.Context, params: PutBucketIntelligentTieringConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketIntelligentTieringConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * This implementation of the PUT action adds an inventory configuration + * (identified by the inventory ID) to the bucket. You can have up to 1,000 + * inventory configurations per bucket. + * + * Amazon S3 inventory generates inventories of the objects in the bucket on a + * daily or weekly basis, and the results are published to a flat file. The bucket + * that is inventoried is called the source bucket, and the bucket where the + * inventory flat file is stored is called the destination bucket. The destination + * bucket must be in the same Amazon Web Services Region as the source bucket. + * + * When you configure an inventory for a source bucket, you specify the + * destination bucket where you want the inventory to be stored, and whether to + * generate the inventory daily or weekly. You can also configure what object + * metadata to include and whether to inventory all object versions or only current + * versions. For more information, see [Amazon S3 Inventory]in the Amazon S3 User Guide. + * + * You must create a bucket policy on the destination bucket to grant permissions + * to Amazon S3 to write objects to the bucket in the defined location. For an + * example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. 
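+	 *
+	 * A minimal daily-inventory sketch (the configuration ID, bucket names, and
+	 * destination ARN below are illustrative assumptions):
+	 *
+	 * ```
+	 * client.putBucketInventoryConfiguration(ctx, {
+	 *   Bucket: "example-source-bucket",
+	 *   Id: "daily-inventory",
+	 *   InventoryConfiguration: {
+	 *     Id: "daily-inventory",
+	 *     IsEnabled: true,
+	 *     IncludedObjectVersions: "Current",
+	 *     Schedule: { Frequency: "Daily" },
+	 *     Destination: {
+	 *       S3BucketDestination: {
+	 *         Bucket: "arn:aws:s3:::example-destination-bucket",
+	 *         Format: "CSV",
+	 *       },
+	 *     },
+	 *   },
+	 * })
+	 * ```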
+ * + * Permissions To use this operation, you must have permission to perform the + * s3:PutInventoryConfiguration action. The bucket owner has this permission by + * default and can grant this permission to others. + * + * The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report + * that includes all object metadata fields available and to specify the + * destination bucket to store the inventory. A user with read access to objects in + * the destination bucket can also access all object metadata fields that are + * available in the inventory report. + * + * To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide. + * For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists] + * in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations]and [Identity and access management in Amazon S3] + * in the Amazon S3 User Guide. + * + * PutBucketInventoryConfiguration has the following special errors: + * + * HTTP 400 Bad Request Error Code: InvalidArgument + * + * Cause: Invalid Argument + * + * HTTP 400 Bad Request Error Code: TooManyConfigurations + * + * Cause: You are attempting to create a new configuration but have already + * reached the 1,000-configuration limit. + * + * HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, + * or you do not have the s3:PutInventoryConfiguration bucket permission to set + * the configuration on the bucket. + * + * The following operations are related to PutBucketInventoryConfiguration : + * + * [GetBucketInventoryConfiguration] + * + * [DeleteBucketInventoryConfiguration] + * + * [ListBucketInventoryConfigurations] + * + * [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 + * [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html + * [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html + * [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html + * [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html + * [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10 + * [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents + * [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html + */ + putBucketInventoryConfiguration(ctx: context.Context, params: PutBucketInventoryConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketInventoryConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. 
+ * + * Creates a new lifecycle configuration for the bucket or replaces an existing + * lifecycle configuration. Keep in mind that this will overwrite an existing + * lifecycle configuration, so if you want to retain any configuration details, + * they must be included in the new lifecycle configuration. For information about + * lifecycle configuration, see [Managing your storage lifecycle]. + * + * Rules You specify the lifecycle configuration in your request body. The + * lifecycle configuration is specified as XML consisting of one or more rules. An + * Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not + * adjustable. + * + * Bucket lifecycle configuration supports specifying a lifecycle rule using an + * object key name prefix, one or more object tags, object size, or any combination + * of these. Accordingly, this section describes the latest API. The previous + * version of the API supported filtering based only on an object key name prefix, + * which is supported for backward compatibility. For the related API description, + * see [PutBucketLifecycle]. + * + * A lifecycle rule consists of the following: + * + * ``` + * - A filter identifying a subset of objects to which the rule applies. The + * filter can be based on a key name prefix, object tags, object size, or any + * combination of these. + * + * - A status indicating whether the rule is in effect. + * + * - One or more lifecycle transition and expiration actions that you want + * Amazon S3 to perform on the objects identified by the filter. If the state of + * your bucket is versioning-enabled or versioning-suspended, you can have many + * versions of the same object (one current version and zero or more noncurrent + * versions). Amazon S3 provides predefined actions that you can specify for + * current and noncurrent object versions. + * ``` + * + * For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements]. + * + * Permissions By default, all Amazon S3 resources are private, including buckets, + * objects, and related subresources (for example, lifecycle configuration and + * website configuration). Only the resource owner (that is, the Amazon Web + * Services account that created it) can access the resource. The resource owner + * can optionally grant access permissions to others by writing an access policy. + * For this operation, a user must get the s3:PutLifecycleConfiguration permission. + * + * You can also explicitly deny permissions. An explicit deny also supersedes any + * other permissions. If you want to block users or accounts from removing or + * deleting objects from your bucket, you must deny them permissions for the + * following actions: + * + * ``` + * - s3:DeleteObject + * + * - s3:DeleteObjectVersion + * + * - s3:PutLifecycleConfiguration + * ``` + * + * For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. 
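+	 *
+	 * As a sketch, a one-rule configuration that expires objects under a prefix
+	 * could look like the following (the names and the exact Filter field shape
+	 * are assumptions):
+	 *
+	 * ```
+	 * client.putBucketLifecycleConfiguration(ctx, {
+	 *   Bucket: "example-bucket",
+	 *   LifecycleConfiguration: {
+	 *     Rules: [{
+	 *       ID: "expire-tmp",
+	 *       Status: "Enabled",
+	 *       Filter: { Prefix: "tmp/" }, // field shape is illustrative
+	 *       Expiration: { Days: 30 },
+	 *     }],
+	 *   },
+	 * })
+	 * ```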
+ * + * The following operations are related to PutBucketLifecycleConfiguration : + * + * [Examples of Lifecycle Configuration] + * + * [GetBucketLifecycleConfiguration] + * + * [DeleteBucketLifecycle] + * + * [Examples of Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html + * [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html + * [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html + * [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html + * [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html + * [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html + */ + putBucketLifecycleConfiguration(ctx: context.Context, params: PutBucketLifecycleConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketLifecycleConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Set the logging parameters for a bucket and to specify permissions for who can + * view and modify the logging parameters. All logs are saved to buckets in the + * same Amazon Web Services Region as the source bucket. To set the logging status + * of a bucket, you must be the bucket owner. + * + * The bucket owner is automatically granted FULL_CONTROL to all logs. You use the + * Grantee request element to grant access to other people. The Permissions + * request element specifies the kind of access the grantee has to the logs. + * + * If the target bucket for log delivery uses the bucket owner enforced setting + * for S3 Object Ownership, you can't use the Grantee request element to grant + * access to others. Permissions can only be granted using policies. For more + * information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. + * + * Grantee Values You can specify the person (grantee) to whom you're assigning + * access rights (by using request elements) in the following ways: + * + * ``` + * - By the person's ID: + * ``` + * + * <>ID<><>GranteesEmail<> + * + * DisplayName is optional and ignored in the request. + * + * ``` + * - By Email address: + * ``` + * + * <>Grantees@email.com<> + * + * The grantee is resolved to the CanonicalUser and, in a response to a + * + * ``` + * GETObjectAcl request, appears as the CanonicalUser. + * + * - By URI: + * ``` + * + * <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> + * + * To enable logging, you use LoggingEnabled and its children request elements. To + * disable logging, you use an empty BucketLoggingStatus request element: + * + * For more information about server access logging, see [Server Access Logging] in the Amazon S3 User + * Guide. + * + * For more information about creating a bucket, see [CreateBucket]. For more information about + * returning the logging status of a bucket, see [GetBucketLogging]. 
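+	 *
+	 * A minimal sketch of enabling server access logging (both bucket names and
+	 * the prefix are illustrative assumptions):
+	 *
+	 * ```
+	 * client.putBucketLogging(ctx, {
+	 *   Bucket: "example-source-bucket",
+	 *   BucketLoggingStatus: {
+	 *     LoggingEnabled: {
+	 *       TargetBucket: "example-log-bucket",
+	 *       TargetPrefix: "logs/example-source-bucket/",
+	 *     },
+	 *   },
+	 * })
+	 * ```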
+ * + * The following operations are related to PutBucketLogging : + * + * [PutObject] + * + * [DeleteBucket] + * + * [CreateBucket] + * + * [GetBucketLogging] + * + * [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general + * [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html + * [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html + * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + * [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html + */ + putBucketLogging(ctx: context.Context, params: PutBucketLoggingInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketLoggingOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Sets a metrics configuration (specified by the metrics configuration ID) for + * the bucket. You can have up to 1,000 metrics configurations per bucket. If + * you're updating an existing metrics configuration, note that this is a full + * replacement of the existing metrics configuration. If you don't include the + * elements you want to keep, they are erased. + * + * To use this operation, you must have permissions to perform the + * s3:PutMetricsConfiguration action. The bucket owner has this permission by + * default. The bucket owner can grant this permission to others. For more + * information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. + * + * The following operations are related to PutBucketMetricsConfiguration : + * + * [DeleteBucketMetricsConfiguration] + * + * [GetBucketMetricsConfiguration] + * + * [ListBucketMetricsConfigurations] + * + * PutBucketMetricsConfiguration has the following special error: + * + * ``` + * - Error code: TooManyConfigurations + * + * - Description: You are attempting to create a new configuration but have + * already reached the 1,000-configuration limit. + * + * - HTTP Status Code: HTTP 400 Bad Request + * ``` + * + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html + * [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html + * [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html + * [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + */ + putBucketMetricsConfiguration(ctx: context.Context, params: PutBucketMetricsConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketMetricsConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. 
+ * + * Enables notifications of specified events for a bucket. For more information + * about event notifications, see [Configuring Event Notifications]. + * + * Using this API, you can replace an existing notification configuration. The + * configuration is an XML file that defines the event types that you want Amazon + * S3 to publish and the destination where you want Amazon S3 to publish an event + * notification when it detects an event of the specified type. + * + * By default, your bucket has no event notifications configured. That is, the + * notification configuration will be an empty NotificationConfiguration . + * + * This action replaces the existing notification configuration with the + * configuration you include in the request body. + * + * After Amazon S3 receives this request, it first verifies that any Amazon Simple + * Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) + * destination exists, and that the bucket owner has permission to publish to it by + * sending a test notification. In the case of Lambda destinations, Amazon S3 + * verifies that the Lambda function permissions grant Amazon S3 permission to + * invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events]. + * + * You can disable notifications by adding the empty NotificationConfiguration + * element. + * + * For more information about the number of event notification configurations that + * you can create per bucket, see [Amazon S3 service quotas]in Amazon Web Services General Reference. + * + * By default, only the bucket owner can configure notifications on a bucket. + * However, bucket owners can use a bucket policy to grant permission to other + * users to set this configuration with the required s3:PutBucketNotification + * permission. + * + * The PUT notification is an atomic operation. For example, suppose your + * notification configuration includes SNS topic, SQS queue, and Lambda function + * configurations. When you send a PUT request with this configuration, Amazon S3 + * sends test messages to your SNS topic. If the message fails, the entire PUT + * action will fail, and Amazon S3 will not add the configuration to your bucket. + * + * If the configuration in the request body includes only one TopicConfiguration + * specifying only the s3:ReducedRedundancyLostObject event type, the response + * will also include the x-amz-sns-test-message-id header containing the message + * ID of the test notification sent to the topic. + * + * The following action is related to PutBucketNotificationConfiguration : + * + * [GetBucketNotificationConfiguration] + * + * [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + * [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3 + * [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html + * [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + */ + putBucketNotificationConfiguration(ctx: context.Context, params: PutBucketNotificationConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketNotificationConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Creates or modifies OwnershipControls for an Amazon S3 bucket. 
To use this
+	 * operation, you must have the s3:PutBucketOwnershipControls permission. For more
+	 * information about Amazon S3 permissions, see [Specifying permissions in a policy].
+	 *
+	 * For information about Amazon S3 Object Ownership, see [Using object ownership].
+	 *
+	 * The following operations are related to PutBucketOwnershipControls :
+	 *
+	 * GetBucketOwnershipControls
+	 *
+	 * DeleteBucketOwnershipControls
+	 *
+	 * [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html
+	 * [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html
+	 */
+	putBucketOwnershipControls(ctx: context.Context, params: PutBucketOwnershipControlsInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketOwnershipControlsOutput)
+	}
+	interface Client {
+	/**
+	 * Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
+	 *
+	 * Directory buckets - For directory buckets, you must make requests for this API
+	 * operation to the Regional endpoint. These endpoints support path-style requests
+	 * in the format https://s3express-control.region_code.amazonaws.com/bucket-name .
+	 * Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints] in
+	 * the Amazon S3 User Guide.
+	 *
+	 * Permissions If you are using an identity other than the root user of the Amazon
+	 * Web Services account that owns the bucket, the calling identity must both have
+	 * the PutBucketPolicy permissions on the specified bucket and belong to the
+	 * bucket owner's account in order to use this operation.
+	 *
+	 * If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access
+	 * Denied error. If you have the correct permissions, but you're not using an
+	 * identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+	 * Method Not Allowed error.
+	 *
+	 * To ensure that bucket owners don't inadvertently lock themselves out of their
+	 * own buckets, the root principal in a bucket owner's Amazon Web Services account
+	 * can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API
+	 * actions, even if their bucket policy explicitly denies the root principal's
+	 * access. Bucket owner root principals can only be blocked from performing these
+	 * API actions by VPC endpoint policies and Amazon Web Services Organizations
+	 * policies.
+	 *
+	 * ```
+	 * - General purpose bucket permissions - The s3:PutBucketPolicy permission is
+	 * required in a policy. For more information about general purpose bucket
+	 * policies, see [Using Bucket Policies and User Policies] in the Amazon S3 User Guide.
+	 *
+	 * - Directory bucket permissions - To grant access to this API operation, you
+	 * must have the s3express:PutBucketPolicy permission in an IAM identity-based
+	 * policy instead of a bucket policy. Cross-account access to this API operation
+	 * isn't supported. This operation can only be performed by the Amazon Web Services
+	 * account that owns the resource. For more information about directory bucket
+	 * policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+	 * ```
+	 *
+	 * Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples]
+	 * in the Amazon S3 User Guide.
+	 *
+	 * Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide.
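+	 *
+	 * For illustration, the policy document is passed as a JSON string; the
+	 * account ID, bucket, and statement below are made-up examples:
+	 *
+	 * ```
+	 * client.putBucketPolicy(ctx, {
+	 *   Bucket: "example-bucket",
+	 *   Policy: JSON.stringify({
+	 *     Version: "2012-10-17",
+	 *     Statement: [{
+	 *       Effect: "Allow",
+	 *       Principal: { AWS: "arn:aws:iam::111122223333:root" },
+	 *       Action: "s3:GetBucketPolicy",
+	 *       Resource: "arn:aws:s3:::example-bucket",
+	 *     }],
+	 *   }),
+	 * })
+	 * ```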
+ * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * s3express-control.region.amazonaws.com . + * + * The following operations are related to PutBucketPolicy : + * + * [CreateBucket] + * + * [DeleteBucket] + * + * [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html + * [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html + * [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + * [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html + */ + putBucketPolicy(ctx: context.Context, params: PutBucketPolicyInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketPolicyOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Creates a replication configuration or replaces an existing one. For more + * information, see [Replication]in the Amazon S3 User Guide. + * + * Specify the replication configuration in the request body. In the replication + * configuration, you provide the name of the destination bucket or buckets where + * you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume + * to replicate objects on your behalf, and other relevant information. You can + * invoke this request for a specific Amazon Web Services Region by using the [aws:RequestedRegion] + * aws:RequestedRegion condition key. + * + * A replication configuration must include at least one rule, and can contain a + * maximum of 1,000. Each rule identifies a subset of objects to replicate by + * filtering the objects in the source bucket. To choose additional subsets of + * objects to replicate, add a rule for each subset. + * + * To specify a subset of the objects in the source bucket to apply a replication + * rule to, add the Filter element as a child of the Rule element. You can filter + * objects based on an object key prefix, one or more object tags, or both. When + * you add the Filter element in the configuration, you must also add the following + * elements: DeleteMarkerReplication , Status , and Priority . + * + * If you are using an earlier version of the replication configuration, Amazon S3 + * handles replication of delete markers differently. For more information, see [Backward Compatibility]. + * + * For information about enabling versioning on a bucket, see [Using Versioning]. + * + * Handling Replication of Encrypted Objects By default, Amazon S3 doesn't + * replicate objects that are stored at rest using server-side encryption with KMS + * keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: + * SourceSelectionCriteria , SseKmsEncryptedObjects , Status , + * EncryptionConfiguration , and ReplicaKmsKeyID . For information about + * replication configuration, see [Replicating Objects Created with SSE Using KMS keys]. 
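+	 *
+	 * A single-rule sketch (the role ARN, bucket ARNs, and the exact Filter field
+	 * shape are assumptions for the example):
+	 *
+	 * ```
+	 * client.putBucketReplication(ctx, {
+	 *   Bucket: "example-source-bucket",
+	 *   ReplicationConfiguration: {
+	 *     Role: "arn:aws:iam::111122223333:role/example-replication-role",
+	 *     Rules: [{
+	 *       Status: "Enabled",
+	 *       Priority: 1,
+	 *       DeleteMarkerReplication: { Status: "Disabled" },
+	 *       Filter: { Prefix: "" }, // replicate all objects; shape is illustrative
+	 *       Destination: { Bucket: "arn:aws:s3:::example-destination-bucket" },
+	 *     }],
+	 *   },
+	 * })
+	 * ```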
+ * + * For information on PutBucketReplication errors, see [List of replication-related error codes] + * + * Permissions To create a PutBucketReplication request, you must have + * s3:PutReplicationConfiguration permissions for the bucket. + * + * By default, a resource owner, in this case the Amazon Web Services account that + * created the bucket, can perform this operation. The resource owner can also + * grant others permissions to perform the operation. For more information about + * permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * To perform this operation, the user or role performing the action must have the [iam:PassRole] + * permission. + * + * The following operations are related to PutBucketReplication : + * + * [GetBucketReplication] + * + * [DeleteBucketReplication] + * + * [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html + * [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html + * [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion + * [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html + * [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html + * [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html + * [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList + * [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations + * [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + */ + putBucketReplication(ctx: context.Context, params: PutBucketReplicationInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketReplicationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Sets the request payment configuration for a bucket. By default, the bucket + * owner pays for downloads from the bucket. This configuration parameter enables + * the bucket owner (only) to specify that the person requesting the download will + * be charged for the download. For more information, see [Requester Pays Buckets]. + * + * The following operations are related to PutBucketRequestPayment : + * + * [CreateBucket] + * + * [GetBucketRequestPayment] + * + * [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html + * [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + */ + putBucketRequestPayment(ctx: context.Context, params: PutBucketRequestPaymentInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketRequestPaymentOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Sets the tags for a bucket. 
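+	 *
+	 * For example, a full replacement tag set might be written as the following
+	 * sketch (keys and values are illustrative):
+	 *
+	 * ```
+	 * client.putBucketTagging(ctx, {
+	 *   Bucket: "example-bucket",
+	 *   Tagging: {
+	 *     TagSet: [
+	 *       { Key: "project", Value: "example-app" },
+	 *       { Key: "env", Value: "production" },
+	 *     ],
+	 *   },
+	 * })
+	 * ```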
+ * + * Use tags to organize your Amazon Web Services bill to reflect your own cost + * structure. To do this, sign up to get your Amazon Web Services account bill with + * tag key values included. Then, to see the cost of combined resources, organize + * your billing information according to resources with the same tag key values. + * For example, you can tag several resources with a specific application name, and + * then organize your billing information to see the total cost of that application + * across several services. For more information, see [Cost Allocation and Tagging]and [Using Cost Allocation in Amazon S3 Bucket Tags]. + * + * When this operation sets the tags for a bucket, it will overwrite any current + * tags the bucket already has. You cannot use this operation to add tags to an + * existing list of tags. + * + * To use this operation, you must have permissions to perform the + * s3:PutBucketTagging action. The bucket owner has this permission by default and + * can grant this permission to others. For more information about permissions, see + * [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. + * + * PutBucketTagging has the following special errors. For more Amazon S3 errors + * see, [Error Responses]. + * + * ``` + * - InvalidTag - The tag provided was not a valid tag. This error can occur if + * the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags]. + * + * - MalformedXML - The XML provided does not match the schema. + * + * - OperationAborted - A conflicting conditional action is currently in progress + * against this resource. Please try again. + * + * - InternalError - The service was unable to apply the provided tag to the + * bucket. + * ``` + * + * The following operations are related to PutBucketTagging : + * + * [GetBucketTagging] + * + * [DeleteBucketTagging] + * + * [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + * [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html + * [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html + * [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + */ + putBucketTagging(ctx: context.Context, params: PutBucketTaggingInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketTaggingOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * When you enable versioning on a bucket for the first time, it might take a + * short amount of time for the change to be fully propagated. We recommend that + * you wait for 15 minutes after enabling versioning before issuing write + * operations ( PUT or DELETE ) on objects in the bucket. + * + * Sets the versioning state of an existing bucket. 
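+	 *
+	 * A minimal sketch (client, ctx, and the bucket name are assumptions):
+	 *
+	 * ```
+	 * client.putBucketVersioning(ctx, {
+	 *   Bucket: "example-bucket",
+	 *   VersioningConfiguration: { Status: "Enabled" }, // or "Suspended"
+	 * })
+	 * ```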
+ * + * You can set the versioning state with one of the following values: + * + * Enabled—Enables versioning for the objects in the bucket. All objects added to + * the bucket receive a unique version ID. + * + * Suspended—Disables versioning for the objects in the bucket. All objects added + * to the bucket receive the version ID null. + * + * If the versioning state has never been set on a bucket, it has no versioning + * state; a [GetBucketVersioning]request does not return a versioning state value. + * + * In order to enable MFA Delete, you must be the bucket owner. If you are the + * bucket owner and want to enable MFA Delete in the bucket versioning + * configuration, you must include the x-amz-mfa request header and the Status and + * the MfaDelete request elements in a request to set the versioning state of the + * bucket. + * + * If you have an object expiration lifecycle configuration in your non-versioned + * bucket and you want to maintain the same permanent delete behavior when you + * enable versioning, you must add a noncurrent expiration policy. The noncurrent + * expiration lifecycle configuration will manage the deletes of the noncurrent + * object versions in the version-enabled bucket. (A version-enabled bucket + * maintains one current and zero or more noncurrent object versions.) For more + * information, see [Lifecycle and Versioning]. + * + * The following operations are related to PutBucketVersioning : + * + * [CreateBucket] + * + * [DeleteBucket] + * + * [GetBucketVersioning] + * + * [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html + * [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + * [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config + * [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html + */ + putBucketVersioning(ctx: context.Context, params: PutBucketVersioningInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketVersioningOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Sets the configuration of the website that is specified in the website + * subresource. To configure a bucket as a website, you can add this subresource on + * the bucket with website configuration information such as the file name of the + * index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3]. + * + * This PUT action requires the S3:PutBucketWebsite permission. By default, only + * the bucket owner can configure the website attached to a bucket; however, bucket + * owners can allow other users to set the website configuration by writing a + * bucket policy that grants them the S3:PutBucketWebsite permission. + * + * To redirect all website requests sent to the bucket's website endpoint, you add + * a website configuration with the following elements. Because all requests are + * sent to another website, you don't need to provide index document name for the + * bucket. + * + * ``` + * - WebsiteConfiguration + * + * - RedirectAllRequestsTo + * + * - HostName + * + * - Protocol + * ``` + * + * If you want granular control over redirects, you can use the following elements + * to add routing rules that describe conditions for redirecting requests and + * information about the redirect destination. 
In this case, the website + * configuration must provide an index document for the bucket, because some + * requests might not be redirected. + * + * ``` + * - WebsiteConfiguration + * + * - IndexDocument + * + * - Suffix + * + * - ErrorDocument + * + * - Key + * + * - RoutingRules + * + * - RoutingRule + * + * - Condition + * + * - HttpErrorCodeReturnedEquals + * + * - KeyPrefixEquals + * + * - Redirect + * + * - Protocol + * + * - HostName + * + * - ReplaceKeyPrefixWith + * + * - ReplaceKeyWith + * + * - HttpRedirectCode + * ``` + * + * Amazon S3 has a limitation of 50 routing rules per website configuration. If + * you require more than 50 routing rules, you can use object redirect. For more + * information, see [Configuring an Object Redirect]in the Amazon S3 User Guide. + * + * The maximum request length is limited to 128 KB. + * + * [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + * [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + */ + putBucketWebsite(ctx: context.Context, params: PutBucketWebsiteInput, ...optFns: ((_arg0: Options) => void)[]): (PutBucketWebsiteOutput) + } + interface Client { + /** + * Adds an object to a bucket. + * + * ``` + * - Amazon S3 never adds partial objects; if you receive a success response, + * Amazon S3 added the entire object to the bucket. You cannot use PutObject to + * only update a single piece of metadata for an existing object. You must put the + * entire object with updated metadata if you want to update some values. + * + * - If your bucket uses the bucket owner enforced setting for Object Ownership, + * ACLs are disabled and no longer affect permissions. All objects written to the + * bucket by any account will be owned by the bucket owner. + * + * - Directory buckets - For directory buckets, you must make requests for this + * API operation to the Zonal endpoint. These endpoints support + * virtual-hosted-style requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . + * Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the + * Amazon S3 User Guide. + * ``` + * + * Amazon S3 is a distributed system. If it receives multiple write requests for + * the same object simultaneously, it overwrites all but the last object written. + * However, Amazon S3 provides features that can modify this behavior: + * + * ``` + * - S3 Object Lock - To prevent objects from being deleted or overwritten, you + * can use [Amazon S3 Object Lock]in the Amazon S3 User Guide. + * ``` + * + * This functionality is not supported for directory buckets. + * + * ``` + * - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 + * receives multiple write requests for the same object simultaneously, it stores + * all versions of the objects. For each write request that is made to the same + * object, Amazon S3 automatically generates a unique version ID of that object + * being stored in Amazon S3. You can retrieve, replace, or delete any version of + * the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User + * Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning] + * . + * ``` + * + * This functionality is not supported for directory buckets. 
+ *
+ * Permissions
+ *
+ * ```
+ * - General purpose bucket permissions - The following permissions are required
+ * in your policies when your PutObject request includes specific headers.
+ *
+ * - s3:PutObject - To successfully complete the PutObject request, you must
+ * always have the s3:PutObject permission on a bucket to add an object to it.
+ *
+ * - s3:PutObjectAcl - To successfully change the object's ACL with your PutObject
+ * request, you must have the s3:PutObjectAcl permission.
+ *
+ * - s3:PutObjectTagging - To successfully set the tag-set with your PutObject
+ * request, you must have the s3:PutObjectTagging permission.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession] API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. The Amazon Web Services CLI and SDKs create and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession].
+ * ```
+ *
+ * If the object is encrypted with SSE-KMS, you must also have the
+ *
+ * ```
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+ * and KMS key policies for the KMS key.
+ * ```
+ *
+ * Data integrity with Content-MD5
+ *
+ * ```
+ * - General purpose bucket - To ensure that data is not corrupted traversing
+ * the network, use the Content-MD5 header. When you use this header, Amazon S3
+ * checks the object against the provided MD5 value and, if they do not match,
+ * Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5
+ * digest, you can calculate the MD5 while putting the object to Amazon S3 and
+ * compare the returned ETag to the calculated MD5 value.
+ *
+ * - Directory bucket - This functionality is not supported for directory
+ * buckets.
+ * ```
+ *
+ * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+ * Bucket_name.s3express-az_id.region.amazonaws.com .
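+ *
+ * A minimal, illustrative sketch of calling this binding (it assumes an already
+ * initialized Client client, a ctx context value, placeholder bucket and key
+ * names, and that the input fields mirror the Go SDK's PutObjectInput):
+ *
+ * ```
+ * // "my-bucket", "docs/hello.txt" and body are placeholders; body is assumed
+ * // to be an io.Reader-compatible value provided by the surrounding runtime.
+ * const out = client.putObject(ctx, {
+ *     Bucket:      "my-bucket",
+ *     Key:         "docs/hello.txt",
+ *     ContentType: "text/plain",
+ *     Body:        body,
+ * })
+ * console.log(out.ETag) // ETag of the newly written object
+ * ```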
+ *
+ * For more information about related Amazon S3 APIs, see the following:
+ *
+ * [CopyObject]
+ *
+ * [DeleteObject]
+ *
+ * [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html
+ * [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+ * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+ * [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
+ * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+ * [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
+ */
+ putObject(ctx: context.Context, params: PutObjectInput, ...optFns: ((_arg0: Options) => void)[]): (PutObjectOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Uses the acl subresource to set the access control list (ACL) permissions for a
+ * new or existing object in an S3 bucket. You must have the WRITE_ACP permission
+ * to set the ACL of an object. For more information, see [What permissions can I grant?]in the Amazon S3 User
+ * Guide.
+ *
+ * This functionality is not supported for Amazon S3 on Outposts.
+ *
+ * Depending on your application needs, you can choose to set the ACL on an object
+ * using either the request body or the headers. For example, if you have an
+ * existing application that updates a bucket ACL using the request body, you can
+ * continue to use that approach. For more information, see [Access Control List (ACL) Overview]in the Amazon S3 User
+ * Guide.
+ *
+ * If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+ * ACLs are disabled and no longer affect permissions. You must use policies to
+ * grant access to your bucket and the objects in it. Requests to set ACLs or
+ * update ACLs fail and return the AccessControlListNotSupported error code.
+ * Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the
+ * Amazon S3 User Guide.
+ *
+ * Permissions You can set access permissions using one of the following methods:
+ *
+ * ```
+ * - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a
+ * set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
+ * set of grantees and permissions. Specify the canned ACL name as the value of
+ * x-amz-acl . If you use this header, you cannot use other access
+ * control-specific headers in your request. For more information, see [Canned ACL].
+ *
+ * - Specify access permissions explicitly with the x-amz-grant-read ,
+ * x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control
+ * headers. When using these headers, you specify explicit access permissions and
+ * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
+ * permission. If you use these ACL-specific headers, you cannot use the x-amz-acl
+ * header to set a canned ACL. These parameters map to the set of permissions that
+ * Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview].
+ * ```
+ *
+ * You specify each grantee as a type=value pair, where the type is one of the
+ *
+ * ```
+ * following:
+ *
+ * - id – if the value specified is the canonical user ID of an Amazon Web
+ * Services account
+ *
+ * - uri – if you are granting permissions to a predefined group
+ *
+ * - emailAddress – if the value specified is the email address of an Amazon Web
+ * Services account
+ * ```
+ *
+ * Using email addresses to specify a grantee is only supported in the following
+ *
+ * ```
+ * Amazon Web Services Regions:
+ *
+ * - US East (N. Virginia)
+ *
+ * - US West (N. California)
+ *
+ * - US West (Oregon)
+ *
+ * - Asia Pacific (Singapore)
+ *
+ * - Asia Pacific (Sydney)
+ *
+ * - Asia Pacific (Tokyo)
+ *
+ * - Europe (Ireland)
+ *
+ * - South America (São Paulo)
+ * ```
+ *
+ * For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ *
+ * ```
+ * Amazon Web Services General Reference.
+ * ```
+ *
+ * For example, the following x-amz-grant-read header grants list objects
+ *
+ * ```
+ * permission to the two Amazon Web Services accounts identified by their email
+ * addresses.
+ * ```
+ *
+ * x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+ *
+ * You can use either a canned ACL or specify access permissions explicitly. You
+ * cannot do both.
+ *
+ * Grantee Values You can specify the person (grantee) to whom you're assigning
+ * access rights (using request elements) in the following ways:
+ *
+ * ```
+ * - By the person's ID:
+ * ```
+ *
+ * <Grantee xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+ *
+ * DisplayName is optional and ignored in the request.
+ *
+ * ```
+ * - By URI:
+ * ```
+ *
+ * <Grantee xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+ *
+ * ```
+ * - By Email address:
+ * ```
+ *
+ * <Grantee xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+ *
+ * The grantee is resolved to the CanonicalUser and, in a response to a GET Object
+ *
+ * ```
+ * acl request, appears as the CanonicalUser.
+ * ```
+ *
+ * Using email addresses to specify a grantee is only supported in the following
+ *
+ * ```
+ * Amazon Web Services Regions:
+ *
+ * - US East (N. Virginia)
+ *
+ * - US West (N. California)
+ *
+ * - US West (Oregon)
+ *
+ * - Asia Pacific (Singapore)
+ *
+ * - Asia Pacific (Sydney)
+ *
+ * - Asia Pacific (Tokyo)
+ *
+ * - Europe (Ireland)
+ *
+ * - South America (São Paulo)
+ * ```
+ *
+ * For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ *
+ * ```
+ * Amazon Web Services General Reference.
+ * ```
+ *
+ * Versioning The ACL of an object is set at the object version level. By default,
+ * PUT sets the ACL of the current version of an object. To set the ACL of a
+ * different version, use the versionId subresource.
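+ *
+ * A minimal, illustrative sketch of setting a canned ACL (it assumes an already
+ * initialized Client client and a ctx context value; the bucket and key names
+ * are placeholders and the input fields are assumed to mirror the Go SDK's
+ * PutObjectAclInput):
+ *
+ * ```
+ * client.putObjectAcl(ctx, {
+ *     Bucket: "my-bucket",      // placeholder
+ *     Key:    "docs/hello.txt", // placeholder
+ *     ACL:    "public-read",    // canned ACL; cannot be combined with x-amz-grant-* headers
+ * })
+ * ```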
+ * + * The following operations are related to PutObjectAcl : + * + * [CopyObject] + * + * [GetObject] + * + * [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + * [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + * [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + * [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + */ + putObjectAcl(ctx: context.Context, params: PutObjectAclInput, ...optFns: ((_arg0: Options) => void)[]): (PutObjectAclOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Applies a legal hold configuration to the specified object. For more + * information, see [Locking Objects]. + * + * This functionality is not supported for Amazon S3 on Outposts. + * + * [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + */ + putObjectLegalHold(ctx: context.Context, params: PutObjectLegalHoldInput, ...optFns: ((_arg0: Options) => void)[]): (PutObjectLegalHoldOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Places an Object Lock configuration on the specified bucket. The rule specified + * in the Object Lock configuration will be applied by default to every new object + * placed in the specified bucket. For more information, see [Locking Objects]. + * + * ``` + * - The DefaultRetention settings require both a mode and a period. + * + * - The DefaultRetention period can be either Days or Years but you must select + * one. You cannot specify Days and Years at the same time. + * + * - You can enable Object Lock for new or existing buckets. For more + * information, see [Configuring Object Lock]. + * ``` + * + * [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html + * [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + */ + putObjectLockConfiguration(ctx: context.Context, params: PutObjectLockConfigurationInput, ...optFns: ((_arg0: Options) => void)[]): (PutObjectLockConfigurationOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Places an Object Retention configuration on an object. For more information, + * see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order + * to place an Object Retention configuration on objects. Bypassing a Governance + * Retention configuration requires the s3:BypassGovernanceRetention permission. + * + * This functionality is not supported for Amazon S3 on Outposts. + * + * [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + */ + putObjectRetention(ctx: context.Context, params: PutObjectRetentionInput, ...optFns: ((_arg0: Options) => void)[]): (PutObjectRetentionOutput) + } + interface Client { + /** + * This operation is not supported by directory buckets. + * + * Sets the supplied tag-set to an object that already exists in a bucket. A tag + * is a key-value pair. For more information, see [Object Tagging]. 
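+ *
+ * A minimal, illustrative sketch (it assumes an already initialized Client
+ * client and a ctx context value; the names are placeholders and the input
+ * shape is assumed to mirror the Go SDK's PutObjectTaggingInput; permission and
+ * error details follow below):
+ *
+ * ```
+ * client.putObjectTagging(ctx, {
+ *     Bucket: "my-bucket",      // placeholder
+ *     Key:    "docs/hello.txt", // placeholder
+ *     Tagging: {
+ *         TagSet: [
+ *             { Key: "project", Value: "demo" },
+ *             { Key: "env",     Value: "staging" },
+ *         ],
+ *     },
+ * })
+ * ```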
+ *
+ * You can associate tags with an object by sending a PUT request against the
+ * tagging subresource that is associated with the object. You can retrieve tags by
+ * sending a GET request. For more information, see [GetObjectTagging].
+ *
+ * For tagging restrictions related to characters and encodings, see [Tag Restrictions].
+ * Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
+ *
+ * To use this operation, you must have permission to perform the
+ * s3:PutObjectTagging action. By default, the bucket owner has this permission and
+ * can grant this permission to others.
+ *
+ * To put tags on any other version, use the versionId query parameter. You also
+ * need permission for the s3:PutObjectVersionTagging action.
+ *
+ * PutObjectTagging has the following special errors. For more Amazon S3 errors,
+ * see [Error Responses].
+ *
+ * ```
+ * - InvalidTag - The tag provided was not a valid tag. This error can occur if
+ * the tag did not pass input validation. For more information, see [Object Tagging].
+ *
+ * - MalformedXML - The XML provided does not match the schema.
+ *
+ * - OperationAborted - A conflicting conditional action is currently in progress
+ * against this resource. Please try again.
+ *
+ * - InternalError - The service was unable to apply the provided tag to the
+ * object.
+ * ```
+ *
+ * The following operations are related to PutObjectTagging :
+ *
+ * [GetObjectTagging]
+ *
+ * [DeleteObjectTagging]
+ *
+ * [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+ * [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+ * [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
+ * [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html
+ * [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
+ */
+ putObjectTagging(ctx: context.Context, params: PutObjectTaggingInput, ...optFns: ((_arg0: Options) => void)[]): (PutObjectTaggingOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Creates or modifies the PublicAccessBlock configuration for an Amazon S3
+ * bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
+ * permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+ *
+ * When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an
+ * object, it checks the PublicAccessBlock configuration for both the bucket (or
+ * the bucket that contains the object) and the bucket owner's account. If the
+ * PublicAccessBlock configurations are different between the bucket and the
+ * account, Amazon S3 uses the most restrictive combination of the bucket-level and
+ * account-level settings.
+ *
+ * For more information about when Amazon S3 considers a bucket or an object
+ * public, see [The Meaning of "Public"].
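+ *
+ * A minimal, illustrative sketch that blocks all public access (it assumes an
+ * already initialized Client client and a ctx context value; the bucket name is
+ * a placeholder and the input shape is assumed to mirror the Go SDK's
+ * PutPublicAccessBlockInput):
+ *
+ * ```
+ * client.putPublicAccessBlock(ctx, {
+ *     Bucket: "my-bucket", // placeholder
+ *     PublicAccessBlockConfiguration: {
+ *         BlockPublicAcls:       true,
+ *         IgnorePublicAcls:      true,
+ *         BlockPublicPolicy:     true,
+ *         RestrictPublicBuckets: true,
+ *     },
+ * })
+ * ```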
+ *
+ * The following operations are related to PutPublicAccessBlock :
+ *
+ * [GetPublicAccessBlock]
+ *
+ * [DeletePublicAccessBlock]
+ *
+ * [GetBucketPolicyStatus]
+ *
+ * [Using Amazon S3 Block Public Access]
+ *
+ * [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+ * [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+ * [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+ * [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
+ * [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+ * [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
+ */
+ putPublicAccessBlock(ctx: context.Context, params: PutPublicAccessBlockInput, ...optFns: ((_arg0: Options) => void)[]): (PutPublicAccessBlockOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * The SELECT job type for the RestoreObject operation is no longer available to
+ * new customers. Existing customers of Amazon S3 Select can continue to use the
+ * feature as usual. [Learn more]
+ *
+ * Restores an archived copy of an object back into Amazon S3.
+ *
+ * This functionality is not supported for Amazon S3 on Outposts.
+ *
+ * This action performs the following types of requests:
+ *
+ * ```
+ * - restore an archive - Restore an archived object
+ * ```
+ *
+ * For more information about the S3 structure in the request body, see the
+ * following:
+ *
+ * [PutObject]
+ *
+ * [Managing Access with ACLs]
+ * ```
+ * - in the Amazon S3 User Guide
+ * ```
+ *
+ * [Protecting Data Using Server-Side Encryption]
+ * ```
+ * - in the Amazon S3 User Guide
+ * ```
+ *
+ * Permissions To use this operation, you must have permissions to perform the
+ * s3:RestoreObject action. The bucket owner has this permission by default and can
+ * grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+ * and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide.
+ *
+ * Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval
+ * or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or
+ * S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For
+ * objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage
+ * classes, you must first initiate a restore request, and then wait until a
+ * temporary copy of the object is available. If you want a permanent copy of the
+ * object, create a copy of it in the Amazon S3 Standard storage class in your S3
+ * bucket. To access an archived object, you must restore the object for the
+ * duration (number of days) that you specify. For objects in the Archive Access or
+ * Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a
+ * restore request, and then wait until the object is moved into the Frequent
+ * Access tier.
+ *
+ * To restore a specific object version, you can provide a version ID. If you
+ * don't provide a version ID, Amazon S3 restores the current version.
+ *
+ * When restoring an archived object, you can specify one of the following data
+ * access tier options in the Tier element of the request body:
+ *
+ * ```
+ * - Expedited - Expedited retrievals allow you to quickly access your data
+ * stored in the S3 Glacier Flexible Retrieval storage class or S3
+ * Intelligent-Tiering Archive tier when occasional urgent requests for restoring
+ * archives are required. For all but the largest archived objects (250 MB+), data
+ * accessed using Expedited retrievals is typically made available within 1–5
+ * minutes. Provisioned capacity ensures that retrieval capacity for Expedited
+ * retrievals is available when you need it. Expedited retrievals and provisioned
+ * capacity are not available for objects stored in the S3 Glacier Deep Archive
+ * storage class or S3 Intelligent-Tiering Deep Archive tier.
+ *
+ * - Standard - Standard retrievals allow you to access any of your archived
+ * objects within several hours. This is the default option for retrieval requests
+ * that do not specify the retrieval option. Standard retrievals typically finish
+ * within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval
+ * storage class or S3 Intelligent-Tiering Archive tier. They typically finish
+ * within 12 hours for objects stored in the S3 Glacier Deep Archive storage class
+ * or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for
+ * objects stored in S3 Intelligent-Tiering.
+ *
+ * - Bulk - Bulk retrievals are free for objects stored in the S3 Glacier
+ * Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to
+ * retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals
+ * typically finish within 5–12 hours for objects stored in the S3 Glacier
+ * Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk
+ * retrievals are also the lowest-cost retrieval option when restoring objects from
+ * S3 Glacier Deep Archive. They typically finish within 48 hours for objects
+ * stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering
+ * Deep Archive tier.
+ * ```
+ *
+ * For more information about archive retrieval options and provisioned capacity
+ * for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide.
+ *
+ * You can use Amazon S3 restore speed upgrade to change the restore speed to a
+ * faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore]in the Amazon
+ * S3 User Guide.
+ *
+ * To get the status of object restoration, you can send a HEAD request.
+ * Operations return the x-amz-restore header, which provides information about
+ * the restoration status, in the response. You can use Amazon S3 event
+ * notifications to notify you when a restore is initiated or completed. For more
+ * information, see [Configuring Amazon S3 Event Notifications]in the Amazon S3 User Guide.
+ *
+ * After restoring an archived object, you can update the restoration period by
+ * reissuing the request with a new period. Amazon S3 updates the restoration
+ * period relative to the current time and charges only for the request; there are
+ * no data transfer charges. You cannot update the restoration period when Amazon
+ * S3 is actively processing your current restore request for the object.
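+ *
+ * A minimal, illustrative sketch of a Standard-tier restore for 10 days (it
+ * assumes an already initialized Client client and a ctx context value; the
+ * names are placeholders and the input shape is assumed to mirror the Go SDK's
+ * RestoreObjectInput):
+ *
+ * ```
+ * client.restoreObject(ctx, {
+ *     Bucket: "my-archive-bucket", // placeholder
+ *     Key:    "backups/2023.tar",  // placeholder
+ *     RestoreRequest: {
+ *         Days: 10,
+ *         GlacierJobParameters: { Tier: "Standard" }, // or "Expedited" / "Bulk"
+ *     },
+ * })
+ * ```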
+ * + * If your bucket has a lifecycle configuration with a rule that includes an + * expiration action, the object expiration overrides the life span that you + * specify in a restore request. For example, if you restore an object copy for 10 + * days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the + * object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration]and [Object Lifecycle Management] + * in Amazon S3 User Guide. + * + * Responses A successful action returns either the 200 OK or 202 Accepted status + * code. + * + * ``` + * - If the object is not previously restored, then Amazon S3 returns 202 + * Accepted in the response. + * + * - If the object is previously restored, Amazon S3 returns 200 OK in the + * response. + * + * - Special errors: + * + * - Code: RestoreAlreadyInProgress + * + * - Cause: Object restore is already in progress. + * + * - HTTP Status Code: 409 Conflict + * + * - SOAP Fault Code Prefix: Client + * + * - Code: GlacierExpeditedRetrievalNotAvailable + * + * - Cause: expedited retrievals are currently not available. Try again later. + * (Returned if there is insufficient capacity to process the Expedited request. + * This error applies only to Expedited retrievals and not to S3 Standard or Bulk + * retrievals.) + * + * - HTTP Status Code: 503 + * + * - SOAP Fault Code Prefix: N/A + * ``` + * + * The following operations are related to RestoreObject : + * + * [PutBucketLifecycleConfiguration] + * + * [GetBucketNotificationConfiguration] + * + * [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + * [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + * [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html + * [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + * [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + * [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + * [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html + * [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html + * [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + * [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html + * [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + * [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html + */ + restoreObject(ctx: context.Context, params: RestoreObjectInput, ...optFns: ((_arg0: Options) => void)[]): (RestoreObjectOutput) + } + // @ts-ignore + import smithysync = sync + interface Client { + /** + * This operation is not supported by directory buckets. + * + * The SelectObjectContent operation is no longer available to new customers. + * Existing customers of Amazon S3 Select can continue to use the operation as + * usual. 
[Learn more] + * + * This action filters the contents of an Amazon S3 object based on a simple + * structured query language (SQL) statement. In the request, along with the SQL + * expression, you must also specify a data serialization format (JSON, CSV, or + * Apache Parquet) of the object. Amazon S3 uses this format to parse object data + * into records, and returns only records that match the specified SQL expression. + * You must also specify the data serialization format for the response. + * + * This functionality is not supported for Amazon S3 on Outposts. + * + * For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User + * Guide. + * + * Permissions You must have the s3:GetObject permission for this operation. + * Amazon S3 Select does not support anonymous access. For more information about + * permissions, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. + * + * Object Data Formats You can use Amazon S3 Select to query objects that have the + * following format properties: + * + * ``` + * - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. + * + * - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. + * + * - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. + * GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports + * for CSV and JSON files. Amazon S3 Select supports columnar compression for + * Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object + * compression for Parquet objects. + * + * - Server-side encryption - Amazon S3 Select supports querying objects that + * are protected with server-side encryption. + * ``` + * + * For objects that are encrypted with customer-provided encryption keys (SSE-C), + * + * ``` + * you must use HTTPS, and you must use the headers that are documented in the [GetObject]. + * For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)]in the Amazon S3 User Guide. + * ``` + * + * For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon + * + * ``` + * Web Services KMS keys (SSE-KMS), server-side encryption is handled + * transparently, so you don't need to specify anything. For more information about + * server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption]in the Amazon S3 + * User Guide. + * ``` + * + * Working with the Response Body Given the response size is unknown, Amazon S3 + * Select streams the response as a series of messages and includes a + * Transfer-Encoding header with chunked as its value in the response. For more + * information, see [Appendix: SelectObjectContent Response]. + * + * GetObject Support The SelectObjectContent action does not support the following + * GetObject functionality. For more information, see [GetObject]. + * + * ``` + * - Range : Although you can specify a scan range for an Amazon S3 Select + * request (see [SelectObjectContentRequest - ScanRange]in the request parameters), you cannot specify the range of + * bytes of an object to return. 
+ * + * - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the + * ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING + * storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or + * REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or + * DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For + * more information about storage classes, see [Using Amazon S3 storage classes]in the Amazon S3 User Guide. + * ``` + * + * Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes] + * + * The following operations are related to SelectObjectContent : + * + * [GetObject] + * + * [GetBucketLifecycleConfiguration] + * + * [PutBucketLifecycleConfiguration] + * + * [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + * [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html + * [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + * [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList + * [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html + * [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html + * [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html + * [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + * [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html + * [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + * + * [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + * [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html + */ + selectObjectContent(ctx: context.Context, params: SelectObjectContentInput, ...optFns: ((_arg0: Options) => void)[]): (SelectObjectContentOutput) + } + interface Client { + /** + * Uploads a part in a multipart upload. + * + * In this operation, you provide new data as a part of an object in your request. + * However, you have an option to specify your existing Amazon S3 object as a data + * source for the part you are uploading. To upload a part from an existing object, + * you use the [UploadPartCopy]operation. + * + * You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. In + * response to your initiate request, Amazon S3 returns an upload ID, a unique + * identifier that you must include in your upload part request. + * + * Part numbers can be any number from 1 to 10,000, inclusive. A part number + * uniquely identifies a part and also defines its position within the object being + * created. 
If you upload a new part using the same part number that was used with
+ * a previous part, the previously uploaded part is overwritten.
+ *
+ * For information about maximum and minimum part sizes and other multipart upload
+ * specifications, see [Multipart upload limits]in the Amazon S3 User Guide.
+ *
+ * After you initiate a multipart upload and upload one or more parts, you must
+ * either complete or abort the multipart upload to stop getting charged for
+ * storage of the uploaded parts. Only after you either complete or abort the
+ * multipart upload does Amazon S3 free up the parts storage and stop charging you
+ * for it.
+ *
+ * For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide.
+ *
+ * Directory buckets - For directory buckets, you must make requests for this API
+ * operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+ * requests in the format
+ * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
+ * requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User
+ * Guide.
+ *
+ * Permissions
+ * ```
+ * - General purpose bucket permissions - To perform a multipart upload with
+ * encryption using a Key Management Service key, the requester must have
+ * permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The
+ * requester must also have permissions for the kms:GenerateDataKey action for
+ * the CreateMultipartUpload API. Then, the requester needs permissions for the
+ * kms:Decrypt action on the UploadPart and UploadPartCopy APIs.
+ * ```
+ *
+ * These permissions are required because Amazon S3 must decrypt and read data
+ *
+ * ```
+ * from the encrypted file parts before it completes the multipart upload. For more
+ * information about KMS permissions, see [Protecting data using server-side encryption with KMS]in the Amazon S3 User Guide. For
+ * information about the permissions required to use the multipart upload API, see [Multipart upload and permissions]
+ * and [Multipart upload API and permissions]in the Amazon S3 User Guide.
+ *
+ * - Directory bucket permissions - To grant access to this API operation on a
+ * directory bucket, we recommend that you use the [CreateSession] API operation
+ * for session-based authorization. Specifically, you grant the
+ * s3express:CreateSession permission to the directory bucket in a bucket policy
+ * or an IAM identity-based policy. Then, you make the CreateSession API call on
+ * the bucket to obtain a session token. With the session token in your request
+ * header, you can make API requests to this operation. After the session token
+ * expires, you make another CreateSession API call to generate a new session
+ * token for use. The Amazon Web Services CLI and SDKs create and refresh the
+ * session token automatically to avoid service interruptions when a session
+ * expires. For more information about authorization, see [CreateSession].
+ * ```
+ *
+ * If the object is encrypted with SSE-KMS, you must also have the
+ *
+ * ```
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+ * and KMS key policies for the KMS key.
+ * ```
+ *
+ * Data integrity General purpose bucket - To ensure that data is not corrupted
+ * traversing the network, specify the Content-MD5 header in the upload part
+ * request. Amazon S3 checks the part data against the provided MD5 value.
If they + * do not match, Amazon S3 returns an error. If the upload request is signed with + * Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 + * header as a checksum instead of Content-MD5 . For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]. + * + * Directory buckets - MD5 is not supported by directory buckets. You can use + * checksum algorithms to check object integrity. + * + * Encryption + * ``` + * - General purpose bucket - Server-side encryption is for data encryption at + * rest. Amazon S3 encrypts your data as it writes it to disks in its data centers + * and decrypts it when you access it. You have mutually exclusive options to + * protect data using server-side encryption in Amazon S3, depending on how you + * choose to manage the encryption keys. Specifically, the encryption key options + * are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and + * Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side + * encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally + * tell Amazon S3 to encrypt data at rest using server-side encryption with other + * key options. The option you use depends on whether you want to use KMS keys + * (SSE-KMS) or provide your own encryption key (SSE-C). + * ``` + * + * Server-side encryption is supported by the S3 Multipart Upload operations. + * + * ``` + * Unless you are using a customer-provided encryption key (SSE-C), you don't need + * to specify the encryption parameters in each UploadPart request. Instead, you + * only need to specify the server-side encryption parameters in the initial + * Initiate Multipart request. For more information, see [CreateMultipartUpload]. + * ``` + * + * If you request server-side encryption using a customer-provided encryption key + * + * ``` + * (SSE-C) in your initiate multipart upload request, you must provide identical + * encryption information in each part upload using the following request headers. + * + * - x-amz-server-side-encryption-customer-algorithm + * + * - x-amz-server-side-encryption-customer-key + * + * - x-amz-server-side-encryption-customer-key-MD5 + * ``` + * + * For more information, see [Using Server-Side Encryption]in the Amazon S3 User Guide. + * + * ``` + * - Directory buckets - For directory buckets, there are only two supported + * options for server-side encryption: server-side encryption with Amazon S3 + * managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + * (SSE-KMS) ( aws:kms ). + * ``` + * + * Special errors + * + * ``` + * - Error Code: NoSuchUpload + * + * - Description: The specified multipart upload does not exist. The upload ID + * might be invalid, or the multipart upload might have been aborted or completed. + * + * - HTTP Status Code: 404 Not Found + * + * - SOAP Fault Code Prefix: Client + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . 
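+ *
+ * A minimal, illustrative sketch of uploading the first part of a multipart
+ * upload (it assumes an already initialized Client client, a ctx context value,
+ * an uploadId returned by a prior createMultipartUpload call, and a partBody
+ * value; the input shape is assumed to mirror the Go SDK's UploadPartInput):
+ *
+ * ```
+ * const part = client.uploadPart(ctx, {
+ *     Bucket:     "my-bucket",  // placeholder
+ *     Key:        "big/object", // placeholder
+ *     UploadId:   uploadId,     // from createMultipartUpload
+ *     PartNumber: 1,            // 1-10,000; reusing a number overwrites that part
+ *     Body:       partBody,     // assumed io.Reader-compatible value
+ * })
+ * // part.ETag must be recorded and later passed to completeMultipartUpload
+ * ```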
+ * + * The following operations are related to UploadPart : + * + * [CreateMultipartUpload] + * + * [CompleteMultipartUpload] + * + * [AbortMultipartUpload] + * + * [ListParts] + * + * [ListMultipartUploads] + * + * [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + * [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html + * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + * [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + * [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html + * [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html + * [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + * [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html + * [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + * + * [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html + * [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html + * [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html + * [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions + */ + uploadPart(ctx: context.Context, params: UploadPartInput, ...optFns: ((_arg0: Options) => void)[]): (UploadPartOutput) + } + interface Client { + /** + * Uploads a part by copying data from an existing object as data source. To + * specify the data source, you add the request header x-amz-copy-source in your + * request. To specify a byte range, you add the request header + * x-amz-copy-source-range in your request. + * + * For information about maximum and minimum part sizes and other multipart upload + * specifications, see [Multipart upload limits]in the Amazon S3 User Guide. + * + * Instead of copying data from an existing object as part data, you might use the [UploadPart] + * action to upload new data as a part of an object in your request. + * + * You must initiate a multipart upload before you can upload any part. In + * response to your initiate request, Amazon S3 returns the upload ID, a unique + * identifier that you must include in your upload part request. + * + * For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User + * Guide. For information about copying objects using a single atomic action vs. a + * multipart upload, see [Operations on Objects]in the Amazon S3 User Guide. + * + * Directory buckets - For directory buckets, you must make requests for this API + * operation to the Zonal endpoint. These endpoints support virtual-hosted-style + * requests in the format + * https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style + * requests are not supported. 
For more information, see [Regional and Zonal endpoints]in the Amazon S3 User
+ * Guide.
+ *
+ * Authentication and authorization All UploadPartCopy requests must be
+ * authenticated and signed by using IAM credentials (access key ID and secret
+ * access key for the IAM identities). All headers with the x-amz- prefix,
+ * including x-amz-copy-source , must be signed. For more information, see [REST Authentication].
+ *
+ * Directory buckets - You must use IAM credentials to authenticate and authorize
+ * your access to the UploadPartCopy API operation, instead of using the temporary
+ * security credentials through the CreateSession API operation.
+ *
+ * The Amazon Web Services CLI and SDKs handle authentication and authorization on
+ * your behalf.
+ *
+ * Permissions You must have READ access to the source object and WRITE access to
+ * the destination bucket.
+ *
+ * ```
+ * - General purpose bucket permissions - You must have the permissions in a
+ * policy based on the bucket types of your source bucket and destination bucket in
+ * an UploadPartCopy operation.
+ *
+ * - If the source object is in a general purpose bucket, you must have the
+ * s3:GetObject permission to read the source object that is being copied.
+ *
+ * - If the destination bucket is a general purpose bucket, you must have the
+ * s3:PutObject permission to write the object copy to the destination bucket.
+ *
+ * - To perform a multipart upload with encryption using a Key Management
+ * Service key, the requester must have permission to the kms:Decrypt and
+ * kms:GenerateDataKey actions on the key. The requester must also have
+ * permissions for the kms:GenerateDataKey action for the CreateMultipartUpload
+ * API. Then, the requester needs permissions for the kms:Decrypt action on the
+ * UploadPart and UploadPartCopy APIs. These permissions are required because
+ * Amazon S3 must decrypt and read data from the encrypted file parts before it
+ * completes the multipart upload. For more information about KMS permissions, see [Protecting data using server-side encryption with KMS]
+ * in the Amazon S3 User Guide. For information about the permissions required to
+ * use the multipart upload API, see [Multipart upload and permissions]and [Multipart upload API and permissions]in the Amazon S3 User Guide.
+ *
+ * - Directory bucket permissions - You must have permissions in a bucket policy
+ * or an IAM identity-based policy based on the source and destination bucket types
+ * in an UploadPartCopy operation.
+ *
+ * - If the source object that you want to copy is in a directory bucket, you
+ * must have the s3express:CreateSession permission in the Action element of a
+ * policy to read the object. By default, the session is in the ReadWrite mode.
+ * If you want to restrict the access, you can explicitly set the
+ * s3express:SessionMode condition key to ReadOnly on the copy source bucket.
+ *
+ * - If the copy destination is a directory bucket, you must have the
+ * s3express:CreateSession permission in the Action element of a policy to write
+ * the object to the destination. The s3express:SessionMode condition key cannot
+ * be set to ReadOnly on the copy destination.
+ * ```
+ *
+ * If the object is encrypted with SSE-KMS, you must also have the
+ *
+ * ```
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+ * and KMS key policies for the KMS key.
+ * ``` + * + * For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. + * + * Encryption + * ``` + * - General purpose buckets - + * ``` + * + * For information about using server-side encryption with customer-provided + * + * ``` + * encryption keys with the UploadPartCopy operation, see [CopyObject]and [UploadPart]. + * + * - Directory buckets - For directory buckets, there are only two supported + * options for server-side encryption: server-side encryption with Amazon S3 + * managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + * (SSE-KMS) ( aws:kms ). For more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. + * ``` + * + * For directory buckets, when you perform a CreateMultipartUpload operation and an + * + * ``` + * UploadPartCopy operation, + * ``` + * + * the request headers you provide in the CreateMultipartUpload request must match + * + * ``` + * the default encryption configuration of the destination bucket. + * ``` + * + * S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from + * + * ``` + * general purpose buckets + * ``` + * + * to directory buckets, from directory buckets to general purpose buckets, or + * + * ``` + * between directory buckets, through [UploadPartCopy]. In this case, Amazon S3 makes a call to + * KMS every time a copy request is made for a KMS-encrypted object. + * ``` + * + * Special errors + * + * ``` + * - Error Code: NoSuchUpload + * + * - Description: The specified multipart upload does not exist. The upload ID + * might be invalid, or the multipart upload might have been aborted or completed. + * + * - HTTP Status Code: 404 Not Found + * + * - Error Code: InvalidRequest + * + * - Description: The specified copy source is not supported as a byte-range + * copy source. + * + * - HTTP Status Code: 400 Bad Request + * ``` + * + * HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + * Bucket_name.s3express-az_id.region.amazonaws.com . 
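+ *
+ * A minimal, illustrative sketch of copying a byte range from an existing
+ * object into part 2 of a multipart upload (it assumes an already initialized
+ * Client client, a ctx context value and an uploadId from a prior
+ * createMultipartUpload call; the names are placeholders and the input shape is
+ * assumed to mirror the Go SDK's UploadPartCopyInput):
+ *
+ * ```
+ * const part = client.uploadPartCopy(ctx, {
+ *     Bucket:          "dst-bucket",            // placeholder destination bucket
+ *     Key:             "big/object",            // placeholder destination key
+ *     UploadId:        uploadId,
+ *     PartNumber:      2,
+ *     CopySource:      "src-bucket/src/object", // URL-encoded "bucket/key" source
+ *     CopySourceRange: "bytes=0-5242879",       // first 5 MiB of the source object
+ * })
+ * ```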
+ *
+ * The following operations are related to UploadPartCopy :
+ *
+ * [CreateMultipartUpload]
+ *
+ * [UploadPart]
+ *
+ * [CompleteMultipartUpload]
+ *
+ * [AbortMultipartUpload]
+ *
+ * [ListParts]
+ *
+ * [ListMultipartUploads]
+ *
+ * [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+ * [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+ * [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+ * [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+ * [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html
+ * [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+ * [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions
+ * [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+ * [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+ * [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html
+ * [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+ * [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+ * [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+ * [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+ * [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html
+ * [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+ *
+ * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+ */
+ uploadPartCopy(ctx: context.Context, params: UploadPartCopyInput, ...optFns: ((_arg0: Options) => void)[]): (UploadPartCopyOutput)
+ }
+ interface Client {
+ /**
+ * This operation is not supported by directory buckets.
+ *
+ * Passes transformed objects to a GetObject operation when using Object Lambda
+ * access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points]in the
+ * Amazon S3 User Guide.
+ *
+ * This operation supports metadata that can be returned by [GetObject], in addition to
+ * RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . The
+ * GetObject response metadata is supported so that the WriteGetObjectResponse
+ * caller, typically a Lambda function, can provide the same metadata when it
+ * internally invokes GetObject . When WriteGetObjectResponse is called by a
+ * customer-owned Lambda function, the metadata returned to the end user GetObject
+ * call might differ from what Amazon S3 would normally return.
+ *
+ * You can include any number of metadata headers.
When including a metadata + * header, it should be prefaced with x-amz-meta . For example, + * x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to + * forward GetObject metadata. + * + * Amazon Web Services provides some prebuilt Lambda functions that you can use + * with S3 Object Lambda to detect and redact personally identifiable information + * (PII) and decompress S3 objects. These Lambda functions are available in the + * Amazon Web Services Serverless Application Repository, and can be selected + * through the Amazon Web Services Management Console when you create your Object + * Lambda access point. + * + * Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a + * natural language processing (NLP) service using machine learning to find + * insights and relationships in text. It automatically detects personally + * identifiable information (PII) such as names, addresses, dates, credit card + * numbers, and social security numbers from documents in your Amazon S3 bucket. + * + * Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a + * natural language processing (NLP) service using machine learning to find + * insights and relationships in text. It automatically redacts personally + * identifiable information (PII) such as names, addresses, dates, credit card + * numbers, and social security numbers from documents in your Amazon S3 bucket. + * + * Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is + * equipped to decompress objects stored in S3 in one of six compressed file + * formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. + * + * For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3 + * User Guide. + * + * [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html + * [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html + * [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + */ + writeGetObjectResponse(ctx: context.Context, params: WriteGetObjectResponseInput, ...optFns: ((_arg0: Options) => void)[]): (WriteGetObjectResponseOutput) + } + // @ts-ignore + import internalcontext = context + // @ts-ignore + import awsxml = xml + // @ts-ignore + import internalendpoints = endpoints + // @ts-ignore + import smithyendpoints = endpoints + // @ts-ignore + import v4 = signer +} + /** * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html * @@ -10213,6 +16906,18 @@ namespace jwt { } } +/** + * Package gcerrors provides support for getting error codes from + * errors returned by Go CDK APIs. + */ +namespace gcerrors { + /** + * An ErrorCode describes the error's category. Programs should act upon an error's + * code, not its message. + */ + interface ErrorCode extends gcerr.ErrorCode{} +} + /** * Package blob provides an easy and portable way to interact with blobs * within a storage location. Subpackages contain driver implementations of @@ -10442,6 +17147,250 @@ namespace blob { as(i: { }): boolean } + /** + * Bucket provides an easy and portable way to interact with blobs + * within a "bucket", including read, write, and list operations. + * To create a Bucket, use constructors found in driver subpackages. 
+ */ + interface Bucket { + } + interface Bucket { + /** + * As converts i to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information, the "As" + * examples in this package for examples, and the driver package + * documentation for the specific types supported for that driver. + */ + as(i: { + }): boolean + } + interface Bucket { + /** + * ErrorAs converts err to driver-specific types. + * ErrorAs panics if i is nil or not a pointer. + * ErrorAs returns false if err == nil. + * See https://gocloud.dev/concepts/as/ for background information. + */ + errorAs(err: Error, i: { + }): boolean + } + interface Bucket { + /** + * ReadAll is a shortcut for creating a Reader via NewReader with nil + * ReaderOptions, and reading the entire blob. + * + * Using Download may be more efficient. + */ + readAll(ctx: context.Context, key: string): string|Array + } + interface Bucket { + /** + * Download writes the content of a blob into an io.Writer w. + */ + download(ctx: context.Context, key: string, w: io.Writer, opts: ReaderOptions): void + } + interface Bucket { + /** + * List returns a ListIterator that can be used to iterate over blobs in a + * bucket, in lexicographical order of UTF-8 encoded keys. The underlying + * implementation fetches results in pages. + * + * A nil ListOptions is treated the same as the zero value. + * + * List is not guaranteed to include all recently-written blobs; + * some services are only eventually consistent. + */ + list(opts: ListOptions): (ListIterator) + } + interface Bucket { + /** + * ListPage returns a page of ListObject results for blobs in a bucket, in lexicographical + * order of UTF-8 encoded keys. + * + * To fetch the first page, pass FirstPageToken as the pageToken. For subsequent pages, pass + * the pageToken returned from a previous call to ListPage. + * It is not possible to "skip ahead" pages. + * + * Each call will return pageSize results, unless there are not enough blobs to fill the + * page, in which case it will return fewer results (possibly 0). + * + * If there are no more blobs available, ListPage will return an empty pageToken. Note that + * this may happen regardless of the number of returned results -- the last page might have + * 0 results (i.e., if the last item was deleted), pageSize results, or anything in between. + * + * Calling ListPage with an empty pageToken will immediately return io.EOF. When looping + * over pages, callers can either check for an empty pageToken, or they can make one more + * call and check for io.EOF. + * + * The underlying implementation fetches results in pages, but one call to ListPage may + * require multiple page fetches (and therefore, multiple calls to the BeforeList callback). + * + * A nil ListOptions is treated the same as the zero value. + * + * ListPage is not guaranteed to include all recently-written blobs; + * some services are only eventually consistent. + */ + listPage(ctx: context.Context, pageToken: string|Array, pageSize: number, opts: ListOptions): [Array<(ListObject | undefined)>, string|Array] + } + interface Bucket { + /** + * IsAccessible returns true if the bucket is accessible, false otherwise. + * It is a shortcut for calling ListPage and checking if it returns an error + * with code gcerrors.NotFound. + */ + isAccessible(ctx: context.Context): boolean + } + interface Bucket { + /** + * Exists returns true if a blob exists at key, false if it does not exist, or + * an error. 
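+ * 
+ * For example (a sketch under the same assumptions as the Bucket example above,
+ * i.e. `bucket` and `ctx` come from the host application):
+ * 
+ * ```
+ * if (!bucket.exists(ctx, "avatar.png")) {
+ *     console.log("avatar.png is missing")
+ * }
+ * ```
+ * 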
+ * It is a shortcut for calling Attributes and checking if it returns an error + * with code gcerrors.NotFound. + */ + exists(ctx: context.Context, key: string): boolean + } + interface Bucket { + /** + * Attributes returns attributes for the blob stored at key. + * + * If the blob does not exist, Attributes returns an error for which + * gcerrors.Code will return gcerrors.NotFound. + */ + attributes(ctx: context.Context, key: string): (Attributes) + } + interface Bucket { + /** + * NewReader is a shortcut for NewRangeReader with offset=0 and length=-1. + */ + newReader(ctx: context.Context, key: string, opts: ReaderOptions): (Reader) + } + interface Bucket { + /** + * NewRangeReader returns a Reader to read content from the blob stored at key. + * It reads at most length bytes starting at offset (>= 0). + * If length is negative, it will read till the end of the blob. + * + * For the purposes of Seek, the returned Reader will start at offset and + * end at the minimum of the actual end of the blob or (if length > 0) offset + length. + * + * Note that ctx is used for all reads performed during the lifetime of the reader. + * + * If the blob does not exist, NewRangeReader returns an error for which + * gcerrors.Code will return gcerrors.NotFound. Exists is a lighter-weight way + * to check for existence. + * + * A nil ReaderOptions is treated the same as the zero value. + * + * The caller must call Close on the returned Reader when done reading. + */ + newRangeReader(ctx: context.Context, key: string, offset: number, length: number, opts: ReaderOptions): (Reader) + } + interface Bucket { + /** + * WriteAll is a shortcut for creating a Writer via NewWriter and writing p. + * + * If opts.ContentMD5 is not set, WriteAll will compute the MD5 of p and use it + * as the ContentMD5 option for the Writer it creates. + * + * Using Upload may be more efficient. + */ + writeAll(ctx: context.Context, key: string, p: string|Array, opts: WriterOptions): void + } + interface Bucket { + /** + * Upload reads from an io.Reader r and writes into a blob. + * + * opts.ContentType is required. + */ + upload(ctx: context.Context, key: string, r: io.Reader, opts: WriterOptions): void + } + interface Bucket { + /** + * NewWriter returns a Writer that writes to the blob stored at key. + * A nil WriterOptions is treated the same as the zero value. + * + * If a blob with this key already exists, it will be replaced. + * The blob being written is not guaranteed to be readable until Close + * has been called; until then, any previous blob will still be readable. + * Even after Close is called, newly written blobs are not guaranteed to be + * returned from List; some services are only eventually consistent. + * + * The returned Writer will store ctx for later use in Write and/or Close. + * To abort a write, cancel ctx; otherwise, it must remain open until + * Close is called. + * + * The caller must call Close on the returned Writer, even if the write is + * aborted. + */ + newWriter(ctx: context.Context, key: string, opts: WriterOptions): (Writer) + } + interface Bucket { + /** + * Copy the blob stored at srcKey to dstKey. + * A nil CopyOptions is treated the same as the zero value. + * + * If the source blob does not exist, Copy returns an error for which + * gcerrors.Code will return gcerrors.NotFound. + * + * If the destination blob already exists, it is overwritten. 
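+ * 
+ * A hypothetical sketch (again assuming `bucket` and `ctx` from the host
+ * application; note that the destination key comes first):
+ * 
+ * ```
+ * // duplicate "a.txt" as "b.txt", replacing any existing "b.txt"
+ * bucket.copy(ctx, "b.txt", "a.txt", {})
+ * ```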
+ */ + copy(ctx: context.Context, dstKey: string, srcKey: string, opts: CopyOptions): void + } + interface Bucket { + /** + * Delete deletes the blob stored at key. + * + * If the blob does not exist, Delete returns an error for which + * gcerrors.Code will return gcerrors.NotFound. + */ + delete(ctx: context.Context, key: string): void + } + interface Bucket { + /** + * SignedURL returns a URL that can be used to GET (default), PUT or DELETE + * the blob for the duration specified in opts.Expiry. + * + * A nil SignedURLOptions is treated the same as the zero value. + * + * It is valid to call SignedURL for a key that does not exist. + * + * If the driver does not support this functionality, SignedURL + * will return an error for which gcerrors.Code will return gcerrors.Unimplemented. + */ + signedURL(ctx: context.Context, key: string, opts: SignedURLOptions): string + } + interface Bucket { + /** + * Close releases any resources used for the bucket. + */ + close(): void + } + interface Bucket { + /** + * SetIOFSCallback sets a callback that is used during Open and calls on the objects + * returned from Open. + * + * fn should return a context.Context and *ReaderOptions that can be used in + * calls to List and NewReader on b. It may be called more than once. + */ + setIOFSCallback(fn: () => [context.Context, (ReaderOptions)]): void + } + interface Bucket { + /** + * Open implements fs.FS.Open (https://pkg.go.dev/io/fs#FS). + * + * SetIOFSCallback must be called prior to calling this function. + */ + open(path: string): fs.File + } + interface Bucket { + /** + * Sub implements fs.SubFS.Sub. + * + * SetIOFSCallback must be called prior to calling this function. + */ + sub(dir: string): fs.FS + } } /** @@ -10625,8 +17574,8 @@ namespace schema { * Package models implements all PocketBase DB models and DTOs. */ namespace models { - type _subxyKhr = BaseModel - interface Admin extends _subxyKhr { + type _subTPaAx = BaseModel + interface Admin extends _subTPaAx { avatar: number email: string tokenKey: string @@ -10661,8 +17610,8 @@ namespace models { } // @ts-ignore import validation = ozzo_validation - type _subUyFbk = BaseModel - interface Collection extends _subUyFbk { + type _subxVwLG = BaseModel + interface Collection extends _subxVwLG { name: string type: string system: boolean @@ -10755,8 +17704,8 @@ namespace models { */ setOptions(typedOptions: any): void } - type _subeDKbD = BaseModel - interface ExternalAuth extends _subeDKbD { + type _subnirpX = BaseModel + interface ExternalAuth extends _subnirpX { collectionId: string recordId: string provider: string @@ -10765,8 +17714,8 @@ namespace models { interface ExternalAuth { tableName(): string } - type _subkdarp = BaseModel - interface Record extends _subkdarp { + type _subbYdTV = BaseModel + interface Record extends _subbYdTV { } interface Record { /** @@ -11120,6 +18069,20 @@ namespace models { */ setLastVerificationSentAt(dateTime: types.DateTime): void } + interface Record { + /** + * LastLoginAlertSentAt returns the "lastLoginAlertSentAt" auth record data value. + */ + lastLoginAlertSentAt(): types.DateTime + } + interface Record { + /** + * SetLastLoginAlertSentAt sets an "lastLoginAlertSentAt" auth record data value. + * + * Returns an error if the record is not from an auth collection. + */ + setLastLoginAlertSentAt(dateTime: types.DateTime): void + } interface Record { /** * PasswordHash returns the "passwordHash" auth record data value. 
@@ -11741,1071 +18704,6 @@ namespace echo { } } -/** - * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. - * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. - */ -namespace cobra { - interface Command { - /** - * GenBashCompletion generates bash completion file and writes to the passed writer. - */ - genBashCompletion(w: io.Writer): void - } - interface Command { - /** - * GenBashCompletionFile generates bash completion file. - */ - genBashCompletionFile(filename: string): void - } - interface Command { - /** - * GenBashCompletionFileV2 generates Bash completion version 2. - */ - genBashCompletionFileV2(filename: string, includeDesc: boolean): void - } - interface Command { - /** - * GenBashCompletionV2 generates Bash completion file version 2 - * and writes it to the passed writer. - */ - genBashCompletionV2(w: io.Writer, includeDesc: boolean): void - } - // @ts-ignore - import flag = pflag - /** - * Command is just that, a command for your application. - * E.g. 'go run ...' - 'run' is the command. Cobra requires - * you to define the usage and description as part of your command - * definition to ensure usability. - */ - interface Command { - /** - * Use is the one-line usage message. - * Recommended syntax is as follows: - * ``` - * [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. - * ... indicates that you can specify multiple values for the previous argument. - * | indicates mutually exclusive information. You can use the argument to the left of the separator or the - * argument to the right of the separator. You cannot use both arguments in a single use of the command. - * { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are - * optional, they are enclosed in brackets ([ ]). - * ``` - * Example: add [-F file | -D dir]... [-f format] profile - */ - use: string - /** - * Aliases is an array of aliases that can be used instead of the first word in Use. - */ - aliases: Array - /** - * SuggestFor is an array of command names for which this command will be suggested - - * similar to aliases but only suggests. - */ - suggestFor: Array - /** - * Short is the short description shown in the 'help' output. - */ - short: string - /** - * The group id under which this subcommand is grouped in the 'help' output of its parent. - */ - groupID: string - /** - * Long is the long message shown in the 'help ' output. - */ - long: string - /** - * Example is examples of how to use the command. - */ - example: string - /** - * ValidArgs is list of all valid non-flag arguments that are accepted in shell completions - */ - validArgs: Array - /** - * ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. - * It is a dynamic version of using ValidArgs. - * Only one of ValidArgs and ValidArgsFunction can be used for a command. - */ - validArgsFunction: (cmd: Command, args: Array, toComplete: string) => [Array, ShellCompDirective] - /** - * Expected arguments - */ - args: PositionalArgs - /** - * ArgAliases is List of aliases for ValidArgs. - * These are not suggested to the user in the shell completion, - * but accepted if entered manually. - */ - argAliases: Array - /** - * BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. 
- * For portability with other shells, it is recommended to instead use ValidArgsFunction - */ - bashCompletionFunction: string - /** - * Deprecated defines, if this command is deprecated and should print this string when used. - */ - deprecated: string - /** - * Annotations are key/value pairs that can be used by applications to identify or - * group commands or set special options. - */ - annotations: _TygojaDict - /** - * Version defines the version for this command. If this value is non-empty and the command does not - * define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - * will print content of the "Version" variable. A shorthand "v" flag will also be added if the - * command does not define one. - */ - version: string - /** - * The *Run functions are executed in the following order: - * ``` - * * PersistentPreRun() - * * PreRun() - * * Run() - * * PostRun() - * * PersistentPostRun() - * ``` - * All functions get the same args, the arguments after the command name. - * The *PreRun and *PostRun functions will only be executed if the Run function of the current - * command has been declared. - * - * PersistentPreRun: children of this command will inherit and execute. - */ - persistentPreRun: (cmd: Command, args: Array) => void - /** - * PersistentPreRunE: PersistentPreRun but returns an error. - */ - persistentPreRunE: (cmd: Command, args: Array) => void - /** - * PreRun: children of this command will not inherit. - */ - preRun: (cmd: Command, args: Array) => void - /** - * PreRunE: PreRun but returns an error. - */ - preRunE: (cmd: Command, args: Array) => void - /** - * Run: Typically the actual work function. Most commands will only implement this. - */ - run: (cmd: Command, args: Array) => void - /** - * RunE: Run but returns an error. - */ - runE: (cmd: Command, args: Array) => void - /** - * PostRun: run after the Run command. - */ - postRun: (cmd: Command, args: Array) => void - /** - * PostRunE: PostRun but returns an error. - */ - postRunE: (cmd: Command, args: Array) => void - /** - * PersistentPostRun: children of this command will inherit and execute after PostRun. - */ - persistentPostRun: (cmd: Command, args: Array) => void - /** - * PersistentPostRunE: PersistentPostRun but returns an error. - */ - persistentPostRunE: (cmd: Command, args: Array) => void - /** - * FParseErrWhitelist flag parse errors to be ignored - */ - fParseErrWhitelist: FParseErrWhitelist - /** - * CompletionOptions is a set of options to control the handling of shell completion - */ - completionOptions: CompletionOptions - /** - * TraverseChildren parses flags on all parents before executing child command. - */ - traverseChildren: boolean - /** - * Hidden defines, if this command is hidden and should NOT show up in the list of available commands. - */ - hidden: boolean - /** - * SilenceErrors is an option to quiet errors down stream. - */ - silenceErrors: boolean - /** - * SilenceUsage is an option to silence usage when an error occurs. - */ - silenceUsage: boolean - /** - * DisableFlagParsing disables the flag parsing. - * If this is true all flags will be passed to the command as arguments. - */ - disableFlagParsing: boolean - /** - * DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - * will be printed by generating docs for this command. 
- */ - disableAutoGenTag: boolean - /** - * DisableFlagsInUseLine will disable the addition of [flags] to the usage - * line of a command when printing help or generating docs - */ - disableFlagsInUseLine: boolean - /** - * DisableSuggestions disables the suggestions based on Levenshtein distance - * that go along with 'unknown command' messages. - */ - disableSuggestions: boolean - /** - * SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - * Must be > 0. - */ - suggestionsMinimumDistance: number - } - interface Command { - /** - * Context returns underlying command context. If command was executed - * with ExecuteContext or the context was set with SetContext, the - * previously set context will be returned. Otherwise, nil is returned. - * - * Notice that a call to Execute and ExecuteC will replace a nil context of - * a command with a context.Background, so a background context will be - * returned by Context after one of these functions has been called. - */ - context(): context.Context - } - interface Command { - /** - * SetContext sets context for the command. This context will be overwritten by - * Command.ExecuteContext or Command.ExecuteContextC. - */ - setContext(ctx: context.Context): void - } - interface Command { - /** - * SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden - * particularly useful when testing. - */ - setArgs(a: Array): void - } - interface Command { - /** - * SetOutput sets the destination for usage and error messages. - * If output is nil, os.Stderr is used. - * Deprecated: Use SetOut and/or SetErr instead - */ - setOutput(output: io.Writer): void - } - interface Command { - /** - * SetOut sets the destination for usage messages. - * If newOut is nil, os.Stdout is used. - */ - setOut(newOut: io.Writer): void - } - interface Command { - /** - * SetErr sets the destination for error messages. - * If newErr is nil, os.Stderr is used. - */ - setErr(newErr: io.Writer): void - } - interface Command { - /** - * SetIn sets the source for input data - * If newIn is nil, os.Stdin is used. - */ - setIn(newIn: io.Reader): void - } - interface Command { - /** - * SetUsageFunc sets usage function. Usage can be defined by application. - */ - setUsageFunc(f: (_arg0: Command) => void): void - } - interface Command { - /** - * SetUsageTemplate sets usage template. Can be defined by Application. - */ - setUsageTemplate(s: string): void - } - interface Command { - /** - * SetFlagErrorFunc sets a function to generate an error when flag parsing - * fails. - */ - setFlagErrorFunc(f: (_arg0: Command, _arg1: Error) => void): void - } - interface Command { - /** - * SetHelpFunc sets help function. Can be defined by Application. - */ - setHelpFunc(f: (_arg0: Command, _arg1: Array) => void): void - } - interface Command { - /** - * SetHelpCommand sets help command. - */ - setHelpCommand(cmd: Command): void - } - interface Command { - /** - * SetHelpCommandGroupID sets the group id of the help command. - */ - setHelpCommandGroupID(groupID: string): void - } - interface Command { - /** - * SetCompletionCommandGroupID sets the group id of the completion command. - */ - setCompletionCommandGroupID(groupID: string): void - } - interface Command { - /** - * SetHelpTemplate sets help template to be used. Application can use it to set custom template. - */ - setHelpTemplate(s: string): void - } - interface Command { - /** - * SetVersionTemplate sets version template to be used. 
Application can use it to set custom template. - */ - setVersionTemplate(s: string): void - } - interface Command { - /** - * SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. - */ - setErrPrefix(s: string): void - } - interface Command { - /** - * SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. - * The user should not have a cyclic dependency on commands. - */ - setGlobalNormalizationFunc(n: (f: any, name: string) => any): void - } - interface Command { - /** - * OutOrStdout returns output to stdout. - */ - outOrStdout(): io.Writer - } - interface Command { - /** - * OutOrStderr returns output to stderr - */ - outOrStderr(): io.Writer - } - interface Command { - /** - * ErrOrStderr returns output to stderr - */ - errOrStderr(): io.Writer - } - interface Command { - /** - * InOrStdin returns input to stdin - */ - inOrStdin(): io.Reader - } - interface Command { - /** - * UsageFunc returns either the function set by SetUsageFunc for this command - * or a parent, or it returns a default usage function. - */ - usageFunc(): (_arg0: Command) => void - } - interface Command { - /** - * Usage puts out the usage for the command. - * Used when a user provides invalid input. - * Can be defined by user by overriding UsageFunc. - */ - usage(): void - } - interface Command { - /** - * HelpFunc returns either the function set by SetHelpFunc for this command - * or a parent, or it returns a function with default help behavior. - */ - helpFunc(): (_arg0: Command, _arg1: Array) => void - } - interface Command { - /** - * Help puts out the help for the command. - * Used when a user calls help [command]. - * Can be defined by user by overriding HelpFunc. - */ - help(): void - } - interface Command { - /** - * UsageString returns usage string. - */ - usageString(): string - } - interface Command { - /** - * FlagErrorFunc returns either the function set by SetFlagErrorFunc for this - * command or a parent, or it returns a function which returns the original - * error. - */ - flagErrorFunc(): (_arg0: Command, _arg1: Error) => void - } - interface Command { - /** - * UsagePadding return padding for the usage. - */ - usagePadding(): number - } - interface Command { - /** - * CommandPathPadding return padding for the command path. - */ - commandPathPadding(): number - } - interface Command { - /** - * NamePadding returns padding for the name. - */ - namePadding(): number - } - interface Command { - /** - * UsageTemplate returns usage template for the command. - */ - usageTemplate(): string - } - interface Command { - /** - * HelpTemplate return help template for the command. - */ - helpTemplate(): string - } - interface Command { - /** - * VersionTemplate return version template for the command. - */ - versionTemplate(): string - } - interface Command { - /** - * ErrPrefix return error message prefix for the command - */ - errPrefix(): string - } - interface Command { - /** - * Find the target command given the args and command tree - * Meant to be run on the highest node. Only searches down. - */ - find(args: Array): [(Command), Array] - } - interface Command { - /** - * Traverse the command tree to find the command, and parse args for - * each parent. - */ - traverse(args: Array): [(Command), Array] - } - interface Command { - /** - * SuggestionsFor provides suggestions for the typedName. 
- */ - suggestionsFor(typedName: string): Array - } - interface Command { - /** - * VisitParents visits all parents of the command and invokes fn on each parent. - */ - visitParents(fn: (_arg0: Command) => void): void - } - interface Command { - /** - * Root finds root command. - */ - root(): (Command) - } - interface Command { - /** - * ArgsLenAtDash will return the length of c.Flags().Args at the moment - * when a -- was found during args parsing. - */ - argsLenAtDash(): number - } - interface Command { - /** - * ExecuteContext is the same as Execute(), but sets the ctx on the command. - * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs - * functions. - */ - executeContext(ctx: context.Context): void - } - interface Command { - /** - * Execute uses the args (os.Args[1:] by default) - * and run through the command tree finding appropriate matches - * for commands and then corresponding flags. - */ - execute(): void - } - interface Command { - /** - * ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. - * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs - * functions. - */ - executeContextC(ctx: context.Context): (Command) - } - interface Command { - /** - * ExecuteC executes the command. - */ - executeC(): (Command) - } - interface Command { - validateArgs(args: Array): void - } - interface Command { - /** - * ValidateRequiredFlags validates all required flags are present and returns an error otherwise - */ - validateRequiredFlags(): void - } - interface Command { - /** - * InitDefaultHelpFlag adds default help flag to c. - * It is called automatically by executing the c or by calling help and usage. - * If c already has help flag, it will do nothing. - */ - initDefaultHelpFlag(): void - } - interface Command { - /** - * InitDefaultVersionFlag adds default version flag to c. - * It is called automatically by executing the c. - * If c already has a version flag, it will do nothing. - * If c.Version is empty, it will do nothing. - */ - initDefaultVersionFlag(): void - } - interface Command { - /** - * InitDefaultHelpCmd adds default help command to c. - * It is called automatically by executing the c or by calling help and usage. - * If c already has help command or c has no subcommands, it will do nothing. - */ - initDefaultHelpCmd(): void - } - interface Command { - /** - * ResetCommands delete parent, subcommand and help command from c. - */ - resetCommands(): void - } - interface Command { - /** - * Commands returns a sorted slice of child commands. - */ - commands(): Array<(Command | undefined)> - } - interface Command { - /** - * AddCommand adds one or more commands to this parent command. - */ - addCommand(...cmds: (Command | undefined)[]): void - } - interface Command { - /** - * Groups returns a slice of child command groups. - */ - groups(): Array<(Group | undefined)> - } - interface Command { - /** - * AllChildCommandsHaveGroup returns if all subcommands are assigned to a group - */ - allChildCommandsHaveGroup(): boolean - } - interface Command { - /** - * ContainsGroup return if groupID exists in the list of command groups. - */ - containsGroup(groupID: string): boolean - } - interface Command { - /** - * AddGroup adds one or more command groups to this parent command. - */ - addGroup(...groups: (Group | undefined)[]): void - } - interface Command { - /** - * RemoveCommand removes one or more commands from a parent command. 
- */ - removeCommand(...cmds: (Command | undefined)[]): void - } - interface Command { - /** - * Print is a convenience method to Print to the defined output, fallback to Stderr if not set. - */ - print(...i: { - }[]): void - } - interface Command { - /** - * Println is a convenience method to Println to the defined output, fallback to Stderr if not set. - */ - println(...i: { - }[]): void - } - interface Command { - /** - * Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. - */ - printf(format: string, ...i: { - }[]): void - } - interface Command { - /** - * PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. - */ - printErr(...i: { - }[]): void - } - interface Command { - /** - * PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. - */ - printErrln(...i: { - }[]): void - } - interface Command { - /** - * PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. - */ - printErrf(format: string, ...i: { - }[]): void - } - interface Command { - /** - * CommandPath returns the full path to this command. - */ - commandPath(): string - } - interface Command { - /** - * UseLine puts out the full usage for a given command (including parents). - */ - useLine(): string - } - interface Command { - /** - * DebugFlags used to determine which flags have been assigned to which commands - * and which persist. - * nolint:goconst - */ - debugFlags(): void - } - interface Command { - /** - * Name returns the command's name: the first word in the use line. - */ - name(): string - } - interface Command { - /** - * HasAlias determines if a given string is an alias of the command. - */ - hasAlias(s: string): boolean - } - interface Command { - /** - * CalledAs returns the command name or alias that was used to invoke - * this command or an empty string if the command has not been called. - */ - calledAs(): string - } - interface Command { - /** - * NameAndAliases returns a list of the command name and all aliases - */ - nameAndAliases(): string - } - interface Command { - /** - * HasExample determines if the command has example. - */ - hasExample(): boolean - } - interface Command { - /** - * Runnable determines if the command is itself runnable. - */ - runnable(): boolean - } - interface Command { - /** - * HasSubCommands determines if the command has children commands. - */ - hasSubCommands(): boolean - } - interface Command { - /** - * IsAvailableCommand determines if a command is available as a non-help command - * (this includes all non deprecated/hidden commands). - */ - isAvailableCommand(): boolean - } - interface Command { - /** - * IsAdditionalHelpTopicCommand determines if a command is an additional - * help topic command; additional help topic command is determined by the - * fact that it is NOT runnable/hidden/deprecated, and has no sub commands that - * are runnable/hidden/deprecated. - * Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. - */ - isAdditionalHelpTopicCommand(): boolean - } - interface Command { - /** - * HasHelpSubCommands determines if a command has any available 'help' sub commands - * that need to be shown in the usage/help default template under 'additional help - * topics'. 
- */ - hasHelpSubCommands(): boolean - } - interface Command { - /** - * HasAvailableSubCommands determines if a command has available sub commands that - * need to be shown in the usage/help default template under 'available commands'. - */ - hasAvailableSubCommands(): boolean - } - interface Command { - /** - * HasParent determines if the command is a child command. - */ - hasParent(): boolean - } - interface Command { - /** - * GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. - */ - globalNormalizationFunc(): (f: any, name: string) => any - } - interface Command { - /** - * Flags returns the complete FlagSet that applies - * to this command (local and persistent declared here and by all parents). - */ - flags(): (any) - } - interface Command { - /** - * LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. - */ - localNonPersistentFlags(): (any) - } - interface Command { - /** - * LocalFlags returns the local FlagSet specifically set in the current command. - */ - localFlags(): (any) - } - interface Command { - /** - * InheritedFlags returns all flags which were inherited from parent commands. - */ - inheritedFlags(): (any) - } - interface Command { - /** - * NonInheritedFlags returns all flags which were not inherited from parent commands. - */ - nonInheritedFlags(): (any) - } - interface Command { - /** - * PersistentFlags returns the persistent FlagSet specifically set in the current command. - */ - persistentFlags(): (any) - } - interface Command { - /** - * ResetFlags deletes all flags from command. - */ - resetFlags(): void - } - interface Command { - /** - * HasFlags checks if the command contains any flags (local plus persistent from the entire structure). - */ - hasFlags(): boolean - } - interface Command { - /** - * HasPersistentFlags checks if the command contains persistent flags. - */ - hasPersistentFlags(): boolean - } - interface Command { - /** - * HasLocalFlags checks if the command has flags specifically declared locally. - */ - hasLocalFlags(): boolean - } - interface Command { - /** - * HasInheritedFlags checks if the command has flags inherited from its parent command. - */ - hasInheritedFlags(): boolean - } - interface Command { - /** - * HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire - * structure) which are not hidden or deprecated. - */ - hasAvailableFlags(): boolean - } - interface Command { - /** - * HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. - */ - hasAvailablePersistentFlags(): boolean - } - interface Command { - /** - * HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden - * or deprecated. - */ - hasAvailableLocalFlags(): boolean - } - interface Command { - /** - * HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are - * not hidden or deprecated. - */ - hasAvailableInheritedFlags(): boolean - } - interface Command { - /** - * Flag climbs up the command tree looking for matching flag. - */ - flag(name: string): (any) - } - interface Command { - /** - * ParseFlags parses persistent flag tree and local flags. - */ - parseFlags(args: Array): void - } - interface Command { - /** - * Parent returns a commands parent command. 
- */ - parent(): (Command) - } - interface Command { - /** - * RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. - */ - registerFlagCompletionFunc(flagName: string, f: (cmd: Command, args: Array, toComplete: string) => [Array, ShellCompDirective]): void - } - interface Command { - /** - * GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. - */ - getFlagCompletionFunc(flagName: string): [(_arg0: Command, _arg1: Array, _arg2: string) => [Array, ShellCompDirective], boolean] - } - interface Command { - /** - * InitDefaultCompletionCmd adds a default 'completion' command to c. - * This function will do nothing if any of the following is true: - * 1- the feature has been explicitly disabled by the program, - * 2- c has no subcommands (to avoid creating one), - * 3- c already has a 'completion' command provided by the program. - */ - initDefaultCompletionCmd(): void - } - interface Command { - /** - * GenFishCompletion generates fish completion file and writes to the passed writer. - */ - genFishCompletion(w: io.Writer, includeDesc: boolean): void - } - interface Command { - /** - * GenFishCompletionFile generates fish completion file. - */ - genFishCompletionFile(filename: string, includeDesc: boolean): void - } - interface Command { - /** - * MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors - * if the command is invoked with a subset (but not all) of the given flags. - */ - markFlagsRequiredTogether(...flagNames: string[]): void - } - interface Command { - /** - * MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors - * if the command is invoked without at least one flag from the given set of flags. - */ - markFlagsOneRequired(...flagNames: string[]): void - } - interface Command { - /** - * MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors - * if the command is invoked with more than one flag from the given set of flags. - */ - markFlagsMutuallyExclusive(...flagNames: string[]): void - } - interface Command { - /** - * ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the - * first error encountered. - */ - validateFlagGroups(): void - } - interface Command { - /** - * GenPowerShellCompletionFile generates powershell completion file without descriptions. - */ - genPowerShellCompletionFile(filename: string): void - } - interface Command { - /** - * GenPowerShellCompletion generates powershell completion file without descriptions - * and writes it to the passed writer. - */ - genPowerShellCompletion(w: io.Writer): void - } - interface Command { - /** - * GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. - */ - genPowerShellCompletionFileWithDesc(filename: string): void - } - interface Command { - /** - * GenPowerShellCompletionWithDesc generates powershell completion file with descriptions - * and writes it to the passed writer. - */ - genPowerShellCompletionWithDesc(w: io.Writer): void - } - interface Command { - /** - * MarkFlagRequired instructs the various shell completion implementations to - * prioritize the named flag when performing completion, - * and causes your command to report an error if invoked without the flag. 
- */ - markFlagRequired(name: string): void - } - interface Command { - /** - * MarkPersistentFlagRequired instructs the various shell completion implementations to - * prioritize the named persistent flag when performing completion, - * and causes your command to report an error if invoked without the flag. - */ - markPersistentFlagRequired(name: string): void - } - interface Command { - /** - * MarkFlagFilename instructs the various shell completion implementations to - * limit completions for the named flag to the specified file extensions. - */ - markFlagFilename(name: string, ...extensions: string[]): void - } - interface Command { - /** - * MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. - * The bash completion script will call the bash function f for the flag. - * - * This will only work for bash completion. - * It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows - * to register a Go function which will work across all shells. - */ - markFlagCustom(name: string, f: string): void - } - interface Command { - /** - * MarkPersistentFlagFilename instructs the various shell completion - * implementations to limit completions for the named persistent flag to the - * specified file extensions. - */ - markPersistentFlagFilename(name: string, ...extensions: string[]): void - } - interface Command { - /** - * MarkFlagDirname instructs the various shell completion implementations to - * limit completions for the named flag to directory names. - */ - markFlagDirname(name: string): void - } - interface Command { - /** - * MarkPersistentFlagDirname instructs the various shell completion - * implementations to limit completions for the named persistent flag to - * directory names. - */ - markPersistentFlagDirname(name: string): void - } - interface Command { - /** - * GenZshCompletionFile generates zsh completion file including descriptions. - */ - genZshCompletionFile(filename: string): void - } - interface Command { - /** - * GenZshCompletion generates zsh completion file including descriptions - * and writes it to the passed writer. - */ - genZshCompletion(w: io.Writer): void - } - interface Command { - /** - * GenZshCompletionFileNoDesc generates zsh completion file without descriptions. - */ - genZshCompletionFileNoDesc(filename: string): void - } - interface Command { - /** - * GenZshCompletionNoDesc generates zsh completion file without descriptions - * and writes it to the passed writer. - */ - genZshCompletionNoDesc(w: io.Writer): void - } - interface Command { - /** - * MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was - * not consistent with Bash completion. It has therefore been disabled. - * Instead, when no other completion is specified, file completion is done by - * default for every argument. One can disable file completion on a per-argument - * basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. - * To achieve file extension filtering, one can use ValidArgsFunction and - * ShellCompDirectiveFilterFileExt. - * - * Deprecated - */ - markZshCompPositionalArgumentFile(argPosition: number, ...patterns: string[]): void - } - interface Command { - /** - * MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore - * been disabled. - * To achieve the same behavior across all shells, one can use - * ValidArgs (for the first argument only) or ValidArgsFunction for - * any argument (can include the first one also). 
- * - * Deprecated - */ - markZshCompPositionalArgumentWords(argPosition: number, ...words: string[]): void - } -} - namespace auth { /** * AuthUser defines a standardized oauth2 user data structure. @@ -14577,27 +20475,1070 @@ namespace migrate { } /** - * Package io provides basic interfaces to I/O primitives. - * Its primary job is to wrap existing implementations of such primitives, - * such as those in package os, into shared public interfaces that - * abstract the functionality, plus some other related primitives. - * - * Because these interfaces and primitives wrap lower-level operations with - * various implementations, unless otherwise informed clients should not - * assume they are safe for parallel execution. + * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. + * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. */ -namespace io { - /** - * ReadCloser is the interface that groups the basic Read and Close methods. - */ - interface ReadCloser { - [key:string]: any; +namespace cobra { + interface Command { + /** + * GenBashCompletion generates bash completion file and writes to the passed writer. + */ + genBashCompletion(w: io.Writer): void } + interface Command { + /** + * GenBashCompletionFile generates bash completion file. + */ + genBashCompletionFile(filename: string): void + } + interface Command { + /** + * GenBashCompletionFileV2 generates Bash completion version 2. + */ + genBashCompletionFileV2(filename: string, includeDesc: boolean): void + } + interface Command { + /** + * GenBashCompletionV2 generates Bash completion file version 2 + * and writes it to the passed writer. + */ + genBashCompletionV2(w: io.Writer, includeDesc: boolean): void + } + // @ts-ignore + import flag = pflag /** - * WriteCloser is the interface that groups the basic Write and Close methods. + * Command is just that, a command for your application. + * E.g. 'go run ...' - 'run' is the command. Cobra requires + * you to define the usage and description as part of your command + * definition to ensure usability. */ - interface WriteCloser { - [key:string]: any; + interface Command { + /** + * Use is the one-line usage message. + * Recommended syntax is as follows: + * ``` + * [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. + * ... indicates that you can specify multiple values for the previous argument. + * | indicates mutually exclusive information. You can use the argument to the left of the separator or the + * argument to the right of the separator. You cannot use both arguments in a single use of the command. + * { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are + * optional, they are enclosed in brackets ([ ]). + * ``` + * Example: add [-F file | -D dir]... [-f format] profile + */ + use: string + /** + * Aliases is an array of aliases that can be used instead of the first word in Use. + */ + aliases: Array + /** + * SuggestFor is an array of command names for which this command will be suggested - + * similar to aliases but only suggests. + */ + suggestFor: Array + /** + * Short is the short description shown in the 'help' output. + */ + short: string + /** + * The group id under which this subcommand is grouped in the 'help' output of its parent. + */ + groupID: string + /** + * Long is the long message shown in the 'help ' output. 
+ */ + long: string + /** + * Example is examples of how to use the command. + */ + example: string + /** + * ValidArgs is list of all valid non-flag arguments that are accepted in shell completions + */ + validArgs: Array + /** + * ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. + * It is a dynamic version of using ValidArgs. + * Only one of ValidArgs and ValidArgsFunction can be used for a command. + */ + validArgsFunction: (cmd: Command, args: Array, toComplete: string) => [Array, ShellCompDirective] + /** + * Expected arguments + */ + args: PositionalArgs + /** + * ArgAliases is List of aliases for ValidArgs. + * These are not suggested to the user in the shell completion, + * but accepted if entered manually. + */ + argAliases: Array + /** + * BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. + * For portability with other shells, it is recommended to instead use ValidArgsFunction + */ + bashCompletionFunction: string + /** + * Deprecated defines, if this command is deprecated and should print this string when used. + */ + deprecated: string + /** + * Annotations are key/value pairs that can be used by applications to identify or + * group commands or set special options. + */ + annotations: _TygojaDict + /** + * Version defines the version for this command. If this value is non-empty and the command does not + * define a "version" flag, a "version" boolean flag will be added to the command and, if specified, + * will print content of the "Version" variable. A shorthand "v" flag will also be added if the + * command does not define one. + */ + version: string + /** + * The *Run functions are executed in the following order: + * ``` + * * PersistentPreRun() + * * PreRun() + * * Run() + * * PostRun() + * * PersistentPostRun() + * ``` + * All functions get the same args, the arguments after the command name. + * The *PreRun and *PostRun functions will only be executed if the Run function of the current + * command has been declared. + * + * PersistentPreRun: children of this command will inherit and execute. + */ + persistentPreRun: (cmd: Command, args: Array) => void + /** + * PersistentPreRunE: PersistentPreRun but returns an error. + */ + persistentPreRunE: (cmd: Command, args: Array) => void + /** + * PreRun: children of this command will not inherit. + */ + preRun: (cmd: Command, args: Array) => void + /** + * PreRunE: PreRun but returns an error. + */ + preRunE: (cmd: Command, args: Array) => void + /** + * Run: Typically the actual work function. Most commands will only implement this. + */ + run: (cmd: Command, args: Array) => void + /** + * RunE: Run but returns an error. + */ + runE: (cmd: Command, args: Array) => void + /** + * PostRun: run after the Run command. + */ + postRun: (cmd: Command, args: Array) => void + /** + * PostRunE: PostRun but returns an error. + */ + postRunE: (cmd: Command, args: Array) => void + /** + * PersistentPostRun: children of this command will inherit and execute after PostRun. + */ + persistentPostRun: (cmd: Command, args: Array) => void + /** + * PersistentPostRunE: PersistentPostRun but returns an error. 
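+ * 
+ * A sketch of wiring the hooks in the order described above (the `cmd` value
+ * and the log messages are hypothetical):
+ * 
+ * ```
+ * cmd.persistentPreRun = (cmd, args) => console.log("1: persistentPreRun")
+ * cmd.preRun = (cmd, args) => console.log("2: preRun")
+ * cmd.run = (cmd, args) => console.log("3: run")
+ * cmd.postRun = (cmd, args) => console.log("4: postRun")
+ * cmd.persistentPostRun = (cmd, args) => console.log("5: persistentPostRun")
+ * ```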
+ */
+ persistentPostRunE: (cmd: Command, args: Array) => void
+ /**
+ * FParseErrWhitelist specifies flag parse errors to be ignored.
+ */
+ fParseErrWhitelist: FParseErrWhitelist
+ /**
+ * CompletionOptions is a set of options to control the handling of shell completion.
+ */
+ completionOptions: CompletionOptions
+ /**
+ * TraverseChildren parses flags on all parents before executing the child command.
+ */
+ traverseChildren: boolean
+ /**
+ * Hidden defines if this command is hidden and should NOT show up in the list of available commands.
+ */
+ hidden: boolean
+ /**
+ * SilenceErrors is an option to quiet errors downstream.
+ */
+ silenceErrors: boolean
+ /**
+ * SilenceUsage is an option to silence usage when an error occurs.
+ */
+ silenceUsage: boolean
+ /**
+ * DisableFlagParsing disables the flag parsing.
+ * If this is true, all flags will be passed to the command as arguments.
+ */
+ disableFlagParsing: boolean
+ /**
+ * DisableAutoGenTag defines if the gen tag ("Auto generated by spf13/cobra...")
+ * will be printed when generating docs for this command.
+ */
+ disableAutoGenTag: boolean
+ /**
+ * DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ * line of a command when printing help or generating docs.
+ */
+ disableFlagsInUseLine: boolean
+ /**
+ * DisableSuggestions disables the suggestions based on Levenshtein distance
+ * that go along with 'unknown command' messages.
+ */
+ disableSuggestions: boolean
+ /**
+ * SuggestionsMinimumDistance defines the minimum Levenshtein distance to display suggestions.
+ * Must be > 0.
+ */
+ suggestionsMinimumDistance: number
+ }
+ interface Command {
+ /**
+ * Context returns the underlying command context. If the command was executed
+ * with ExecuteContext or the context was set with SetContext, the
+ * previously set context will be returned. Otherwise, nil is returned.
+ * 
+ * Notice that a call to Execute and ExecuteC will replace a nil context of
+ * a command with a context.Background, so a background context will be
+ * returned by Context after one of these functions has been called.
+ */
+ context(): context.Context
+ }
+ interface Command {
+ /**
+ * SetContext sets context for the command. This context will be overwritten by
+ * Command.ExecuteContext or Command.ExecuteContextC.
+ */
+ setContext(ctx: context.Context): void
+ }
+ interface Command {
+ /**
+ * SetArgs sets arguments for the command. It is set to os.Args[1:] by default;
+ * if desired, it can be overridden, which is particularly useful when testing.
+ */
+ setArgs(a: Array): void
+ }
+ interface Command {
+ /**
+ * SetOutput sets the destination for usage and error messages.
+ * If output is nil, os.Stderr is used.
+ * Deprecated: Use SetOut and/or SetErr instead.
+ */
+ setOutput(output: io.Writer): void
+ }
+ interface Command {
+ /**
+ * SetOut sets the destination for usage messages.
+ * If newOut is nil, os.Stdout is used.
+ */
+ setOut(newOut: io.Writer): void
+ }
+ interface Command {
+ /**
+ * SetErr sets the destination for error messages.
+ * If newErr is nil, os.Stderr is used.
+ */
+ setErr(newErr: io.Writer): void
+ }
+ interface Command {
+ /**
+ * SetIn sets the source for input data.
+ * If newIn is nil, os.Stdin is used.
+ */
+ setIn(newIn: io.Reader): void
+ }
+ interface Command {
+ /**
+ * SetUsageFunc sets the usage function. Usage can be defined by the application.
+ */
+ setUsageFunc(f: (_arg0: Command) => void): void
+ }
+ interface Command {
+ /**
+ * SetUsageTemplate sets the usage template. Can be defined by the application.
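+ * 
+ * For instance (a minimal sketch; the template string is hypothetical but uses
+ * the `{{.UseLine}}` placeholder known from the default cobra templates):
+ * 
+ * ```
+ * cmd.setUsageTemplate("Usage:\n  {{.UseLine}}\n")
+ * ```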
+ */ + setUsageTemplate(s: string): void + } + interface Command { + /** + * SetFlagErrorFunc sets a function to generate an error when flag parsing + * fails. + */ + setFlagErrorFunc(f: (_arg0: Command, _arg1: Error) => void): void + } + interface Command { + /** + * SetHelpFunc sets help function. Can be defined by Application. + */ + setHelpFunc(f: (_arg0: Command, _arg1: Array) => void): void + } + interface Command { + /** + * SetHelpCommand sets help command. + */ + setHelpCommand(cmd: Command): void + } + interface Command { + /** + * SetHelpCommandGroupID sets the group id of the help command. + */ + setHelpCommandGroupID(groupID: string): void + } + interface Command { + /** + * SetCompletionCommandGroupID sets the group id of the completion command. + */ + setCompletionCommandGroupID(groupID: string): void + } + interface Command { + /** + * SetHelpTemplate sets help template to be used. Application can use it to set custom template. + */ + setHelpTemplate(s: string): void + } + interface Command { + /** + * SetVersionTemplate sets version template to be used. Application can use it to set custom template. + */ + setVersionTemplate(s: string): void + } + interface Command { + /** + * SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. + */ + setErrPrefix(s: string): void + } + interface Command { + /** + * SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. + * The user should not have a cyclic dependency on commands. + */ + setGlobalNormalizationFunc(n: (f: any, name: string) => any): void + } + interface Command { + /** + * OutOrStdout returns output to stdout. + */ + outOrStdout(): io.Writer + } + interface Command { + /** + * OutOrStderr returns output to stderr + */ + outOrStderr(): io.Writer + } + interface Command { + /** + * ErrOrStderr returns output to stderr + */ + errOrStderr(): io.Writer + } + interface Command { + /** + * InOrStdin returns input to stdin + */ + inOrStdin(): io.Reader + } + interface Command { + /** + * UsageFunc returns either the function set by SetUsageFunc for this command + * or a parent, or it returns a default usage function. + */ + usageFunc(): (_arg0: Command) => void + } + interface Command { + /** + * Usage puts out the usage for the command. + * Used when a user provides invalid input. + * Can be defined by user by overriding UsageFunc. + */ + usage(): void + } + interface Command { + /** + * HelpFunc returns either the function set by SetHelpFunc for this command + * or a parent, or it returns a function with default help behavior. + */ + helpFunc(): (_arg0: Command, _arg1: Array) => void + } + interface Command { + /** + * Help puts out the help for the command. + * Used when a user calls help [command]. + * Can be defined by user by overriding HelpFunc. + */ + help(): void + } + interface Command { + /** + * UsageString returns usage string. + */ + usageString(): string + } + interface Command { + /** + * FlagErrorFunc returns either the function set by SetFlagErrorFunc for this + * command or a parent, or it returns a function which returns the original + * error. + */ + flagErrorFunc(): (_arg0: Command, _arg1: Error) => void + } + interface Command { + /** + * UsagePadding return padding for the usage. + */ + usagePadding(): number + } + interface Command { + /** + * CommandPathPadding return padding for the command path. + */ + commandPathPadding(): number + } + interface Command { + /** + * NamePadding returns padding for the name. 
+ */
+ namePadding(): number
+ }
+ interface Command {
+ /**
+ * UsageTemplate returns the usage template for the command.
+ */
+ usageTemplate(): string
+ }
+ interface Command {
+ /**
+ * HelpTemplate returns the help template for the command.
+ */
+ helpTemplate(): string
+ }
+ interface Command {
+ /**
+ * VersionTemplate returns the version template for the command.
+ */
+ versionTemplate(): string
+ }
+ interface Command {
+ /**
+ * ErrPrefix returns the error message prefix for the command.
+ */
+ errPrefix(): string
+ }
+ interface Command {
+ /**
+ * Find the target command given the args and command tree.
+ * Meant to be run on the highest node. Only searches down.
+ */
+ find(args: Array): [(Command), Array]
+ }
+ interface Command {
+ /**
+ * Traverse the command tree to find the command, and parse args for
+ * each parent.
+ */
+ traverse(args: Array): [(Command), Array]
+ }
+ interface Command {
+ /**
+ * SuggestionsFor provides suggestions for the typedName.
+ */
+ suggestionsFor(typedName: string): Array
+ }
+ interface Command {
+ /**
+ * VisitParents visits all parents of the command and invokes fn on each parent.
+ */
+ visitParents(fn: (_arg0: Command) => void): void
+ }
+ interface Command {
+ /**
+ * Root finds the root command.
+ */
+ root(): (Command)
+ }
+ interface Command {
+ /**
+ * ArgsLenAtDash will return the length of c.Flags().Args at the moment
+ * when a -- was found during args parsing.
+ */
+ argsLenAtDash(): number
+ }
+ interface Command {
+ /**
+ * ExecuteContext is the same as Execute(), but sets the ctx on the command.
+ * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+ * functions.
+ */
+ executeContext(ctx: context.Context): void
+ }
+ interface Command {
+ /**
+ * Execute uses the args (os.Args[1:] by default)
+ * and runs through the command tree finding appropriate matches
+ * for commands and then corresponding flags.
+ */
+ execute(): void
+ }
+ interface Command {
+ /**
+ * ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
+ * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+ * functions.
+ */
+ executeContextC(ctx: context.Context): (Command)
+ }
+ interface Command {
+ /**
+ * ExecuteC executes the command.
+ */
+ executeC(): (Command)
+ }
+ interface Command {
+ validateArgs(args: Array): void
+ }
+ interface Command {
+ /**
+ * ValidateRequiredFlags validates that all required flags are present and returns an error otherwise.
+ */
+ validateRequiredFlags(): void
+ }
+ interface Command {
+ /**
+ * InitDefaultHelpFlag adds the default help flag to c.
+ * It is called automatically when executing c or when calling help and usage.
+ * If c already has a help flag, it will do nothing.
+ */
+ initDefaultHelpFlag(): void
+ }
+ interface Command {
+ /**
+ * InitDefaultVersionFlag adds the default version flag to c.
+ * It is called automatically when executing c.
+ * If c already has a version flag, it will do nothing.
+ * If c.Version is empty, it will do nothing.
+ */
+ initDefaultVersionFlag(): void
+ }
+ interface Command {
+ /**
+ * InitDefaultHelpCmd adds the default help command to c.
+ * It is called automatically when executing c or when calling help and usage.
+ * If c already has a help command or c has no subcommands, it will do nothing.
+ */
+ initDefaultHelpCmd(): void
+ }
+ interface Command {
+ /**
+ * ResetCommands deletes the parent, subcommands, and help command from c.
+ */
+ resetCommands(): void
+ }
+ interface Command {
+ /**
+ * Commands returns a sorted slice of child commands.
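+ * 
+ * A small sketch (assumes `root` and `sub` are existing Command values):
+ * 
+ * ```
+ * root.addCommand(sub)
+ * const children = root.commands() // sorted; includes sub
+ * ```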
+ */ + commands(): Array<(Command | undefined)> + } + interface Command { + /** + * AddCommand adds one or more commands to this parent command. + */ + addCommand(...cmds: (Command | undefined)[]): void + } + interface Command { + /** + * Groups returns a slice of child command groups. + */ + groups(): Array<(Group | undefined)> + } + interface Command { + /** + * AllChildCommandsHaveGroup returns if all subcommands are assigned to a group + */ + allChildCommandsHaveGroup(): boolean + } + interface Command { + /** + * ContainsGroup return if groupID exists in the list of command groups. + */ + containsGroup(groupID: string): boolean + } + interface Command { + /** + * AddGroup adds one or more command groups to this parent command. + */ + addGroup(...groups: (Group | undefined)[]): void + } + interface Command { + /** + * RemoveCommand removes one or more commands from a parent command. + */ + removeCommand(...cmds: (Command | undefined)[]): void + } + interface Command { + /** + * Print is a convenience method to Print to the defined output, fallback to Stderr if not set. + */ + print(...i: { + }[]): void + } + interface Command { + /** + * Println is a convenience method to Println to the defined output, fallback to Stderr if not set. + */ + println(...i: { + }[]): void + } + interface Command { + /** + * Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. + */ + printf(format: string, ...i: { + }[]): void + } + interface Command { + /** + * PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. + */ + printErr(...i: { + }[]): void + } + interface Command { + /** + * PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. + */ + printErrln(...i: { + }[]): void + } + interface Command { + /** + * PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. + */ + printErrf(format: string, ...i: { + }[]): void + } + interface Command { + /** + * CommandPath returns the full path to this command. + */ + commandPath(): string + } + interface Command { + /** + * UseLine puts out the full usage for a given command (including parents). + */ + useLine(): string + } + interface Command { + /** + * DebugFlags used to determine which flags have been assigned to which commands + * and which persist. + */ + debugFlags(): void + } + interface Command { + /** + * Name returns the command's name: the first word in the use line. + */ + name(): string + } + interface Command { + /** + * HasAlias determines if a given string is an alias of the command. + */ + hasAlias(s: string): boolean + } + interface Command { + /** + * CalledAs returns the command name or alias that was used to invoke + * this command or an empty string if the command has not been called. + */ + calledAs(): string + } + interface Command { + /** + * NameAndAliases returns a list of the command name and all aliases + */ + nameAndAliases(): string + } + interface Command { + /** + * HasExample determines if the command has example. + */ + hasExample(): boolean + } + interface Command { + /** + * Runnable determines if the command is itself runnable. + */ + runnable(): boolean + } + interface Command { + /** + * HasSubCommands determines if the command has children commands. 
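The tree-management methods above pair naturally with the print helpers. A short sketch, assuming a `root` command and a hypothetical `hello` subcommand built elsewhere:

```
declare const root: Command;
declare const hello: Command; // hypothetical subcommand instance

root.addCommand(hello);

// commands() returns a sorted snapshot whose entries are typed as
// possibly undefined, hence the optional chaining.
for (const child of root.commands()) {
    root.println(child?.commandPath(), "aliases:", child?.nameAndAliases());
}
```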
+ */ + hasSubCommands(): boolean + } + interface Command { + /** + * IsAvailableCommand determines if a command is available as a non-help command + * (this includes all non deprecated/hidden commands). + */ + isAvailableCommand(): boolean + } + interface Command { + /** + * IsAdditionalHelpTopicCommand determines if a command is an additional + * help topic command; additional help topic command is determined by the + * fact that it is NOT runnable/hidden/deprecated, and has no sub commands that + * are runnable/hidden/deprecated. + * Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. + */ + isAdditionalHelpTopicCommand(): boolean + } + interface Command { + /** + * HasHelpSubCommands determines if a command has any available 'help' sub commands + * that need to be shown in the usage/help default template under 'additional help + * topics'. + */ + hasHelpSubCommands(): boolean + } + interface Command { + /** + * HasAvailableSubCommands determines if a command has available sub commands that + * need to be shown in the usage/help default template under 'available commands'. + */ + hasAvailableSubCommands(): boolean + } + interface Command { + /** + * HasParent determines if the command is a child command. + */ + hasParent(): boolean + } + interface Command { + /** + * GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. + */ + globalNormalizationFunc(): (f: any, name: string) => any + } + interface Command { + /** + * Flags returns the complete FlagSet that applies + * to this command (local and persistent declared here and by all parents). + */ + flags(): (any) + } + interface Command { + /** + * LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. + * This function does not modify the flags of the current command, it's purpose is to return the current state. + */ + localNonPersistentFlags(): (any) + } + interface Command { + /** + * LocalFlags returns the local FlagSet specifically set in the current command. + * This function does not modify the flags of the current command, it's purpose is to return the current state. + */ + localFlags(): (any) + } + interface Command { + /** + * InheritedFlags returns all flags which were inherited from parent commands. + * This function does not modify the flags of the current command, it's purpose is to return the current state. + */ + inheritedFlags(): (any) + } + interface Command { + /** + * NonInheritedFlags returns all flags which were not inherited from parent commands. + * This function does not modify the flags of the current command, it's purpose is to return the current state. + */ + nonInheritedFlags(): (any) + } + interface Command { + /** + * PersistentFlags returns the persistent FlagSet specifically set in the current command. + */ + persistentFlags(): (any) + } + interface Command { + /** + * ResetFlags deletes all flags from command. + */ + resetFlags(): void + } + interface Command { + /** + * HasFlags checks if the command contains any flags (local plus persistent from the entire structure). + */ + hasFlags(): boolean + } + interface Command { + /** + * HasPersistentFlags checks if the command contains persistent flags. + */ + hasPersistentFlags(): boolean + } + interface Command { + /** + * HasLocalFlags checks if the command has flags specifically declared locally. 
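All of the flag accessors above return pflag FlagSet values, which these typings flatten to `any`; calls made on them (like the `lookup` below) are therefore assumptions about the Go side rather than anything the compiler checks:

```
declare const cmd: Command;

// Full view: local and persistent flags declared here and by parents.
const all = cmd.flags();

// lookup is pflag's accessor, assumed here; since the set is typed
// `any`, a typo would only surface at runtime.
const verbose = all.lookup("verbose");
cmd.println("verbose flag:", verbose);

if (cmd.hasFlags() && !cmd.hasInheritedFlags()) {
    cmd.println("all flags are declared locally");
}
```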
+ */ + hasLocalFlags(): boolean + } + interface Command { + /** + * HasInheritedFlags checks if the command has flags inherited from its parent command. + */ + hasInheritedFlags(): boolean + } + interface Command { + /** + * HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire + * structure) which are not hidden or deprecated. + */ + hasAvailableFlags(): boolean + } + interface Command { + /** + * HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. + */ + hasAvailablePersistentFlags(): boolean + } + interface Command { + /** + * HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden + * or deprecated. + */ + hasAvailableLocalFlags(): boolean + } + interface Command { + /** + * HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are + * not hidden or deprecated. + */ + hasAvailableInheritedFlags(): boolean + } + interface Command { + /** + * Flag climbs up the command tree looking for matching flag. + */ + flag(name: string): (any) + } + interface Command { + /** + * ParseFlags parses persistent flag tree and local flags. + */ + parseFlags(args: Array): void + } + interface Command { + /** + * Parent returns a commands parent command. + */ + parent(): (Command) + } + interface Command { + /** + * RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. + */ + registerFlagCompletionFunc(flagName: string, f: (cmd: Command, args: Array, toComplete: string) => [Array, ShellCompDirective]): void + } + interface Command { + /** + * GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. + */ + getFlagCompletionFunc(flagName: string): [(_arg0: Command, _arg1: Array, _arg2: string) => [Array, ShellCompDirective], boolean] + } + interface Command { + /** + * InitDefaultCompletionCmd adds a default 'completion' command to c. + * This function will do nothing if any of the following is true: + * 1- the feature has been explicitly disabled by the program, + * 2- c has no subcommands (to avoid creating one), + * 3- c already has a 'completion' command provided by the program. + */ + initDefaultCompletionCmd(): void + } + interface Command { + /** + * GenFishCompletion generates fish completion file and writes to the passed writer. + */ + genFishCompletion(w: io.Writer, includeDesc: boolean): void + } + interface Command { + /** + * GenFishCompletionFile generates fish completion file. + */ + genFishCompletionFile(filename: string, includeDesc: boolean): void + } + interface Command { + /** + * MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors + * if the command is invoked with a subset (but not all) of the given flags. + */ + markFlagsRequiredTogether(...flagNames: string[]): void + } + interface Command { + /** + * MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors + * if the command is invoked without at least one flag from the given set of flags. + */ + markFlagsOneRequired(...flagNames: string[]): void + } + interface Command { + /** + * MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors + * if the command is invoked with more than one flag from the given set of flags. 
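A sketch of the group constraints and dynamic completion above; the flag names are hypothetical, and the bare `0` stands in for the default ShellCompDirective, whose constants are defined outside this excerpt:

```
declare const cmd: Command;

// Violating either constraint becomes a parse-time error.
cmd.markFlagsMutuallyExclusive("json", "yaml");
cmd.markFlagsOneRequired("json", "yaml");

// Shell completion for a hypothetical --env flag.
cmd.registerFlagCompletionFunc("env", (c, args, toComplete) => {
    const suggestions = ["dev", "staging", "prod"].filter((v) => v.startsWith(toComplete));
    return [suggestions, 0 as ShellCompDirective]; // assumed "default" directive
});
```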
+ */ + markFlagsMutuallyExclusive(...flagNames: string[]): void + } + interface Command { + /** + * ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the + * first error encountered. + */ + validateFlagGroups(): void + } + interface Command { + /** + * GenPowerShellCompletionFile generates powershell completion file without descriptions. + */ + genPowerShellCompletionFile(filename: string): void + } + interface Command { + /** + * GenPowerShellCompletion generates powershell completion file without descriptions + * and writes it to the passed writer. + */ + genPowerShellCompletion(w: io.Writer): void + } + interface Command { + /** + * GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. + */ + genPowerShellCompletionFileWithDesc(filename: string): void + } + interface Command { + /** + * GenPowerShellCompletionWithDesc generates powershell completion file with descriptions + * and writes it to the passed writer. + */ + genPowerShellCompletionWithDesc(w: io.Writer): void + } + interface Command { + /** + * MarkFlagRequired instructs the various shell completion implementations to + * prioritize the named flag when performing completion, + * and causes your command to report an error if invoked without the flag. + */ + markFlagRequired(name: string): void + } + interface Command { + /** + * MarkPersistentFlagRequired instructs the various shell completion implementations to + * prioritize the named persistent flag when performing completion, + * and causes your command to report an error if invoked without the flag. + */ + markPersistentFlagRequired(name: string): void + } + interface Command { + /** + * MarkFlagFilename instructs the various shell completion implementations to + * limit completions for the named flag to the specified file extensions. + */ + markFlagFilename(name: string, ...extensions: string[]): void + } + interface Command { + /** + * MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. + * The bash completion script will call the bash function f for the flag. + * + * This will only work for bash completion. + * It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows + * to register a Go function which will work across all shells. + */ + markFlagCustom(name: string, f: string): void + } + interface Command { + /** + * MarkPersistentFlagFilename instructs the various shell completion + * implementations to limit completions for the named persistent flag to the + * specified file extensions. + */ + markPersistentFlagFilename(name: string, ...extensions: string[]): void + } + interface Command { + /** + * MarkFlagDirname instructs the various shell completion implementations to + * limit completions for the named flag to directory names. + */ + markFlagDirname(name: string): void + } + interface Command { + /** + * MarkPersistentFlagDirname instructs the various shell completion + * implementations to limit completions for the named persistent flag to + * directory names. + */ + markPersistentFlagDirname(name: string): void + } + interface Command { + /** + * GenZshCompletionFile generates zsh completion file including descriptions. + */ + genZshCompletionFile(filename: string): void + } + interface Command { + /** + * GenZshCompletion generates zsh completion file including descriptions + * and writes it to the passed writer. 
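The required/filename markers combine completion hints with invocation-time validation. A short sketch with hypothetical flag names:

```
declare const cmd: Command;

// Missing --config now fails the invocation, and shell completion
// is steered toward YAML files for it.
cmd.markFlagRequired("config");
cmd.markFlagFilename("config", "yml", "yaml");

// Limit completions for a persistent --data flag to directory names.
cmd.markPersistentFlagDirname("data");
```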
+ */ + genZshCompletion(w: io.Writer): void + } + interface Command { + /** + * GenZshCompletionFileNoDesc generates zsh completion file without descriptions. + */ + genZshCompletionFileNoDesc(filename: string): void + } + interface Command { + /** + * GenZshCompletionNoDesc generates zsh completion file without descriptions + * and writes it to the passed writer. + */ + genZshCompletionNoDesc(w: io.Writer): void + } + interface Command { + /** + * MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was + * not consistent with Bash completion. It has therefore been disabled. + * Instead, when no other completion is specified, file completion is done by + * default for every argument. One can disable file completion on a per-argument + * basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. + * To achieve file extension filtering, one can use ValidArgsFunction and + * ShellCompDirectiveFilterFileExt. + * + * Deprecated + */ + markZshCompPositionalArgumentFile(argPosition: number, ...patterns: string[]): void + } + interface Command { + /** + * MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore + * been disabled. + * To achieve the same behavior across all shells, one can use + * ValidArgs (for the first argument only) or ValidArgsFunction for + * any argument (can include the first one also). + * + * Deprecated + */ + markZshCompPositionalArgumentWords(argPosition: number, ...words: string[]): void } } @@ -14616,7 +21557,7 @@ namespace io { * the manuals for the appropriate operating system. * These calls return err == nil to indicate success; otherwise * err is an operating system error describing the failure. - * On most systems, that error has type syscall.Errno. + * On most systems, that error has type [Errno]. * * NOTE: Most of the functions, types, and constants defined in * this package are also available in the [golang.org/x/sys] package. @@ -14628,6 +21569,10 @@ namespace syscall { /** * SysProcIDMap holds Container ID to Host ID mappings used for User Namespaces in Linux. * See user_namespaces(7). + * + * Note that User Namespaces are not available on a number of popular Linux + * versions (due to security issues), or are available but subject to AppArmor + * restrictions like in Ubuntu 24.04. */ interface SysProcIDMap { containerID: number // Container ID. @@ -14638,7 +21583,7 @@ namespace syscall { import errorspkg = errors /** * Credential holds user and group identities to be assumed - * by a child process started by StartProcess. + * by a child process started by [StartProcess]. */ interface Credential { uid: number // User ID. @@ -14646,9 +21591,11 @@ namespace syscall { groups: Array // Supplementary group IDs. noSetGroups: boolean // If true, don't set supplementary groups } + // @ts-ignore + import runtimesyscall = syscall /** * A Signal is a number describing a process signal. - * It implements the os.Signal interface. + * It implements the [os.Signal] interface. */ interface Signal extends Number{} interface Signal { @@ -14671,7 +21618,7 @@ namespace syscall { * changes for clock synchronization, and a “monotonic clock,” which is * not. The general rule is that the wall clock is for telling time and * the monotonic clock is for measuring time. 
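The Credential struct above is a plain data holder, so on the JS side it can be sketched as an object literal; the numeric IDs below are hypothetical, and whether the Go side accepts a literal like this (versus a bound constructor) is an assumption:

```
const cred: syscall.Credential = {
    uid: 1000,           // user ID the child process should assume
    gid: 1000,           // primary group ID
    groups: [1000, 27],  // supplementary group IDs
    noSetGroups: false,  // keep setgroups() enabled
};
```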
Rather than split the API, - * in this package the Time returned by time.Now contains both a wall + * in this package the Time returned by [time.Now] contains both a wall * clock reading and a monotonic clock reading; later time-telling * operations use the wall clock reading, but later time-measuring * operations, specifically comparisons and subtractions, use the @@ -14688,7 +21635,7 @@ namespace syscall { * elapsed := t.Sub(start) * ``` * - * Other idioms, such as time.Since(start), time.Until(deadline), and + * Other idioms, such as [time.Since](start), [time.Until](deadline), and * time.Now().Before(deadline), are similarly robust against wall clock * resets. * @@ -14713,23 +21660,26 @@ namespace syscall { * * On some systems the monotonic clock will stop if the computer goes to sleep. * On such a system, t.Sub(u) may not accurately reflect the actual - * time that passed between t and u. + * time that passed between t and u. The same applies to other functions and + * methods that subtract times, such as [Since], [Until], [Before], [After], + * [Add], [Sub], [Equal] and [Compare]. In some cases, you may need to strip + * the monotonic clock to get accurate results. * * Because the monotonic clock reading has no meaning outside * the current process, the serialized forms generated by t.GobEncode, * t.MarshalBinary, t.MarshalJSON, and t.MarshalText omit the monotonic * clock reading, and t.Format provides no format for it. Similarly, the - * constructors time.Date, time.Parse, time.ParseInLocation, and time.Unix, + * constructors [time.Date], [time.Parse], [time.ParseInLocation], and [time.Unix], * as well as the unmarshalers t.GobDecode, t.UnmarshalBinary. * t.UnmarshalJSON, and t.UnmarshalText always create times with * no monotonic clock reading. * - * The monotonic clock reading exists only in Time values. It is not - * a part of Duration values or the Unix times returned by t.Unix and + * The monotonic clock reading exists only in [Time] values. It is not + * a part of [Duration] values or the Unix times returned by t.Unix and * friends. * * Note that the Go == operator compares not just the time instant but - * also the Location and the monotonic clock reading. See the + * also the [Location] and the monotonic clock reading. See the * documentation for the Time type for a discussion of equality * testing for Time values. * @@ -14739,10 +21689,11 @@ namespace syscall { * * # Timer Resolution * - * Timer resolution varies depending on the Go runtime, the operating system + * [Timer] resolution varies depending on the Go runtime, the operating system * and the underlying hardware. - * On Unix, the resolution is approximately 1ms. - * On Windows, the default resolution is approximately 16ms, but + * On Unix, the resolution is ~1ms. + * On Windows version 1803 and newer, the resolution is ~0.5ms. + * On older Windows versions, the default resolution is ~16ms, but * a higher resolution may be requested using [golang.org/x/sys/windows.TimeBeginPeriod]. */ namespace time { @@ -14781,23 +21732,12 @@ namespace time { interface Location { /** * String returns a descriptive name for the time zone information, - * corresponding to the name argument to LoadLocation or FixedZone. + * corresponding to the name argument to [LoadLocation] or [FixedZone]. */ string(): string } } -/** - * Package fs defines basic interfaces to a file system. - * A file system can be provided by the host operating system - * but also by other packages. 
- * - * See the [testing/fstest] package for support with testing - * implementations of file systems. - */ -namespace fs { -} - /** * Package context defines the Context type, which carries deadlines, * cancellation signals, and other request-scoped values across API boundaries @@ -14854,241 +21794,202 @@ namespace fs { namespace context { } +/** + * Package io provides basic interfaces to I/O primitives. + * Its primary job is to wrap existing implementations of such primitives, + * such as those in package os, into shared public interfaces that + * abstract the functionality, plus some other related primitives. + * + * Because these interfaces and primitives wrap lower-level operations with + * various implementations, unless otherwise informed clients should not + * assume they are safe for parallel execution. + */ +namespace io { + /** + * ReadCloser is the interface that groups the basic Read and Close methods. + */ + interface ReadCloser { + [key:string]: any; + } + /** + * WriteCloser is the interface that groups the basic Write and Close methods. + */ + interface WriteCloser { + [key:string]: any; + } +} + +/** + * Package fs defines basic interfaces to a file system. + * A file system can be provided by the host operating system + * but also by other packages. + * + * See the [testing/fstest] package for support with testing + * implementations of file systems. + */ +namespace fs { +} + /** * Package url parses URLs and implements query escaping. */ namespace url { /** - * A URL represents a parsed URL (technically, a URI reference). - * - * The general form represented is: - * - * ``` - * [scheme:][//[userinfo@]host][/]path[?query][#fragment] - * ``` - * - * URLs that do not start with a slash after the scheme are interpreted as: - * - * ``` - * scheme:opaque[?query][#fragment] - * ``` - * - * The Host field contains the host and port subcomponents of the URL. - * When the port is present, it is separated from the host with a colon. - * When the host is an IPv6 address, it must be enclosed in square brackets: - * "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port - * into a string suitable for the Host field, adding square brackets to - * the host when necessary. - * - * Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/. - * A consequence is that it is impossible to tell which slashes in the Path were - * slashes in the raw URL and which were %2f. This distinction is rarely important, - * but when it is, the code should use the [URL.EscapedPath] method, which preserves - * the original encoding of Path. - * - * The RawPath field is an optional field which is only set when the default - * encoding of Path is different from the escaped path. See the EscapedPath method - * for more details. - * - * URL's String method uses the EscapedPath method to obtain the path. + * The Userinfo type is an immutable encapsulation of username and + * password details for a [URL]. An existing Userinfo value is guaranteed + * to have a username set (potentially empty, as allowed by RFC 2396), + * and optionally a password. 
*/ - interface URL { - scheme: string - opaque: string // encoded opaque data - user?: Userinfo // username and password information - host: string // host or host:port (see Hostname and Port methods) - path: string // path (relative paths may omit leading slash) - rawPath: string // encoded path hint (see EscapedPath method) - omitHost: boolean // do not emit empty host (authority) - forceQuery: boolean // append a query ('?') even if RawQuery is empty - rawQuery: string // encoded query values, without '?' - fragment: string // fragment for references, without '#' - rawFragment: string // encoded fragment hint (see EscapedFragment method) + interface Userinfo { } - interface URL { + interface Userinfo { /** - * EscapedPath returns the escaped form of u.Path. - * In general there are multiple possible escaped forms of any path. - * EscapedPath returns u.RawPath when it is a valid escaping of u.Path. - * Otherwise EscapedPath ignores u.RawPath and computes an escaped - * form on its own. - * The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct - * their results. - * In general, code should call EscapedPath instead of - * reading u.RawPath directly. + * Username returns the username. */ - escapedPath(): string + username(): string } - interface URL { + interface Userinfo { /** - * EscapedFragment returns the escaped form of u.Fragment. - * In general there are multiple possible escaped forms of any fragment. - * EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment. - * Otherwise EscapedFragment ignores u.RawFragment and computes an escaped - * form on its own. - * The [URL.String] method uses EscapedFragment to construct its result. - * In general, code should call EscapedFragment instead of - * reading u.RawFragment directly. + * Password returns the password in case it is set, and whether it is set. */ - escapedFragment(): string + password(): [string, boolean] } - interface URL { + interface Userinfo { /** - * String reassembles the [URL] into a valid URL string. - * The general form of the result is one of: - * - * ``` - * scheme:opaque?query#fragment - * scheme://userinfo@host/path?query#fragment - * ``` - * - * If u.Opaque is non-empty, String uses the first form; - * otherwise it uses the second form. - * Any non-ASCII characters in host are escaped. - * To obtain the path, String uses u.EscapedPath(). - * - * In the second form, the following rules apply: - * ``` - * - if u.Scheme is empty, scheme: is omitted. - * - if u.User is nil, userinfo@ is omitted. - * - if u.Host is empty, host/ is omitted. - * - if u.Scheme and u.Host are empty and u.User is nil, - * the entire scheme://userinfo@host/ is omitted. - * - if u.Host is non-empty and u.Path begins with a /, - * the form host/path does not add its own /. - * - if u.RawQuery is empty, ?query is omitted. - * - if u.Fragment is empty, #fragment is omitted. - * ``` + * String returns the encoded userinfo information in the standard form + * of "username[:password]". */ string(): string } - interface URL { - /** - * Redacted is like [URL.String] but replaces any password with "xxxxx". - * Only the password in u.User is redacted. - */ - redacted(): string - } +} + +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { /** - * Values maps a string key to a list of values. - * It is typically used for query parameters and form values. - * Unlike in the http.Header map, the keys in a Values map - * are case-sensitive. 
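Userinfo is read-only once built, and its password accessor mirrors Go's two-value return as a tuple. A sketch assuming an instance obtained elsewhere (the namespace's constructors are not part of this hunk) and an available `console` in the host runtime:

```
declare const user: url.Userinfo;

const name = user.username();

// Go's (string, bool) return surfaces as a tuple here.
const [, isSet] = user.password();

console.log(user.string(), name, isSet ? "(password set)" : "(no password)");
```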
+ * DateTime represents a [time.Time] instance in UTC that is wrapped + * and serialized using the app default date layout. */ - interface Values extends _TygojaDict{} - interface Values { - /** - * Get gets the first value associated with the given key. - * If there are no values associated with the key, Get returns - * the empty string. To access multiple values, use the map - * directly. - */ - get(key: string): string + interface DateTime { } - interface Values { + interface DateTime { /** - * Set sets the key to value. It replaces any existing - * values. + * Time returns the internal [time.Time] instance. */ - set(key: string, value: string): void + time(): time.Time } - interface Values { + interface DateTime { /** - * Add adds the value to key. It appends to any existing - * values associated with key. + * IsZero checks whether the current DateTime instance has zero time value. */ - add(key: string, value: string): void + isZero(): boolean } - interface Values { + interface DateTime { /** - * Del deletes the values associated with key. + * String serializes the current DateTime instance into a formatted + * UTC date string. + * + * The zero value is serialized to an empty string. */ - del(key: string): void + string(): string } - interface Values { + interface DateTime { /** - * Has checks whether a given key is set. + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface DateTime { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface DateTime { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface DateTime { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current DateTime instance. + */ + scan(value: any): void + } +} + +namespace store { + /** + * Store defines a concurrent safe in memory key-value data store. + */ + interface Store { + } + interface Store { + /** + * Reset clears the store and replaces the store data with a + * shallow copy of the provided newData. + */ + reset(newData: _TygojaDict): void + } + interface Store { + /** + * Length returns the current number of elements in the store. + */ + length(): number + } + interface Store { + /** + * RemoveAll removes all the existing store entries. + */ + removeAll(): void + } + interface Store { + /** + * Remove removes a single entry from the store. + * + * Remove does nothing if key doesn't exist in the store. + */ + remove(key: string): void + } + interface Store { + /** + * Has checks if element with the specified key exist or not. */ has(key: string): boolean } - interface Values { + interface Store { /** - * Encode encodes the values into “URL encoded” form - * ("bar=baz&foo=quux") sorted by key. - */ - encode(): string - } - interface URL { - /** - * IsAbs reports whether the [URL] is absolute. - * Absolute means that it has a non-empty scheme. - */ - isAbs(): boolean - } - interface URL { - /** - * Parse parses a [URL] in the context of the receiver. The provided URL - * may be relative or absolute. Parse returns nil, err on parse - * failure, otherwise its return value is the same as [URL.ResolveReference]. - */ - parse(ref: string): (URL) - } - interface URL { - /** - * ResolveReference resolves a URI reference to an absolute URI from - * an absolute base URI u, per RFC 3986 Section 5.2. The URI reference - * may be relative or absolute. 
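DateTime round-trips through the marshal/unmarshal pair above using the app's default UTC layout, with the zero value serializing to an empty string. A sketch assuming an existing instance, an available `console`, and an illustrative timestamp string:

```
declare const dt: types.DateTime;

if (!dt.isZero()) {
    // Formatted with the app default layout; the zero value
    // would have produced "".
    console.log("serialized:", dt.string());

    // Drop down to the wrapped time.Time when needed.
    const t = dt.time();
    console.log("raw:", t);
}

// scan() accepts loosely-typed input, e.g. a previously
// serialized string (layout shown here is illustrative).
dt.scan("2024-10-18 12:00:00.000Z");
```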
ResolveReference always returns a new - * [URL] instance, even if the returned URL is identical to either the - * base or reference. If ref is an absolute URL, then ResolveReference - * ignores base and returns a copy of ref. - */ - resolveReference(ref: URL): (URL) - } - interface URL { - /** - * Query parses RawQuery and returns the corresponding values. - * It silently discards malformed value pairs. - * To check errors use [ParseQuery]. - */ - query(): Values - } - interface URL { - /** - * RequestURI returns the encoded path?query or opaque?query - * string that would be used in an HTTP request for u. - */ - requestURI(): string - } - interface URL { - /** - * Hostname returns u.Host, stripping any valid port number if present. + * Get returns a single element value from the store. * - * If the result is enclosed in square brackets, as literal IPv6 addresses are, - * the square brackets are removed from the result. + * If key is not set, the zero T value is returned. */ - hostname(): string + get(key: string): T } - interface URL { + interface Store { /** - * Port returns the port part of u.Host, without the leading colon. + * GetAll returns a shallow copy of the current store data. + */ + getAll(): _TygojaDict + } + interface Store { + /** + * Set sets (or overwrite if already exist) a new value for key. + */ + set(key: string, value: T): void + } + interface Store { + /** + * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key. * - * If u.Host doesn't contain a valid numeric port, Port returns an empty string. + * This method is similar to Set() but **it will skip adding new elements** + * to the store if the store length has reached the specified limit. + * false is returned if maxAllowedElements limit is reached. */ - port(): string - } - interface URL { - marshalBinary(): string|Array - } - interface URL { - unmarshalBinary(text: string|Array): void - } - interface URL { - /** - * JoinPath returns a new [URL] with the provided path elements joined to - * any existing path and the resulting path cleaned of any ./ or ../ elements. - * Any sequences of multiple / characters will be reduced to a single /. - */ - joinPath(...elem: string[]): (URL) + setIfLessThanLimit(key: string, value: T, maxAllowedElements: number): boolean } } @@ -15140,16 +22041,19 @@ namespace url { * listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C * library routines such as getaddrinfo and getnameinfo. * - * By default the pure Go resolver is used, because a blocked DNS request consumes - * only a goroutine, while a blocked C call consumes an operating system thread. + * On Unix the pure Go resolver is preferred over the cgo resolver, because a blocked DNS + * request consumes only a goroutine, while a blocked C call consumes an operating system thread. * When cgo is available, the cgo-based resolver is used instead under a variety of * conditions: on systems that do not let programs make direct DNS requests (OS X), * when the LOCALDOMAIN environment variable is present (even if empty), * when the RES_OPTIONS or HOSTALIASES environment variable is non-empty, * when the ASR_CONFIG environment variable is non-empty (OpenBSD only), * when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the - * Go resolver does not implement, and when the name being looked up ends in .local - * or is an mDNS name. + * Go resolver does not implement. 
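Store is a concurrency-safe map whose generic value type appears flattened in this excerpt, so the sketch below leaves values untyped; note that setIfLessThanLimit is the one mutator that can refuse a write:

```
declare const cache: store.Store; // value type T is flattened here

cache.set("greeting", "hello");

if (cache.has("greeting")) {
    console.log(cache.get("greeting"));
}

// Refused (returns false) once the store already holds 100 entries.
const accepted = cache.setIfLessThanLimit("extra", "value", 100);

// getAll() is a shallow copy: mutating the snapshot leaves the
// store itself untouched.
const snapshot = cache.getAll();
console.log(accepted, cache.length(), snapshot);
```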
+ * + * On all systems (except Plan 9), when the cgo resolver is being used + * this package applies a concurrent cgo lookup limit to prevent the system + * from running out of system threads. Currently, it is limited to 500 concurrent lookups. * * The resolver decision can be overridden by setting the netdns value of the * GODEBUG environment variable (see package runtime) to go or cgo, as in: @@ -15167,6 +22071,12 @@ namespace url { * To force a particular resolver while also printing debugging information, * join the two settings by a plus sign, as in GODEBUG=netdns=go+1. * + * The Go resolver will send an EDNS0 additional header with a DNS request, + * to signal a willingness to accept a larger DNS packet size. + * This can reportedly cause sporadic failures with the DNS server run + * by some modems and routers. Setting GODEBUG=netedns0=0 will disable + * sending the additional header. + * * On macOS, if Go code that uses the net package is built with * -buildmode=c-archive, linking the resulting archive into a C program * requires passing -lresolv when linking the C code. @@ -15271,219 +22181,98 @@ namespace net { } } -/** - * Package sql provides a generic interface around SQL (or SQL-like) - * databases. - * - * The sql package must be used in conjunction with a database driver. - * See https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until - * after the query is completed. - * - * For usage examples, see the wiki page at - * https://golang.org/s/sqlwiki. - */ -namespace sql { +namespace hook { /** - * IsolationLevel is the transaction isolation level used in [TxOptions]. + * Hook defines a concurrent safe structure for handling event hooks + * (aka. callbacks propagation). */ - interface IsolationLevel extends Number{} - interface IsolationLevel { - /** - * String returns the name of the transaction isolation level. - */ - string(): string + interface Hook { } - /** - * DBStats contains database statistics. - */ - interface DBStats { - maxOpenConnections: number // Maximum number of open connections to the database. + interface Hook { /** - * Pool Status - */ - openConnections: number // The number of established connections both in use and idle. - inUse: number // The number of connections currently in use. - idle: number // The number of idle connections. - /** - * Counters - */ - waitCount: number // The total number of connections waited for. - waitDuration: time.Duration // The total time blocked waiting for a new connection. - maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. - maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. - maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. - } - /** - * Conn represents a single database connection rather than a pool of database - * connections. Prefer running queries from [DB] unless there is a specific - * need for a continuous single database connection. - * - * A Conn must call [Conn.Close] to return the connection to the database pool - * and may do so concurrently with a running query. - * - * After a call to [Conn.Close], all operations on the - * connection fail with [ErrConnDone]. - */ - interface Conn { - } - interface Conn { - /** - * PingContext verifies the connection to the database is still alive. 
- */ - pingContext(ctx: context.Context): void - } - interface Conn { - /** - * ExecContext executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Conn { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. - */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface Conn { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * the [*Row.Scan] method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface Conn { - /** - * PrepareContext creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. + * PreAdd registers a new handler to the hook by prepending it to the existing queue. * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. + * Returns an autogenerated hook id that could be used later to remove the hook with Hook.Remove(id). */ - prepareContext(ctx: context.Context, query: string): (Stmt) + preAdd(fn: Handler): string } - interface Conn { + interface Hook { /** - * Raw executes f exposing the underlying driver connection for the - * duration of f. The driverConn must not be used outside of f. + * Add registers a new handler to the hook by appending it to the existing queue. * - * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable - * until [Conn.Close] is called. + * Returns an autogenerated hook id that could be used later to remove the hook with Hook.Remove(id). */ - raw(f: (driverConn: any) => void): void + add(fn: Handler): string } - interface Conn { + interface Hook { /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled back. - * If the context is canceled, the sql package will roll back - * the transaction. [Tx.Commit] will return an error if the context provided to - * BeginTx is canceled. - * - * The provided [TxOptions] is optional and may be nil if defaults should be used. - * If a non-default isolation level is used that the driver doesn't support, - * an error will be returned. + * Remove removes a single hook handler by its id. */ - beginTx(ctx: context.Context, opts: TxOptions): (Tx) + remove(id: string): void } - interface Conn { + interface Hook { /** - * Close returns the connection to the connection pool. - * All operations after a Close will return with [ErrConnDone]. - * Close is safe to call concurrently with other operations and will - * block until all other operations finish. It may be useful to first - * cancel any used context and then call close directly after. + * RemoveAll removes all registered handlers. */ - close(): void + removeAll(): void + } + interface Hook { + /** + * Trigger executes all registered hook handlers one by one + * with the specified `data` as an argument. 
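Hook is the propagation primitive behind the event system: handlers run in queue order, preAdd jumps the queue, and the returned ids feed remove(). The Handler and event data types are defined outside this excerpt, so the sketch below treats them loosely:

```
declare const h: hook.Hook; // event data type is flattened here

// Persistent handlers; the returned ids allow targeted removal.
const firstId = h.preAdd((e) => console.log("runs first:", e));
h.add((e) => console.log("runs later:", e));

// Fire the hook; the trailing function is a one-off handler that
// lives only for this trigger() call.
h.trigger({ some: "data" }, (e) => console.log("one-off:", e));

h.remove(firstId);
```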
+ * + * Optionally, this method also allows registering additional one-off + * handlers that will be temporarily appended to the handlers queue. + * + * The execution stops when: + * - hook.StopPropagation is returned in one of the handlers + * - any non-nil error is returned in one of the handlers + */ + trigger(data: T, ...oneOffHandlers: Handler[]): void + } + /** + * TaggedHook defines a proxy hook which registers handlers that are triggered only + * if the TaggedHook.tags are empty or include at least one of the event data tag(s). + */ + type _subbFzXR = mainHook + interface TaggedHook extends _subbFzXR { + } + interface TaggedHook { + /** + * CanTriggerOn checks if the current TaggedHook can be triggered with + * the provided event data tags. + */ + canTriggerOn(tags: Array): boolean + } + interface TaggedHook { + /** + * PreAdd registers a new handler to the hook by prepending it to the existing queue. + * + * The fn handler will be called only if the event data tags satisfy h.CanTriggerOn. + */ + preAdd(fn: Handler): string + } + interface TaggedHook { + /** + * Add registers a new handler to the hook by appending it to the existing queue. + * + * The fn handler will be called only if the event data tags satisfy h.CanTriggerOn. + */ + add(fn: Handler): string + } +} + +namespace logging { + /** + * Logger is an interface for logging entries at certain classifications. + */ + interface Logger { + [key:string]: any; + /** + * Logf is expected to support the standard fmt package "verbs".
*/ - scan(...dest: any[]): void - } - interface Row { - /** - * Err provides a way for wrapping packages to check for - * query errors without calling [Row.Scan]. - * Err returns the error, if any, that was encountered while running the query. - * If this error is not nil, this error will also be returned from [Row.Scan]. - */ - err(): void + logf(classification: Classification, format: string, ...v: { + }[]): void } } @@ -15570,8 +22359,8 @@ namespace textproto { * To protect against malicious inputs, this package sets limits on the size * of the MIME data it processes. * - * Reader.NextPart and Reader.NextRawPart limit the number of headers in a - * part to 10000 and Reader.ReadForm limits the total number of headers in all + * [Reader.NextPart] and [Reader.NextRawPart] limit the number of headers in a + * part to 10000 and [Reader.ReadForm] limits the total number of headers in all * FileHeaders to 10000. * These limits may be adjusted with the GODEBUG=multipartmaxheaders= * setting. @@ -15580,11 +22369,6 @@ namespace textproto { * This limit may be adjusted with the GODEBUG=multipartmaxparts= * setting. */ -/** - * Copyright 2023 The Go Authors. All rights reserved. - * Use of this source code is governed by a BSD-style - * license that can be found in the LICENSE file. - */ namespace multipart { interface Reader { /** @@ -15593,7 +22377,7 @@ namespace multipart { * It stores up to maxMemory bytes + 10MB (reserved for non-file parts) * in memory. File parts which can't be stored in memory will be stored on * disk in temporary files. - * It returns ErrMessageTooLarge if all non-file parts can't be stored in + * It returns [ErrMessageTooLarge] if all non-file parts can't be stored in * memory. */ readForm(maxMemory: number): (Form) @@ -15601,7 +22385,7 @@ namespace multipart { /** * Form is a parsed multipart form. * Its File parts are stored either in memory or on disk, - * and are accessible via the *FileHeader's Open method. + * and are accessible via the [*FileHeader]'s Open method. * Its Value parts are stored as strings. * Both are keyed by field name. */ @@ -15611,7 +22395,7 @@ namespace multipart { } interface Form { /** - * RemoveAll removes any temporary files associated with a Form. + * RemoveAll removes any temporary files associated with a [Form]. */ removeAll(): void } @@ -15633,7 +22417,7 @@ namespace multipart { interface Reader { /** * NextPart returns the next part in the multipart or an error. - * When there are no more parts, the error io.EOF is returned. + * When there are no more parts, the error [io.EOF] is returned. * * As a special case, if the "Content-Transfer-Encoding" header * has a value of "quoted-printable", that header is instead @@ -15644,9 +22428,9 @@ namespace multipart { interface Reader { /** * NextRawPart returns the next part in the multipart or an error. - * When there are no more parts, the error io.EOF is returned. + * When there are no more parts, the error [io.EOF] is returned. * - * Unlike NextPart, it does not have special handling for + * Unlike [Reader.NextPart], it does not have special handling for * "Content-Transfer-Encoding: quoted-printable". 
*/ nextRawPart(): (Part) @@ -15782,6 +22566,7 @@ namespace http { interface Cookie { name: string value: string + quoted: boolean // indicates whether the Value was originally quoted path: string // optional domain: string // optional expires: time.Time // optional @@ -15795,6 +22580,7 @@ namespace http { secure: boolean httpOnly: boolean sameSite: SameSite + partitioned: boolean raw: string unparsed: Array // Raw text of unparsed attribute-value pairs } @@ -16081,134 +22867,11048 @@ namespace http { } } -namespace store { +/** + * Package gcerr provides an error type for Go CDK APIs. + */ +namespace gcerr { + interface ErrorCode { + string(): string + } /** - * Store defines a concurrent safe in memory key-value data store. + * An ErrorCode describes the error's category. */ - interface Store { + interface ErrorCode extends Number{} +} + +/** + * Package blob provides an easy and portable way to interact with blobs + * within a storage location. Subpackages contain driver implementations of + * blob for supported services. + * + * See https://gocloud.dev/howto/blob/ for a detailed how-to guide. + * + * *blob.Bucket implements io/fs.FS and io/fs.SubFS, so it can be used with + * functions in that package. + * + * # Errors + * + * The errors returned from this package can be inspected in several ways: + * + * The Code function from gocloud.dev/gcerrors will return an error code, also + * defined in that package, when invoked on an error. + * + * The Bucket.ErrorAs method can retrieve the driver error underlying the returned + * error. + * + * # OpenCensus Integration + * + * OpenCensus supports tracing and metric collection for multiple languages and + * backend providers. See https://opencensus.io. + * + * This API collects OpenCensus traces and metrics for the following methods: + * ``` + * - Attributes + * - Copy + * - Delete + * - ListPage + * - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll + * are included because they call NewRangeReader.) + * - NewWriter, from creation until the call to Close. + * ``` + * + * All trace and metric names begin with the package import path. + * The traces add the method name. + * For example, "gocloud.dev/blob/Attributes". + * The metrics are "completed_calls", a count of completed method calls by driver, + * method and status (error code); and "latency", a distribution of method latency + * by driver and method. + * For example, "gocloud.dev/blob/latency". + * + * It also collects the following metrics: + * ``` + * - gocloud.dev/blob/bytes_read: the total number of bytes read, by driver. + * - gocloud.dev/blob/bytes_written: the total number of bytes written, by driver. + * ``` + * + * To enable trace collection in your application, see "Configure Exporter" at + * https://opencensus.io/quickstart/go/tracing. + * To enable metric collection in your application, see "Exporting stats" at + * https://opencensus.io/quickstart/go/metrics. + */ +namespace blob { + /** + * Writer writes bytes to a blob. + * + * It implements io.WriteCloser (https://golang.org/pkg/io/#Closer), and must be + * closed after all writes are done. + */ + interface Writer { } - interface Store { + interface Writer { /** - * Reset clears the store and replaces the store data with a - * shallow copy of the provided newData. - */ - reset(newData: _TygojaDict): void - } - interface Store { - /** - * Length returns the current number of elements in the store. 
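The two fields added to Cookie track newer Go behavior: `quoted` records whether the original value was double-quoted, and `partitioned` opts the cookie into partitioned (CHIPS) storage. A sketch mutating an existing cookie, since building a full literal would require every field of the struct:

```
declare const c: http.Cookie;

// Opt in to partitioned storage; only meaningful alongside Secure.
c.partitioned = true;
c.secure = true;

// Record that the value did not arrive double-quoted.
c.quoted = false;
```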
- */ - length(): number - } - interface Store { - /** - * RemoveAll removes all the existing store entries. - */ - removeAll(): void - } - interface Store { - /** - * Remove removes a single entry from the store. + * Write implements the io.Writer interface (https://golang.org/pkg/io/#Writer). * - * Remove does nothing if key doesn't exist in the store. + * Writes may happen asynchronously, so the returned error can be nil + * even if the actual write eventually fails. The write is only guaranteed to + * have succeeded if Close returns no error. */ - remove(key: string): void + write(p: string|Array): number } - interface Store { + interface Writer { /** - * Has checks if element with the specified key exist or not. + * Close closes the blob writer. The write operation is not guaranteed to have succeeded until + * Close returns with no error. + * Close may return an error if the context provided to create the Writer is + * canceled or reaches its deadline. */ - has(key: string): boolean + close(): void } - interface Store { + interface Writer { /** - * Get returns a single element value from the store. + * ReadFrom reads from r and writes to w until EOF or error. + * The return value is the number of bytes read from r. * - * If key is not set, the zero T value is returned. + * It implements the io.ReaderFrom interface. */ - get(key: string): T + readFrom(r: io.Reader): number } - interface Store { + /** + * ListOptions sets options for listing blobs via Bucket.List. + */ + interface ListOptions { /** - * GetAll returns a shallow copy of the current store data. + * Prefix indicates that only blobs with a key starting with this prefix + * should be returned. */ - getAll(): _TygojaDict - } - interface Store { + prefix: string /** - * Set sets (or overwrite if already exist) a new value for key. - */ - set(key: string, value: T): void - } - interface Store { - /** - * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key. + * Delimiter sets the delimiter used to define a hierarchical namespace, + * like a filesystem with "directories". It is highly recommended that you + * use "" or "/" as the Delimiter. Other values should work through this API, + * but service UIs generally assume "/". * - * This method is similar to Set() but **it will skip adding new elements** - * to the store if the store length has reached the specified limit. - * false is returned if maxAllowedElements limit is reached. + * An empty delimiter means that the bucket is treated as a single flat + * namespace. + * + * A non-empty delimiter means that any result with the delimiter in its key + * after Prefix is stripped will be returned with ListObject.IsDir = true, + * ListObject.Key truncated after the delimiter, and zero values for other + * ListObject fields. These results represent "directories". Multiple results + * in a "directory" are returned as a single result. */ - setIfLessThanLimit(key: string, value: T, maxAllowedElements: number): boolean + delimiter: string + /** + * BeforeList is a callback that will be called before each call to the + * the underlying service's list functionality. + * asFunc converts its argument to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information. + */ + beforeList: (asFunc: (_arg0: { + }) => boolean) => void + } + /** + * ListIterator iterates over List results. + */ + interface ListIterator { + } + interface ListIterator { + /** + * Next returns a *ListObject for the next blob. 
It returns (nil, io.EOF) if + * there are no more. + */ + next(ctx: context.Context): (ListObject) + } + /** + * SignedURLOptions sets options for SignedURL. + */ + interface SignedURLOptions { + /** + * Expiry sets how long the returned URL is valid for. + * Defaults to DefaultSignedURLExpiry. + */ + expiry: time.Duration + /** + * Method is the HTTP method that can be used on the URL; one of "GET", "PUT", + * or "DELETE". Defaults to "GET". + */ + method: string + /** + * ContentType specifies the Content-Type HTTP header the user agent is + * permitted to use in the PUT request. It must match exactly. See + * EnforceAbsentContentType for behavior when ContentType is the empty string. + * If a bucket does not implement this verification, then it returns an + * Unimplemented error. + * + * Must be empty for non-PUT requests. + */ + contentType: string + /** + * If EnforceAbsentContentType is true and ContentType is the empty string, + * then PUTing to the signed URL will fail if the Content-Type header is + * present. Not all buckets support this: ones that do not will return an + * Unimplemented error. + * + * If EnforceAbsentContentType is false and ContentType is the empty string, + * then PUTing without a Content-Type header will succeed, but it is + * implementation-specific whether providing a Content-Type header will fail. + * + * Must be false for non-PUT requests. + */ + enforceAbsentContentType: boolean + /** + * BeforeSign is a callback that will be called before each call to the + * the underlying service's sign functionality. + * asFunc converts its argument to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information. + */ + beforeSign: (asFunc: (_arg0: { + }) => boolean) => void + } + /** + * ReaderOptions sets options for NewReader and NewRangeReader. + */ + interface ReaderOptions { + /** + * BeforeRead is a callback that will be called before + * any data is read (unless NewReader returns an error before then, in which + * case it may not be called at all). + * + * Calling Seek may reset the underlying reader, and result in BeforeRead + * getting called again with a different underlying provider-specific reader.. + * + * asFunc converts its argument to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information. + */ + beforeRead: (asFunc: (_arg0: { + }) => boolean) => void + } + /** + * WriterOptions sets options for NewWriter. + */ + interface WriterOptions { + /** + * BufferSize changes the default size in bytes of the chunks that + * Writer will upload in a single request; larger blobs will be split into + * multiple requests. + * + * This option may be ignored by some drivers. + * + * If 0, the driver will choose a reasonable default. + * + * If the Writer is used to do many small writes concurrently, using a + * smaller BufferSize may reduce memory usage. + */ + bufferSize: number + /** + * MaxConcurrency changes the default concurrency for parts of an upload. + * + * This option may be ignored by some drivers. + * + * If 0, the driver will choose a reasonable default. + */ + maxConcurrency: number + /** + * CacheControl specifies caching attributes that services may use + * when serving the blob. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + */ + cacheControl: string + /** + * ContentDisposition specifies whether the blob content is expected to be + * displayed inline or as an attachment. 
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition + */ + contentDisposition: string + /** + * ContentEncoding specifies the encoding used for the blob's content, if any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding + */ + contentEncoding: string + /** + * ContentLanguage specifies the language used in the blob's content, if any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language + */ + contentLanguage: string + /** + * ContentType specifies the MIME type of the blob being written. If not set, + * it will be inferred from the content using the algorithm described at + * http://mimesniff.spec.whatwg.org/. + * Set DisableContentTypeDetection to true to disable the above and force + * the ContentType to stay empty. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type + */ + contentType: string + /** + * When true, if ContentType is the empty string, it will stay the empty + * string rather than being inferred from the content. + * Note that while the blob will be written with an empty string ContentType, + * most providers will fill one in during reads, so don't expect an empty + * ContentType if you read the blob back. + */ + disableContentTypeDetection: boolean + /** + * ContentMD5 is used as a message integrity check. + * If len(ContentMD5) > 0, the MD5 hash of the bytes written must match + * ContentMD5, or Close will return an error without completing the write. + * https://tools.ietf.org/html/rfc1864 + */ + contentMD5: string|Array + /** + * Metadata holds key/value strings to be associated with the blob, or nil. + * Keys may not be empty, and are lowercased before being written. + * Duplicate case-insensitive keys (e.g., "foo" and "FOO") will result in + * an error. + */ + metadata: _TygojaDict + /** + * BeforeWrite is a callback that will be called exactly once, before + * any data is written (unless NewWriter returns an error, in which case + * it will not be called at all). Note that this is not necessarily during + * or after the first Write call, as drivers may buffer bytes before + * sending an upload request. + * + * asFunc converts its argument to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information. + */ + beforeWrite: (asFunc: (_arg0: { + }) => boolean) => void + } + /** + * CopyOptions sets options for Copy. + */ + interface CopyOptions { + /** + * BeforeCopy is a callback that will be called before the copy is + * initiated. + * + * asFunc converts its argument to driver-specific types. + * See https://gocloud.dev/concepts/as/ for background information. + */ + beforeCopy: (asFunc: (_arg0: { + }) => boolean) => void } } /** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. + * Package sql provides a generic interface around SQL (or SQL-like) + * databases. + * + * The sql package must be used in conjunction with a database driver. + * See https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until + * after the query is completed. + * + * For usage examples, see the wiki page at + * https://golang.org/s/sqlwiki. */ -namespace types { +namespace sql { /** - * DateTime represents a [time.Time] instance in UTC that is wrapped - * and serialized using the app default date layout. + * IsolationLevel is the transaction isolation level used in [TxOptions]. 
*/ - interface DateTime { - } - interface DateTime { + interface IsolationLevel extends Number{} + interface IsolationLevel { /** - * Time returns the internal [time.Time] instance. - */ - time(): time.Time - } - interface DateTime { - /** - * IsZero checks whether the current DateTime instance has zero time value. - */ - isZero(): boolean - } - interface DateTime { - /** - * String serializes the current DateTime instance into a formatted - * UTC date string. - * - * The zero value is serialized to an empty string. + * String returns the name of the transaction isolation level. */ string(): string } - interface DateTime { + /** + * DBStats contains database statistics. + */ + interface DBStats { + maxOpenConnections: number // Maximum number of open connections to the database. /** - * MarshalJSON implements the [json.Marshaler] interface. + * Pool Status */ - marshalJSON(): string|Array - } - interface DateTime { + openConnections: number // The number of established connections both in use and idle. + inUse: number // The number of connections currently in use. + idle: number // The number of idle connections. /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. + * Counters */ - unmarshalJSON(b: string|Array): void + waitCount: number // The total number of connections waited for. + waitDuration: time.Duration // The total time blocked waiting for a new connection. + maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. + maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. + maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. } - interface DateTime { + /** + * Conn represents a single database connection rather than a pool of database + * connections. Prefer running queries from [DB] unless there is a specific + * need for a continuous single database connection. + * + * A Conn must call [Conn.Close] to return the connection to the database pool + * and may do so concurrently with a running query. + * + * After a call to [Conn.Close], all operations on the + * connection fail with [ErrConnDone]. + */ + interface Conn { + } + interface Conn { /** - * Value implements the [driver.Valuer] interface. + * PingContext verifies the connection to the database is still alive. */ - value(): any + pingContext(ctx: context.Context): void } - interface DateTime { + interface Conn { /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current DateTime instance. + * ExecContext executes a query without returning any rows. + * The args are for any placeholder parameters in the query. */ - scan(value: any): void + execContext(ctx: context.Context, query: string, ...args: any[]): Result } + interface Conn { + /** + * QueryContext executes a query that returns rows, typically a SELECT. + * The args are for any placeholder parameters in the query. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface Conn { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * the [*Row.Scan] method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. 
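+ *
+ * (Editorial example) A rough jsvm sketch; it assumes `conn` is an open
+ * sql.Conn and `ctx` a context.Context obtained elsewhere - both are
+ * illustrative placeholders, and the scan destination semantics depend on
+ * the host bindings:
+ *
+ * ```
+ * const row = conn.queryRowContext(ctx, "SELECT COUNT(*) FROM posts WHERE author = ?", "alice")
+ * // row.scan(...) then copies the first matched row into the provided destinations
+ * ```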
+ */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface Conn { + /** + * PrepareContext creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. + * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface Conn { + /** + * Raw executes f exposing the underlying driver connection for the + * duration of f. The driverConn must not be used outside of f. + * + * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable + * until [Conn.Close] is called. + */ + raw(f: (driverConn: any) => void): void + } + interface Conn { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled back. + * If the context is canceled, the sql package will roll back + * the transaction. [Tx.Commit] will return an error if the context provided to + * BeginTx is canceled. + * + * The provided [TxOptions] is optional and may be nil if defaults should be used. + * If a non-default isolation level is used that the driver doesn't support, + * an error will be returned. + */ + beginTx(ctx: context.Context, opts: TxOptions): (Tx) + } + interface Conn { + /** + * Close returns the connection to the connection pool. + * All operations after a Close will return with [ErrConnDone]. + * Close is safe to call concurrently with other operations and will + * block until all other operations finish. It may be useful to first + * cancel any used context and then call close directly after. + */ + close(): void + } + /** + * ColumnType contains the name and type of a column. + */ + interface ColumnType { + } + interface ColumnType { + /** + * Name returns the name or alias of the column. + */ + name(): string + } + interface ColumnType { + /** + * Length returns the column type length for variable length column types such + * as text and binary field types. If the type length is unbounded the value will + * be [math.MaxInt64] (any database limits will still apply). + * If the column type is not variable length, such as an int, or if not supported + * by the driver ok is false. + */ + length(): [number, boolean] + } + interface ColumnType { + /** + * DecimalSize returns the scale and precision of a decimal type. + * If not applicable or if not supported ok is false. + */ + decimalSize(): [number, number, boolean] + } + interface ColumnType { + /** + * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. + * If a driver does not support this property ScanType will return + * the type of an empty interface. + */ + scanType(): any + } + interface ColumnType { + /** + * Nullable reports whether the column may be null. + * If a driver does not support this property ok will be false. + */ + nullable(): [boolean, boolean] + } + interface ColumnType { + /** + * DatabaseTypeName returns the database system name of the column type. If an empty + * string is returned, then the driver type name is not supported. + * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers + * are not included. + * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", + * "INT", and "BIGINT". 
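+ *
+ * (Editorial example) Inspecting result metadata, assuming the enclosing
+ * Rows type exposes the usual columnTypes() binding and `rows` is an
+ * sql.Rows value (illustrative only):
+ *
+ * ```
+ * for (const ct of rows.columnTypes()) {
+ *     console.log(ct.name(), ct.databaseTypeName())
+ * }
+ * ```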
+ */
+ databaseTypeName(): string
+ }
+ /**
+ * Row is the result of calling [DB.QueryRow] to select a single row.
+ */
+ interface Row {
+ }
+ interface Row {
+ /**
+ * Scan copies the columns from the matched row into the values
+ * pointed at by dest. See the documentation on [Rows.Scan] for details.
+ * If more than one row matches the query,
+ * Scan uses the first row and discards the rest. If no row matches
+ * the query, Scan returns [ErrNoRows].
+ */
+ scan(...dest: any[]): void
+ }
+ interface Row {
+ /**
+ * Err provides a way for wrapping packages to check for
+ * query errors without calling [Row.Scan].
+ * Err returns the error, if any, that was encountered while running the query.
+ * If this error is not nil, this error will also be returned from [Row.Scan].
+ */
+ err(): void
+ }
+}
+
+namespace migrate {
+ interface Migration {
+ file: string
+ up: (db: dbx.Builder) => void
+ down: (db: dbx.Builder) => void
+ }
+}
+
+/**
+ * Package middleware provides transport agnostic middleware for decorating SDK
+ * handlers.
+ *
+ * The Smithy middleware stack provides ordered behavior to be invoked on an
+ * underlying handler. The stack is separated into steps that are invoked in a
+ * static order. A step is a collection of middleware that are injected into an
+ * ordered list defined by the user. The user may add, insert, swap, and remove a
+ * step's middleware. When the stack is invoked the step middleware become static,
+ * and their order cannot be modified.
+ *
+ * A stack and its step middleware are **not** safe to modify concurrently.
+ *
+ * A stack will use the ordered list of middleware to decorate an underlying
+ * handler. A handler could be something like an HTTP Client that round trips an
+ * API operation over HTTP.
+ *
+ * Smithy Middleware Stack
+ *
+ * A Stack is a collection of middleware that wrap a handler. The stack can be
+ * broken down into discrete steps. Each step may contain zero or more middleware
+ * specific to that stack's step.
+ *
+ * A Stack Step is a predefined set of middleware that are invoked in a static
+ * order by the Stack. These steps represent fixed points in the middleware stack
+ * for organizing specific behavior, such as serialize and build. A Stack Step is
+ * composed of zero or more middleware that are specific to that step. A step may
+ * define its own set of input/output parameters the generic input/output
+ * parameters are cast from. A step calls its middleware recursively, before
+ * calling the next step in the stack, returning the result or error of the step
+ * middleware decorating the underlying handler.
+ *
+ * * Initialize: Prepares the input, and sets any default parameters as needed,
+ * (e.g. idempotency token, and presigned URLs).
+ *
+ * * Serialize: Serializes the prepared input into a data structure that can be
+ * consumed by the target transport's message, (e.g. REST-JSON serialization).
+ *
+ * * Build: Adds additional metadata to the serialized transport message, (e.g.
+ * HTTP's Content-Length header, or body checksum). Decorations and
+ * modifications to the message should be copied to all message attempts.
+ *
+ * * Finalize: Performs final preparations needed before sending the message. The
+ * message should already be complete by this stage, and is only altered to
+ * meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+ * signing).
+ *
+ * * Deserialize: Reacts to the handler's response returned by the recipient of
+ * the request message.
+ * Deserializes the response into a structured type or error above stacks can react to.
+ *
+ * Adding Middleware to a Stack Step
+ *
+ * Middleware can be added to the front or back of a step, or relative, by name,
+ * to an existing middleware in that stack. If a middleware does not have a name,
+ * a unique name will be generated for the middleware when it is added to the step.
+ *
+ * ```
+ * // Create middleware stack
+ * stack := middleware.NewStack()
+ *
+ * // Add middleware to stack steps
+ * stack.Initialize.Add(paramValidationMiddleware, middleware.After)
+ * stack.Serialize.Add(marshalOperationFoo, middleware.After)
+ * stack.Deserialize.Add(unmarshalOperationFoo, middleware.After)
+ *
+ * // Invoke middleware on handler.
+ * resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler)
+ * ```
+ */
+namespace middleware {
+ /**
+ * Stack provides protocol and transport agnostic set of middleware split into
+ * distinct steps. Steps have specific transitions between them, that are
+ * managed by the individual step.
+ *
+ * Steps are composed as middleware around the underlying handler in the
+ * following order:
+ *
+ * ```
+ * Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler
+ * ```
+ *
+ * Any middleware within the chain may choose to stop and return an error or
+ * response. Since the middleware decorate the handler like a call stack, each
+ * middleware will receive the result of the next middleware in the chain.
+ * Middleware that does not need to react to an input, or result must forward
+ * along the input down the chain, or return the result back up the chain.
+ *
+ * ```
+ * Initialize <- Serialize <- Build <- Finalize <- Deserialize <- Handler
+ * ```
+ */
+ interface Stack {
+ /**
+ * Initialize prepares the input, and sets any default parameters as
+ * needed, (e.g. idempotency token, and presigned URLs).
+ *
+ * Takes Input Parameters, and returns result or error.
+ *
+ * Receives result or error from Serialize step.
+ */
+ initialize?: InitializeStep
+ /**
+ * Serialize serializes the prepared input into a data structure that can be consumed
+ * by the target transport's message, (e.g. REST-JSON serialization)
+ *
+ * Converts Input Parameters into a Request, and returns the result or error.
+ *
+ * Receives result or error from Build step.
+ */
+ serialize?: SerializeStep
+ /**
+ * Build adds additional metadata to the serialized transport message
+ * (e.g. HTTP's Content-Length header, or body checksum). Decorations and
+ * modifications to the message should be copied to all message attempts.
+ *
+ * Takes Request, and returns result or error.
+ *
+ * Receives result or error from Finalize step.
+ */
+ build?: BuildStep
+ /**
+ * Finalize performs final preparations needed before sending the message. The
+ * message should already be complete by this stage, and is only altered
+ * to meet the expectations of the recipient (e.g. Retry and AWS SigV4
+ * request signing)
+ *
+ * Takes Request, and returns result or error.
+ *
+ * Receives result or error from Deserialize step.
+ */
+ finalize?: FinalizeStep
+ /**
+ * Deserialize reacts to the handler's response returned by the recipient of the request
+ * message. Deserializes the response into a structured type or error above
+ * stacks can react to.
+ *
+ * Should only forward Request to underlying handler.
+ *
+ * Takes Request, and returns result or error.
+ *
+ * Receives raw response, or error from underlying handler.
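+ *
+ * (Editorial example) A generic TypeScript illustration of the decoration
+ * idea described above - each step wraps the next, with the handler at the
+ * innermost position. This is not an SDK API, just the call-stack concept:
+ *
+ * ```
+ * type Handler = (input: unknown) => unknown
+ * const wrap = (name: string, next: Handler): Handler => (input) => {
+ *     console.log("enter", name) // the request flows down the chain
+ *     const result = next(input)
+ *     console.log("leave", name) // the result flows back up the chain
+ *     return result
+ * }
+ * const handler: Handler = (input) => input
+ * const stack = ["Initialize", "Serialize", "Build", "Finalize", "Deserialize"]
+ *     .reduceRight((next, name) => wrap(name, next), handler)
+ * stack({})
+ * ```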
+ */
+ deserialize?: DeserializeStep
+ }
+ interface Stack {
+ /**
+ * ID returns the unique ID for the stack as a middleware.
+ */
+ id(): string
+ }
+ interface Stack {
+ /**
+ * HandleMiddleware invokes the middleware stack decorating the next handler.
+ * Each step of stack will be invoked in order before calling the next step.
+ * With the next handler call last.
+ *
+ * The input value must be the input parameters of the operation being
+ * performed.
+ *
+ * Will return the result of the operation, or error.
+ */
+ handleMiddleware(ctx: context.Context, input: {
+ }, next: Handler): [{
+ }, Metadata]
+ }
+ interface Stack {
+ /**
+ * List returns a list of all middleware in the stack by step.
+ */
+ list(): Array
+ }
+ interface Stack {
+ string(): string
+ }
+}
+
+/**
+ * Package bearer provides middleware and utilities for authenticating API
+ * operation calls with a Bearer Token.
+ */
+namespace bearer {
+ // @ts-ignore
+ import smithyhttp = http
+ /**
+ * TokenProvider provides an interface for retrieving bearer tokens.
+ */
+ interface TokenProvider {
+ [key:string]: any;
+ retrieveBearerToken(_arg0: context.Context): Token
+ }
+ // @ts-ignore
+ import smithycontext = context
+}
+
+/**
+ * Package aws provides the core SDK's utilities and shared types. Use this package's
+ * utilities to simplify setting and reading API operations parameters.
+ *
+ * # Value and Pointer Conversion Utilities
+ *
+ * This package includes a helper conversion utility for each scalar type the SDK's
+ * APIs use. These utilities make getting a pointer of the scalar, and dereferencing
+ * a pointer easier.
+ *
+ * Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+ * The Pointer to value will safely dereference the pointer and return its value.
+ * If the pointer was nil, the scalar's zero value will be returned.
+ *
+ * The value to pointer functions will be named after the scalar type. So to get a
+ * *string from a string value, use the "String" function. This makes it easy
+ * to get a pointer of a literal string value, because getting the address of a
+ * literal requires assigning the value to a variable first.
+ *
+ * ```
+ * var strPtr *string
+ *
+ * // Without the SDK's conversion functions
+ * str := "my string"
+ * strPtr = &str
+ *
+ * // With the SDK's conversion functions
+ * strPtr = aws.String("my string")
+ *
+ * // Convert *string to string value
+ * str = aws.ToString(strPtr)
+ * ```
+ *
+ * In addition to scalars the aws package also includes conversion utilities for
+ * map and slice for types commonly used in API parameters. The map and slice
+ * conversion functions use a similar naming pattern as the scalar conversion
+ * functions.
+ *
+ * ```
+ * var strPtrs []*string
+ * var strs []string = []string{"Go", "Gophers", "Go"}
+ *
+ * // Convert []string to []*string
+ * strPtrs = aws.StringSlice(strs)
+ *
+ * // Convert []*string to []string
+ * strs = aws.ToStringSlice(strPtrs)
+ * ```
+ *
+ * # SDK Default HTTP Client
+ *
+ * The SDK will use the http.DefaultClient if an HTTP client is not provided to
+ * the SDK's Session, or service client constructor. This means that if the
+ * http.DefaultClient is modified by other components of your application the
+ * modifications will be picked up by the SDK as well.
+ *
+ * In some cases this might be intended, but it is a better practice to create
+ * a custom HTTP Client to share explicitly through your application. You can
+ * configure the SDK to use the custom HTTP Client by setting the HTTPClient
+ * value of the SDK's Config type when creating a Session or service client.
+ */
+/**
+ * Package aws provides core functionality for making requests to AWS services.
+ */
+namespace aws {
+ /**
+ * AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing.
+ */
+ interface AccountIDEndpointMode extends String{}
+ // @ts-ignore
+ import smithybearer = bearer
+ /**
+ * HTTPClient provides the interface to provide custom HTTPClients. Generally
+ * *http.Client is sufficient for most use cases. The HTTPClient should not
+ * follow 301 or 302 redirects.
+ */
+ interface HTTPClient {
+ [key:string]: any;
+ do(_arg0: http.Request): (http.Response)
+ }
+ // @ts-ignore
+ import sdkrand = rand
+ /**
+ * A CredentialsProvider is the interface for any component which will provide
+ * Credentials. A CredentialsProvider is required to manage its own
+ * expired state, and what being expired means.
+ *
+ * A credentials provider implementation can be wrapped with a CredentialCache
+ * to cache the credential value retrieved. Without the cache the SDK will
+ * attempt to retrieve the credentials for every request.
+ */
+ interface CredentialsProvider {
+ [key:string]: any;
+ /**
+ * Retrieve returns nil if it successfully retrieved the value.
+ * An error is returned if the value was not obtainable, or empty.
+ */
+ retrieve(ctx: context.Context): Credentials
+ }
+ /**
+ * DefaultsMode is the SDK defaults mode setting.
+ */
+ interface DefaultsMode extends String{}
+ interface DefaultsMode {
+ /**
+ * SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
+ * the provided string when compared using EqualFold. If the value does not match a known
+ * constant it will be set as-is and the function will return false. As a special case, if the
+ * provided value is a zero-length string, the mode will be set to LegacyDefaultsMode.
+ */
+ setFromString(v: string): boolean
+ }
+ /**
+ * EndpointResolver is an endpoint resolver that can be used to provide or
+ * override an endpoint for the given service and region. API clients will
+ * attempt to use the EndpointResolver first to resolve an endpoint if
+ * available. If the EndpointResolver returns an EndpointNotFoundError error,
+ * API clients will fallback to attempting to resolve the endpoint using its
+ * internal default endpoint resolver.
+ *
+ * Deprecated: The global endpoint resolution interface is deprecated. The API
+ * for endpoint resolution is now unique to each service and is set via the
+ * EndpointResolverV2 field on service client options. Setting a value for
+ * EndpointResolver on aws.Config or service client options will prevent you
+ * from using any endpoint-related service features released after the
+ * introduction of EndpointResolverV2. You may also encounter broken or
+ * unexpected behavior when using the old global interface with services that
+ * use many endpoint-related customizations such as S3.
+ */
+ interface EndpointResolver {
+ [key:string]: any;
+ resolveEndpoint(service: string, region: string): Endpoint
+ }
+ /**
+ * EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
+ * override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
+ * attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
+ * available. 
If the EndpointResolverWithOptions returns an EndpointNotFoundError error, + * API clients will fallback to attempting to resolve the endpoint using its + * internal default endpoint resolver. + * + * Deprecated: The global endpoint resolution interface is deprecated. See + * deprecation docs on [EndpointResolver]. + */ + interface EndpointResolverWithOptions { + [key:string]: any; + resolveEndpoint(service: string, region: string, ...options: { + }[]): Endpoint + } + /** + * ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where + * each bit is a flag that describes the logging behavior for one or more client components. + * The entire 64-bit group is reserved for later expansion by the SDK. + * + * Example: Setting ClientLogMode to enable logging of retries and requests + * + * ``` + * clientLogMode := aws.LogRetries | aws.LogRequest + * ``` + * + * Example: Adding an additional log mode to an existing ClientLogMode value + * + * ``` + * clientLogMode |= aws.LogResponse + * ``` + */ + interface ClientLogMode extends Number{} + interface ClientLogMode { + /** + * IsSigning returns whether the Signing logging mode bit is set + */ + isSigning(): boolean + } + interface ClientLogMode { + /** + * IsRetries returns whether the Retries logging mode bit is set + */ + isRetries(): boolean + } + interface ClientLogMode { + /** + * IsRequest returns whether the Request logging mode bit is set + */ + isRequest(): boolean + } + interface ClientLogMode { + /** + * IsRequestWithBody returns whether the RequestWithBody logging mode bit is set + */ + isRequestWithBody(): boolean + } + interface ClientLogMode { + /** + * IsResponse returns whether the Response logging mode bit is set + */ + isResponse(): boolean + } + interface ClientLogMode { + /** + * IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set + */ + isResponseWithBody(): boolean + } + interface ClientLogMode { + /** + * IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set + */ + isDeprecatedUsage(): boolean + } + interface ClientLogMode { + /** + * IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set + */ + isRequestEventMessage(): boolean + } + interface ClientLogMode { + /** + * IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set + */ + isResponseEventMessage(): boolean + } + interface ClientLogMode { + /** + * ClearSigning clears the Signing logging mode bit + */ + clearSigning(): void + } + interface ClientLogMode { + /** + * ClearRetries clears the Retries logging mode bit + */ + clearRetries(): void + } + interface ClientLogMode { + /** + * ClearRequest clears the Request logging mode bit + */ + clearRequest(): void + } + interface ClientLogMode { + /** + * ClearRequestWithBody clears the RequestWithBody logging mode bit + */ + clearRequestWithBody(): void + } + interface ClientLogMode { + /** + * ClearResponse clears the Response logging mode bit + */ + clearResponse(): void + } + interface ClientLogMode { + /** + * ClearResponseWithBody clears the ResponseWithBody logging mode bit + */ + clearResponseWithBody(): void + } + interface ClientLogMode { + /** + * ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit + */ + clearDeprecatedUsage(): void + } + interface ClientLogMode { + /** + * ClearRequestEventMessage clears the RequestEventMessage logging mode bit + */ + clearRequestEventMessage(): void + } + interface ClientLogMode { + /** + * 
ClearResponseEventMessage clears the ResponseEventMessage logging mode bit
+ */
+ clearResponseEventMessage(): void
+ }
+ /**
+ * RetryMode provides the mode the API client will use to create a retryer
+ * based on.
+ */
+ interface RetryMode extends String{}
+ interface RetryMode {
+ string(): string
+ }
+ /**
+ * Retryer is an interface to determine if a given error from an
+ * attempt should be retried, and if so what backoff delay to apply. The
+ * default implementation used by most services is the retry package's Standard
+ * type, which contains basic retry logic using exponential backoff.
+ */
+ interface Retryer {
+ [key:string]: any;
+ /**
+ * IsErrorRetryable returns if the failed attempt is retryable. This check
+ * should determine if the error can be retried, or if the error is
+ * terminal.
+ */
+ isErrorRetryable(_arg0: Error): boolean
+ /**
+ * MaxAttempts returns the maximum number of attempts that can be made for
+ * an attempt before failing. A value of 0 implies that the attempt should
+ * be retried until it succeeds if the errors are retryable.
+ */
+ maxAttempts(): number
+ /**
+ * RetryDelay returns the delay that should be used before retrying the
+ * attempt. Will return error if the delay could not be determined.
+ */
+ retryDelay(attempt: number, opErr: Error): time.Duration
+ /**
+ * GetRetryToken attempts to deduct the retry cost from the retry token pool,
+ * returning the token release function, or error.
+ */
+ getRetryToken(ctx: context.Context, opErr: Error): (_arg0: Error) => void
+ /**
+ * GetInitialToken returns the initial attempt token that can increment the
+ * retry token pool if the attempt is successful.
+ */
+ getInitialToken(): (_arg0: Error) => void
+ }
+ /**
+ * RuntimeEnvironment is a collection of values that are determined at runtime
+ * based on the environment that the SDK is executing in. Some of these values
+ * may or may not be present based on the executing environment and certain SDK
+ * configuration properties that drive whether these values are populated.
+ */
+ interface RuntimeEnvironment {
+ environmentIdentifier: ExecutionEnvironmentID
+ region: string
+ ec2InstanceMetadataRegion: string
+ }
+}
+
+/**
+ * Package s3 provides the API client, operations, and parameter types for Amazon
+ * Simple Storage Service.
+ */
+namespace s3 {
+ // @ts-ignore
+ import awsmiddleware = middleware
+ // @ts-ignore
+ import awshttp = http
+ // @ts-ignore
+ import internalauth = auth
+ // @ts-ignore
+ import internalauthsmithy = smithy
+ // @ts-ignore
+ import internalConfig = configsources
+ // @ts-ignore
+ import internalmiddleware = middleware
+ // @ts-ignore
+ import acceptencodingcust = accept_encoding
+ // @ts-ignore
+ import internalChecksum = checksum
+ // @ts-ignore
+ import presignedurlcust = presigned_url
+ // @ts-ignore
+ import s3sharedconfig = config
+ // @ts-ignore
+ import s3cust = customizations
+ // @ts-ignore
+ import smithy = smithy_go
+ // @ts-ignore
+ import smithyauth = auth
+ // @ts-ignore
+ import smithydocument = document
+ // @ts-ignore
+ import smithyhttp = http
+ type _subdoBVF = noSmithyDocumentSerde
+ interface AbortMultipartUploadInput extends _subdoBVF {
+ /**
+ * The bucket name to which the upload was taking place.
+ *
+ * Directory buckets - When you use this operation with a directory bucket, you
+ * must use virtual-hosted-style requests in the format
+ * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
+ * supported. 
Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Key of the object for which the multipart upload was initiated. + * + * This member is required. + */ + key?: string + /** + * Upload ID that identifies the multipart upload. + * + * This member is required. + */ + uploadId?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + } + type _subAhzcd = noSmithyDocumentSerde + interface AbortMultipartUploadOutput extends _subAhzcd { + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. 
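+ *
+ * (Editorial example) The shape of the matching input, for orientation only;
+ * whether an S3 client is actually reachable from the jsvm is not assumed
+ * here, and all values are placeholders:
+ *
+ * ```
+ * const input = {
+ *     bucket:   "example-bucket",
+ *     key:      "uploads/archive.zip",
+ *     uploadId: "example-upload-id",
+ * } as s3.AbortMultipartUploadInput
+ * ```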
+ */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subRygdJ = noSmithyDocumentSerde + interface CompleteMultipartUploadInput extends _subRygdJ { + /** + * Name of the bucket to which the multipart upload was initiated. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Object key for which the multipart upload was initiated. + * + * This member is required. + */ + key?: string + /** + * ID for the initiated multipart upload. + * + * This member is required. + */ + uploadId?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see + * [Checking object integrity]in the Amazon S3 User Guide. 
+ * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32C?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA1?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA256?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Uploads the object only if the object key name does not already exist in the + * bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. + * + * If a conflicting operation occurs during the upload S3 returns a 409 + * ConditionalRequestConflict response. On a 409 failure you should re-initiate the + * multipart upload with CreateMultipartUpload and re-upload each part. + * + * Expects the '*' (asterisk) character. + * + * For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + * User Guide. + * + * [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifNoneMatch?: string + /** + * The container for the multipart upload request information. + */ + multipartUpload?: types.CompletedMultipartUpload + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * The server-side encryption (SSE) algorithm used to encrypt the object. This + * parameter is required only when the object was created using a checksum + * algorithm or if your bucket policy requires the use of SSE-C. For more + * information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. 
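+ *
+ * (Editorial example) A sketch of a complete-multipart-upload input using the
+ * fields documented above; the part fields are assumed to follow the generated
+ * types.CompletedMultipartUpload shape, and all values are placeholders
+ * (illustrative only):
+ *
+ * ```
+ * const input = {
+ *     bucket:   "example-bucket",
+ *     key:      "uploads/archive.zip",
+ *     uploadId: "example-upload-id",
+ *     multipartUpload: {
+ *         parts: [{ eTag: "\"etag-1\"", partNumber: 1 }],
+ *     },
+ * } as s3.CompleteMultipartUploadInput
+ * ```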
+ * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key + */ + sseCustomerAlgorithm?: string + /** + * The server-side encryption (SSE) customer managed key. This parameter is needed + * only when the object was created using a checksum algorithm. For more + * information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerKey?: string + /** + * The MD5 server-side encryption (SSE) customer managed key. This parameter is + * needed only when the object was created using a checksum algorithm. For more + * information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerKeyMD5?: string + } + type _subopwgM = noSmithyDocumentSerde + interface CompleteMultipartUploadOutput extends _subopwgM { + /** + * The name of the bucket that contains the newly created object. Does not return + * the access point ARN or access point alias if used. + * + * Access points are not supported by directory buckets. + */ + bucket?: string + /** + * Indicates whether the multipart upload uses an S3 Bucket Key for server-side + * encryption with Key Management Service (KMS) keys (SSE-KMS). + */ + bucketKeyEnabled?: boolean + /** + * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. 
For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA256?: string + /** + * Entity tag that identifies the newly created object's data. Objects with + * different object data will have different entity tags. The entity tag is an + * opaque string. The entity tag may or may not be an MD5 digest of the object + * data. If the entity tag is not an MD5 digest of the object data, it will contain + * one or more nonhexadecimal characters and/or will consist of less than 32 or + * more than 32 hexadecimal digits. For more information about how the entity tag + * is calculated, see [Checking object integrity]in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + eTag?: string + /** + * If the object expiration is configured, this will contain the expiration date ( + * expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. + * + * This functionality is not supported for directory buckets. + */ + expiration?: string + /** + * The object key of the newly created object. + */ + key?: string + /** + * The URI that identifies the newly created object. + */ + location?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * If present, indicates the ID of the KMS key that was used for object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when storing this object in Amazon S3 + * (for example, AES256 , aws:kms ). + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Version ID of the newly created object, in case the bucket has versioning + * turned on. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subzVCbd = noSmithyDocumentSerde + interface CopyObjectInput extends _subzVCbd { + /** + * The name of the destination bucket. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). 
For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Specifies the source object for the copy operation. The source object can be up + * to 5 GB. If the source object is an object that was uploaded by using a + * multipart upload, the object copy will be a single part object after the source + * object is copied to the destination bucket. + * + * You specify the value of the copy source in one of two formats, depending on + * whether you want to access the source object through an [access point]: + * + * ``` + * - For objects not accessed through an access point, specify the name of the + * source bucket and the key of the source object, separated by a slash (/). For + * example, to copy the object reports/january.pdf from the general purpose + * bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value + * must be URL-encoded. To copy the object reports/january.pdf from the directory + * bucket awsexamplebucket--use1-az5--x-s3 , use + * awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be + * URL-encoded. + * + * - For objects accessed through access points, specify the Amazon Resource + * Name (ARN) of the object as accessed through the access point, in the format + * arn:aws:s3:::accesspoint//object/ . For example, to copy the object + * reports/january.pdf through access point my-access-point owned by account + * 123456789012 in Region us-west-2 , use the URL encoding of + * arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf + * . The value must be URL encoded. + * + * - Amazon S3 supports copy operations using Access points only when the source + * and destination buckets are in the same Amazon Web Services Region. + * + * - Access points are not supported by directory buckets. 
+ * ``` + * + * Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + * ``` + * ARN of the object as accessed in the format + * arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + * reports/january.pdf through outpost my-outpost owned by account 123456789012 + * in Region us-west-2 , use the URL encoding of + * arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf + * . The value must be URL-encoded. + * ``` + * + * If your source bucket versioning is enabled, the x-amz-copy-source header by + * default identifies the current version of an object to copy. If the current + * version is a delete marker, Amazon S3 behaves as if the object was deleted. To + * copy a different version, use the versionId query parameter. Specifically, + * append ?versionId= to the value (for example, + * awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 + * ). If you don't specify a version ID, Amazon S3 copies the latest version of the + * source object. + * + * If you enable versioning on the destination bucket, Amazon S3 generates a + * unique version ID for the copied object. This version ID is different from the + * version ID of the source object. Amazon S3 returns the version ID of the copied + * object in the x-amz-version-id response header in the response. + * + * If you do not enable versioning or suspend it on the destination bucket, the + * version ID that Amazon S3 generates in the x-amz-version-id response header is + * always null. + * + * Directory buckets - S3 Versioning isn't enabled and supported for directory + * buckets. + * + * [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html + * + * This member is required. + */ + copySource?: string + /** + * The key of the destination object. + * + * This member is required. + */ + key?: string + /** + * The canned access control list (ACL) to apply to the object. + * + * When you copy an object, the ACL metadata is not preserved and is set to private + * by default. Only the owner has full access control. To override the default ACL + * setting, specify a new ACL when you generate a copy request. For more + * information, see [Using ACLs]. + * + * If the destination bucket that you're copying objects to uses the bucket owner + * enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect + * permissions. Buckets that use this setting only accept PUT requests that don't + * specify an ACL or PUT requests that specify bucket owner full control ACLs, + * such as the bucket-owner-full-control canned ACL or an equivalent form of this + * ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 + * User Guide. + * + * ``` + * - If your destination bucket uses the bucket owner enforced setting for + * Object Ownership, all objects written to the bucket by any account will be owned + * by the bucket owner. + * + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. 
+ * ``` + * + * [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + * [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + */ + acl: types.ObjectCannedACL + /** + * Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + * with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + * If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. + * + * Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object + * encryption with SSE-KMS. Specifying this header with a COPY action doesn’t + * affect bucket-level settings for S3 Bucket Key. + * + * For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide. + * + * Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS + * encrypted objects from general purpose buckets to directory buckets, from + * directory buckets to general purpose buckets, or between directory buckets, + * through [CopyObject]. In this case, Amazon S3 makes a call to KMS every time a copy request + * is made for a KMS-encrypted object. + * + * [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + */ + bucketKeyEnabled?: boolean + /** + * Specifies the caching behavior along the request/reply chain. + */ + cacheControl?: string + /** + * Indicates the algorithm that you want Amazon S3 to use to create the checksum + * for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + * + * When you copy an object, if the source object has a checksum, that checksum + * value will be copied to the new object by default. If the CopyObject request + * does not include this x-amz-checksum-algorithm header, the checksum algorithm + * will be copied from the source object to the destination object (if it's present + * on the source object). You can optionally specify a different checksum algorithm + * to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported + * values will respond with the HTTP status code 400 Bad Request . + * + * For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + * default checksum algorithm that's used for performance. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * Specifies presentational information for the object. Indicates whether an + * object should be displayed in a web browser or downloaded as a file. It allows + * specifying the desired filename for the downloaded file. + */ + contentDisposition?: string + /** + * Specifies what content encodings have been applied to the object and thus what + * decoding mechanisms must be applied to obtain the media-type referenced by the + * Content-Type header field. + * + * For directory buckets, only the aws-chunked value is supported in this header + * field. + */ + contentEncoding?: string + /** + * The language the content is in. + */ + contentLanguage?: string + /** + * A standard MIME type that describes the format of the object data. + */ + contentType?: string + /** + * Copies the object if its entity tag (ETag) matches the specified tag. 
+ * + * If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + * headers are present in the request and evaluate as follows, Amazon S3 returns + * 200 OK and copies the data: + * + * ``` + * - x-amz-copy-source-if-match condition evaluates to true + * + * - x-amz-copy-source-if-unmodified-since condition evaluates to false + * ``` + */ + copySourceIfMatch?: string + /** + * Copies the object if it has been modified since the specified time. + * + * If both the x-amz-copy-source-if-none-match and + * x-amz-copy-source-if-modified-since headers are present in the request and + * evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + * code: + * + * ``` + * - x-amz-copy-source-if-none-match condition evaluates to false + * + * - x-amz-copy-source-if-modified-since condition evaluates to true + * ``` + */ + copySourceIfModifiedSince?: time.Time + /** + * Copies the object if its entity tag (ETag) is different than the specified ETag. + * + * If both the x-amz-copy-source-if-none-match and + * x-amz-copy-source-if-modified-since headers are present in the request and + * evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + * code: + * + * ``` + * - x-amz-copy-source-if-none-match condition evaluates to false + * + * - x-amz-copy-source-if-modified-since condition evaluates to true + * ``` + */ + copySourceIfNoneMatch?: string + /** + * Copies the object if it hasn't been modified since the specified time. + * + * If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + * headers are present in the request and evaluate as follows, Amazon S3 returns + * 200 OK and copies the data: + * + * ``` + * - x-amz-copy-source-if-match condition evaluates to true + * + * - x-amz-copy-source-if-unmodified-since condition evaluates to false + * ``` + */ + copySourceIfUnmodifiedSince?: time.Time + /** + * Specifies the algorithm to use when decrypting the source object (for example, + * AES256 ). + * + * If the source object for the copy is stored in Amazon S3 using SSE-C, you must + * provide the necessary encryption information in your request so that Amazon S3 + * can decrypt the object for copying. + * + * This functionality is not supported when the source object is in a directory + * bucket. + */ + copySourceSSECustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + * the source object. The encryption key provided in this header must be the same + * one that was used when the source object was created. + * + * If the source object for the copy is stored in Amazon S3 using SSE-C, you must + * provide the necessary encryption information in your request so that Amazon S3 + * can decrypt the object for copying. + * + * This functionality is not supported when the source object is in a directory + * bucket. + */ + copySourceSSECustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + * Amazon S3 uses this header for a message integrity check to ensure that the + * encryption key was transmitted without error. + * + * If the source object for the copy is stored in Amazon S3 using SSE-C, you must + * provide the necessary encryption information in your request so that Amazon S3 + * can decrypt the object for copying. + * + * This functionality is not supported when the source object is in a directory + * bucket. 
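+ *
+ * As a rough sketch, the three SSE-C copy-source fields are supplied together
+ * (the key material below is a made-up placeholder, not a real key):
+ *
+ * ```
+ * const input = {
+ *     copySourceSSECustomerAlgorithm: "AES256",
+ *     copySourceSSECustomerKey:       "base64-encoded-256-bit-key",
+ *     copySourceSSECustomerKeyMD5:    "base64-encoded-md5-of-the-raw-key",
+ * }
+ * ```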
+ */ + copySourceSSECustomerKeyMD5?: string + /** + * The account ID of the expected destination bucket owner. If the account ID that + * you provide does not match the actual owner of the destination bucket, the + * request fails with the HTTP status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The account ID of the expected source bucket owner. If the account ID that you + * provide does not match the actual owner of the source bucket, the request fails + * with the HTTP status code 403 Forbidden (access denied). + */ + expectedSourceBucketOwner?: string + /** + * The date and time at which the object is no longer cacheable. + */ + expires?: time.Time + /** + * Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + */ + grantFullControl?: string + /** + * Allows grantee to read the object data and its metadata. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + */ + grantRead?: string + /** + * Allows grantee to read the object ACL. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + */ + grantReadACP?: string + /** + * Allows grantee to write the ACL for the applicable object. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + */ + grantWriteACP?: string + /** + * A map of metadata to store with the object in S3. + */ + metadata: _TygojaDict + /** + * Specifies whether the metadata is copied from the source object or replaced + * with metadata that's provided in the request. When copying an object, you can + * preserve all metadata (the default) or specify new metadata. If this header + * isn’t specified, COPY is the default behavior. + * + * General purpose bucket - For general purpose buckets, when you grant + * permissions, you can use the s3:x-amz-metadata-directive condition key to + * enforce certain metadata behavior when objects are uploaded. For more + * information, see [Amazon S3 condition key examples]in the Amazon S3 User Guide. + * + * x-amz-website-redirect-location is unique to each object and is not copied when + * using the x-amz-metadata-directive header. To copy the value, you must specify + * x-amz-website-redirect-location in the request header. + * + * [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html + */ + metadataDirective: types.MetadataDirective + /** + * Specifies whether you want to apply a legal hold to the object copy. + * + * This functionality is not supported for directory buckets. + */ + objectLockLegalHoldStatus: types.ObjectLockLegalHoldStatus + /** + * The Object Lock mode that you want to apply to the object copy. + * + * This functionality is not supported for directory buckets. + */ + objectLockMode: types.ObjectLockMode + /** + * The date and time when you want the Object Lock of the object copy to expire. + * + * This functionality is not supported for directory buckets. + */ + objectLockRetainUntilDate?: time.Time + /** + * Confirms that the requester knows that they will be charged for the request. 
+ * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Specifies the algorithm to use when encrypting the object (for example, AES256 ). + * + * When you perform a CopyObject operation, if you want to use a different type of + * encryption setting for the target object, you can specify appropriate + * encryption-related headers to encrypt the target object with an Amazon S3 + * managed key, a KMS key, or a customer-provided key. If the encryption setting in + * your request is different from the default encryption configuration of the + * destination bucket, the encryption setting in your request takes precedence. + * + * This functionality is not supported when the destination bucket is a directory + * bucket. + */ + sseCustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key for Amazon S3 to use in + * encrypting data. This value is used to store the object and then it is + * discarded. Amazon S3 does not store the encryption key. The key must be + * appropriate for use with the algorithm specified in the + * x-amz-server-side-encryption-customer-algorithm header. + * + * This functionality is not supported when the destination bucket is a directory + * bucket. + */ + sseCustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + * Amazon S3 uses this header for a message integrity check to ensure that the + * encryption key was transmitted without error. + * + * This functionality is not supported when the destination bucket is a directory + * bucket. + */ + sseCustomerKeyMD5?: string + /** + * Specifies the Amazon Web Services KMS Encryption Context as an additional + * encryption context to use for the destination object encryption. The value of + * this header is a base64-encoded UTF-8 string holding JSON with the encryption + * context key-value pairs. + * + * General purpose buckets - This value must be explicitly added to specify + * encryption context for CopyObject requests if you want an additional encryption + * context for your destination object. The additional encryption context of the + * source object won't be copied to the destination object. For more information, + * see [Encryption context]in the Amazon S3 User Guide. + * + * Directory buckets - You can optionally provide an explicit encryption context + * value. The value must match the default encryption context - the bucket Amazon + * Resource Name (ARN). An additional encryption context value is not supported. + * + * [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context + */ + ssekmsEncryptionContext?: string + /** + * Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + * encryption. All GET and PUT requests for an object protected by KMS will fail if + * they're not made via SSL or using SigV4. 
For information about configuring any + * of the officially supported Amazon Web Services SDKs and Amazon Web Services + * CLI, see [Specifying the Signature Version in Request Authentication]in the Amazon S3 User Guide. + * + * Directory buckets - If you specify x-amz-server-side-encryption with aws:kms , + * you must specify the x-amz-server-side-encryption-aws-kms-key-id header with + * the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key + * to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID + * or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS + * configuration can only support 1 [customer managed key]per directory bucket for the lifetime of the + * bucket. [Amazon Web Services managed key]( aws/s3 ) isn't supported. + * + * [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + * [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when storing this object in Amazon + * S3. Unrecognized or unsupported values won’t write a destination object and will + * receive a 400 Bad Request response. + * + * Amazon S3 automatically encrypts all new objects that are copied to an S3 + * bucket. When copying an object, if you don't specify encryption information in + * your copy request, the encryption setting of the target object is set to the + * default encryption configuration of the destination bucket. By default, all + * buckets have a base level of encryption configuration that uses server-side + * encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a + * different default encryption configuration, Amazon S3 uses the corresponding + * encryption key to encrypt the target object copy. + * + * With server-side encryption, Amazon S3 encrypts your data as it writes your + * data to disks in its data centers and decrypts the data when you access it. For + * more information about server-side encryption, see [Using Server-Side Encryption]in the Amazon S3 User Guide. + * + * General purpose buckets + * + * ``` + * - For general purpose buckets, there are the following supported options for + * server-side encryption: server-side encryption with Key Management Service (KMS) + * keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS + * keys (DSSE-KMS), and server-side encryption with customer-provided encryption + * keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided + * key to encrypt the target object copy. + * + * - When you perform a CopyObject operation, if you want to use a different type + * of encryption setting for the target object, you can specify appropriate + * encryption-related headers to encrypt the target object with an Amazon S3 + * managed key, a KMS key, or a customer-provided key. If the encryption setting in + * your request is different from the default encryption configuration of the + * destination bucket, the encryption setting in your request takes precedence. 
+ * ``` + * + * Directory buckets + * + * ``` + * - For directory buckets, there are only two supported options for server-side + * encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( + * AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). We + * recommend that the bucket's default encryption uses the desired encryption + * configuration and you don't override the bucket default encryption in your + * CreateSession requests or PUT object requests. Then, new objects are + * automatically encrypted with the desired encryption settings. For more + * information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the + * encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + * + * - To encrypt new object copies to a directory bucket with SSE-KMS, we + * recommend you specify SSE-KMS as the directory bucket's default encryption + * configuration with a KMS key (specifically, a [customer managed key]). [Amazon Web Services managed key]( aws/s3 ) isn't supported. + * Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket for the + * lifetime of the bucket. After you specify a customer managed key for SSE-KMS, + * you can't override the customer managed key for the bucket's SSE-KMS + * configuration. Then, when you perform a CopyObject operation and want to + * specify server-side encryption settings for new object copies with SSE-KMS in + * the encryption-related request headers, you must ensure the encryption key is + * the same customer managed key that you specified for the directory bucket's + * default encryption configuration. + * ``` + * + * [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html + * [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + * [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + * [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + */ + serverSideEncryption: types.ServerSideEncryption + /** + * If the x-amz-storage-class header is not used, the copied object will be stored + * in the STANDARD Storage Class by default. The STANDARD storage class provides + * high durability and high availability. Depending on performance needs, you can + * specify a different Storage Class. + * + * ``` + * - Directory buckets - For directory buckets, only the S3 Express One Zone + * storage class is supported to store newly created objects. Unsupported storage + * class values won't write a destination object and will respond with the HTTP + * status code 400 Bad Request . + * + * - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. + * ``` + * + * You can use the CopyObject action to change the storage class of an object that + * is already stored in Amazon S3 by using the x-amz-storage-class header. For + * more information, see [Storage Classes]in the Amazon S3 User Guide. 
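+ *
+ * For illustration, an in-place copy that only changes the storage class could
+ * look roughly like this (bucket and key names are placeholders):
+ *
+ * ```
+ * const input = {
+ *     bucket:       "my-bucket",
+ *     key:          "reports/january.pdf",
+ *     copySource:   "my-bucket/reports/january.pdf",
+ *     storageClass: "STANDARD_IA",
+ * }
+ * ```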
+ *
+ * Before using an object as a source object for the copy operation, you must
+ * restore a copy of it if it meets any of the following conditions:
+ *
+ * ```
+ * - The storage class of the source object is GLACIER or DEEP_ARCHIVE .
+ *
+ * - The storage class of the source object is INTELLIGENT_TIERING and its [S3 Intelligent-Tiering access tier] is
+ * Archive Access or Deep Archive Access .
+ * ```
+ *
+ * For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide.
+ *
+ * [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ * [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+ * [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
+ * [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition
+ */
+ storageClass: types.StorageClass
+ /**
+ * The tag-set for the object copy in the destination bucket. This value must be
+ * used in conjunction with the x-amz-tagging-directive if you choose REPLACE for
+ * the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive
+ * , you don't need to set the x-amz-tagging header, because the tag-set will be
+ * copied from the source object directly. The tag-set must be encoded as URL Query
+ * parameters.
+ *
+ * The default value is the empty value.
+ *
+ * Directory buckets - For directory buckets in a CopyObject operation, only the
+ * empty tag-set is supported. Any requests that attempt to write non-empty tags
+ * into directory buckets will receive a 501 Not Implemented status code. When the
+ * destination bucket is a directory bucket, you will receive a 501 Not Implemented
+ * response in any of the following situations:
+ *
+ * ```
+ * - When you attempt to COPY the tag-set from an S3 source object that has
+ * non-empty tags.
+ *
+ * - When you attempt to REPLACE the tag-set of a source object and set a
+ * non-empty value to x-amz-tagging .
+ *
+ * - When you don't set the x-amz-tagging-directive header and the source object
+ * has non-empty tags. This is because the default value of
+ * x-amz-tagging-directive is COPY .
+ * ```
+ *
+ * Because only the empty tag-set is supported for directory buckets in a
+ * CopyObject operation, the following situations are allowed:
+ *
+ * ```
+ * - When you attempt to COPY the tag-set from a directory bucket source object
+ * that has no tags to a general purpose bucket. It copies an empty tag-set to the
+ * destination object.
+ *
+ * - When you attempt to REPLACE the tag-set of a directory bucket source object
+ * and set the x-amz-tagging value of the directory bucket destination object to
+ * empty.
+ *
+ * - When you attempt to REPLACE the tag-set of a general purpose bucket source
+ * object that has non-empty tags and set the x-amz-tagging value of the
+ * directory bucket destination object to empty.
+ *
+ * - When you attempt to REPLACE the tag-set of a directory bucket source object
+ * and don't set the x-amz-tagging value of the directory bucket destination
+ * object. This is because the default value of x-amz-tagging is the empty value.
+ * ```
+ */
+ tagging?: string
+ /**
+ * Specifies whether the object tag-set is copied from the source object or
+ * replaced with the tag-set that's provided in the request.
+ *
+ * The default value is COPY .
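+ *
+ * For example, to replace the tags on the object copy (tag names and values are
+ * placeholders):
+ *
+ * ```
+ * const input = {
+ *     taggingDirective: "REPLACE",
+ *     tagging:          "project=alpha&owner=reports",
+ * }
+ * ```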
+ * + * Directory buckets - For directory buckets in a CopyObject operation, only the + * empty tag-set is supported. Any requests that attempt to write non-empty tags + * into directory buckets will receive a 501 Not Implemented status code. When the + * destination bucket is a directory bucket, you will receive a 501 Not Implemented + * response in any of the following situations: + * + * ``` + * - When you attempt to COPY the tag-set from an S3 source object that has + * non-empty tags. + * + * - When you attempt to REPLACE the tag-set of a source object and set a + * non-empty value to x-amz-tagging . + * + * - When you don't set the x-amz-tagging-directive header and the source object + * has non-empty tags. This is because the default value of + * x-amz-tagging-directive is COPY . + * ``` + * + * Because only the empty tag-set is supported for directory buckets in a + * CopyObject operation, the following situations are allowed: + * + * ``` + * - When you attempt to COPY the tag-set from a directory bucket source object + * that has no tags to a general purpose bucket. It copies an empty tag-set to the + * destination object. + * + * - When you attempt to REPLACE the tag-set of a directory bucket source object + * and set the x-amz-tagging value of the directory bucket destination object to + * empty. + * + * - When you attempt to REPLACE the tag-set of a general purpose bucket source + * object that has non-empty tags and set the x-amz-tagging value of the + * directory bucket destination object to empty. + * + * - When you attempt to REPLACE the tag-set of a directory bucket source object + * and don't set the x-amz-tagging value of the directory bucket destination + * object. This is because the default value of x-amz-tagging is the empty value. + * ``` + */ + taggingDirective: types.TaggingDirective + /** + * If the destination bucket is configured as a website, redirects requests for + * this object copy to another object in the same bucket or to an external URL. + * Amazon S3 stores the value of this header in the object metadata. This value is + * unique to each object and is not copied when using the x-amz-metadata-directive + * header. Instead, you may opt to provide this header in combination with the + * x-amz-metadata-directive header. + * + * This functionality is not supported for directory buckets. + */ + websiteRedirectLocation?: string + } + type _subMUDlx = noSmithyDocumentSerde + interface CopyObjectOutput extends _subMUDlx { + /** + * Indicates whether the copied object uses an S3 Bucket Key for server-side + * encryption with Key Management Service (KMS) keys (SSE-KMS). + */ + bucketKeyEnabled?: boolean + /** + * Container for all response elements. + */ + copyObjectResult?: types.CopyObjectResult + /** + * Version ID of the source object that was copied. + * + * This functionality is not supported when the source object is in a directory + * bucket. + */ + copySourceVersionId?: string + /** + * If the object expiration is configured, the response includes this header. + * + * This functionality is not supported for directory buckets. + */ + expiration?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to confirm the encryption + * algorithm that's used. 
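+ *
+ * For example, a successful SSE-C copy echoes the algorithm back in the
+ * response (value shown for illustration):
+ *
+ * ```
+ * sseCustomerAlgorithm: "AES256"
+ * ```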
+ * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to provide the round-trip + * message integrity verification of the customer-provided encryption key. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * If present, indicates the Amazon Web Services KMS Encryption Context to use for + * object encryption. The value of this header is a base64-encoded UTF-8 string + * holding JSON with the encryption context key-value pairs. + */ + ssekmsEncryptionContext?: string + /** + * If present, indicates the ID of the KMS key that was used for object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store this object in Amazon + * S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Version ID of the newly created copy. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subFvUAF = noSmithyDocumentSerde + interface CreateBucketInput extends _subFvUAF { + /** + * The name of the bucket to create. + * + * General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules] + * in the Amazon S3 User Guide. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use path-style requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. Directory bucket names must be + * unique in the chosen Availability Zone. Bucket names must also follow the format + * bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + * ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + * Guide + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html + * + * This member is required. + */ + bucket?: string + /** + * The canned ACL to apply to the bucket. + * + * This functionality is not supported for directory buckets. + */ + acl: types.BucketCannedACL + /** + * The configuration information for the bucket. + */ + createBucketConfiguration?: types.CreateBucketConfiguration + /** + * Allows grantee the read, write, read ACP, and write ACP permissions on the + * bucket. + * + * This functionality is not supported for directory buckets. + */ + grantFullControl?: string + /** + * Allows grantee to list the objects in the bucket. + * + * This functionality is not supported for directory buckets. + */ + grantRead?: string + /** + * Allows grantee to read the bucket ACL. + * + * This functionality is not supported for directory buckets. + */ + grantReadACP?: string + /** + * Allows grantee to create new objects in the bucket. + * + * For the bucket and object owners of existing objects, also allows deletions and + * overwrites of those objects. + * + * This functionality is not supported for directory buckets. + */ + grantWrite?: string + /** + * Allows grantee to write the ACL for the applicable bucket. 
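+ *
+ * Like the other grant parameters, the grantee is given as type=value pairs,
+ * for example (the account ID is a placeholder):
+ *
+ * ```
+ * const input = {
+ *     grantWriteACP: 'id="111122223333"',
+ * }
+ * ```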
+ * + * This functionality is not supported for directory buckets. + */ + grantWriteACP?: string + /** + * Specifies whether you want S3 Object Lock to be enabled for the new bucket. + * + * This functionality is not supported for directory buckets. + */ + objectLockEnabledForBucket?: boolean + /** + * The container element for object ownership for a bucket's ownership controls. + * + * BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the + * bucket owner if the objects are uploaded with the bucket-owner-full-control + * canned ACL. + * + * ObjectWriter - The uploading account will own the object if the object is + * uploaded with the bucket-owner-full-control canned ACL. + * + * BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + * affect permissions. The bucket owner automatically owns and has full control + * over every object in the bucket. The bucket only accepts PUT requests that don't + * specify an ACL or specify bucket owner full control ACLs (such as the predefined + * bucket-owner-full-control canned ACL or a custom ACL in XML format that grants + * the same permissions). + * + * By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + * disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + * you must control access for each object individually. For more information about + * S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. Directory buckets + * use the bucket owner enforced setting for S3 Object Ownership. + * + * [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + */ + objectOwnership: types.ObjectOwnership + } + type _subVPZab = noSmithyDocumentSerde + interface CreateBucketOutput extends _subVPZab { + /** + * A forward slash followed by the name of the bucket. + */ + location?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subcEoRi = noSmithyDocumentSerde + interface CreateMultipartUploadInput extends _subcEoRi { + /** + * The name of the bucket where the multipart upload is initiated and where the + * object is uploaded. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. 
For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Object key for which the multipart upload is to be initiated. + * + * This member is required. + */ + key?: string + /** + * The canned ACL to apply to the object. Amazon S3 supports a set of predefined + * ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and + * permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide. + * + * By default, all objects are private. Only the owner has full access control. + * When uploading an object, you can grant access permissions to individual Amazon + * Web Services accounts or to predefined groups defined by Amazon S3. These + * permissions are then added to the access control list (ACL) on the new object. + * For more information, see [Using ACLs]. One way to grant the permissions using the request + * headers is to specify a canned ACL with the x-amz-acl request header. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + * + * [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + * [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + */ + acl: types.ObjectCannedACL + /** + * Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + * with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + * + * General purpose buckets - Setting this header to true causes Amazon S3 to use + * an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this + * header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. + * + * Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + * operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't + * supported, when you copy SSE-KMS encrypted objects from general purpose buckets + * to directory buckets, from directory buckets to general purpose buckets, or + * between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + * call to KMS every time a copy request is made for a KMS-encrypted object. 
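+ *
+ * For example, to request an S3 Bucket Key for a KMS-encrypted multipart upload
+ * (a sketch; only the field names come from the declarations below):
+ *
+ * ```
+ * const input = {
+ *     bucketKeyEnabled:     true,
+ *     serverSideEncryption: "aws:kms",
+ * }
+ * ```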
+ * + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops + */ + bucketKeyEnabled?: boolean + /** + * Specifies caching behavior along the request/reply chain. + */ + cacheControl?: string + /** + * Indicates the algorithm that you want Amazon S3 to use to create the checksum + * for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * Specifies presentational information for the object. + */ + contentDisposition?: string + /** + * Specifies what content encodings have been applied to the object and thus what + * decoding mechanisms must be applied to obtain the media-type referenced by the + * Content-Type header field. + * + * For directory buckets, only the aws-chunked value is supported in this header + * field. + */ + contentEncoding?: string + /** + * The language that the content is in. + */ + contentLanguage?: string + /** + * A standard MIME type describing the format of the object data. + */ + contentType?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The date and time at which the object is no longer cacheable. + */ + expires?: time.Time + /** + * Specify access permissions explicitly to give the grantee READ, READ_ACP, and + * WRITE_ACP permissions on the object. + * + * By default, all objects are private. Only the owner has full access control. + * When uploading an object, you can use this header to explicitly grant access + * permissions to specific Amazon Web Services accounts or groups. This header maps + * to specific permissions that Amazon S3 supports in an ACL. For more information, + * see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + * + * You specify each grantee as a type=value pair, where the type is one of the + * following: + * + * ``` + * - id – if the value specified is the canonical user ID of an Amazon Web + * Services account + * + * - uri – if you are granting permissions to a predefined group + * + * - emailAddress – if the value specified is the email address of an Amazon Web + * Services account + * ``` + * + * Using email addresses to specify a grantee is only supported in the following + * ``` + * Amazon Web Services Regions: + * + * - US East (N. Virginia) + * + * - US West (N. California) + * + * - US West (Oregon) + * + * - Asia Pacific (Singapore) + * + * - Asia Pacific (Sydney) + * + * - Asia Pacific (Tokyo) + * + * - Europe (Ireland) + * + * - South America (São Paulo) + * ``` + * + * For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + * ``` + * Amazon Web Services General Reference. 
+ * ```
+ *
+ * For example, the following x-amz-grant-read header grants the Amazon Web
+ * Services accounts identified by account IDs permissions to read object data and
+ * its metadata:
+ *
+ * ```
+ * x-amz-grant-read: id="11112222333", id="444455556666"
+ *
+ * - This functionality is not supported for directory buckets.
+ *
+ * - This functionality is not supported for Amazon S3 on Outposts.
+ * ```
+ *
+ * [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ * [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ */
+ grantFullControl?: string
+ /**
+ * Specify access permissions explicitly to allow the grantee to read the object
+ * data and its metadata.
+ *
+ * By default, all objects are private. Only the owner has full access control.
+ * When uploading an object, you can use this header to explicitly grant access
+ * permissions to specific Amazon Web Services accounts or groups. This header maps
+ * to specific permissions that Amazon S3 supports in an ACL. For more information,
+ * see [Access Control List (ACL) Overview] in the Amazon S3 User Guide.
+ *
+ * You specify each grantee as a type=value pair, where the type is one of the
+ * following:
+ *
+ * ```
+ * - id – if the value specified is the canonical user ID of an Amazon Web
+ * Services account
+ *
+ * - uri – if you are granting permissions to a predefined group
+ *
+ * - emailAddress – if the value specified is the email address of an Amazon Web
+ * Services account
+ * ```
+ *
+ * Using email addresses to specify a grantee is only supported in the following
+ * ```
+ * Amazon Web Services Regions:
+ *
+ * - US East (N. Virginia)
+ *
+ * - US West (N. California)
+ *
+ * - US West (Oregon)
+ *
+ * - Asia Pacific (Singapore)
+ *
+ * - Asia Pacific (Sydney)
+ *
+ * - Asia Pacific (Tokyo)
+ *
+ * - Europe (Ireland)
+ *
+ * - South America (São Paulo)
+ * ```
+ *
+ * For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+ * ```
+ * Amazon Web Services General Reference.
+ * ```
+ *
+ * For example, the following x-amz-grant-read header grants the Amazon Web
+ * Services accounts identified by account IDs permissions to read object data and
+ * its metadata:
+ *
+ * ```
+ * x-amz-grant-read: id="11112222333", id="444455556666"
+ *
+ * - This functionality is not supported for directory buckets.
+ *
+ * - This functionality is not supported for Amazon S3 on Outposts.
+ * ```
+ *
+ * [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ * [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ */
+ grantRead?: string
+ /**
+ * Specify access permissions explicitly to allow the grantee to read the object
+ * ACL.
+ *
+ * By default, all objects are private. Only the owner has full access control.
+ * When uploading an object, you can use this header to explicitly grant access
+ * permissions to specific Amazon Web Services accounts or groups. This header maps
+ * to specific permissions that Amazon S3 supports in an ACL. For more information,
+ * see [Access Control List (ACL) Overview] in the Amazon S3 User Guide.
+ *
+ * You specify each grantee as a type=value pair, where the type is one of the
+ * following:
+ *
+ * ```
+ * - id – if the value specified is the canonical user ID of an Amazon Web
+ * Services account
+ *
+ * - uri – if you are granting permissions to a predefined group
+ *
+ * - emailAddress – if the value specified is the email address of an Amazon Web
+ * Services account
+ * ```
+ *
+ * Using email addresses to specify a grantee is only supported in the following
+ * ```
+ * Amazon Web Services Regions:
+ *
+ * - US East (N. Virginia)
+ *
+ * - US West (N. California)
+ *
+ * - US West (Oregon)
+ *
+ * - Asia Pacific (Singapore)
+ *
+ * - Asia Pacific (Sydney)
+ *
+ * - Asia Pacific (Tokyo)
+ *
+ * - Europe (Ireland)
+ *
+ * - South America (São Paulo)
+ * ```
+ *
+ * For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+ * ```
+ * Amazon Web Services General Reference.
+ * ```
+ *
+ * For example, the following x-amz-grant-read header grants the Amazon Web
+ * Services accounts identified by account IDs permissions to read object data and
+ * its metadata:
+ *
+ * ```
+ * x-amz-grant-read: id="11112222333", id="444455556666"
+ *
+ * - This functionality is not supported for directory buckets.
+ *
+ * - This functionality is not supported for Amazon S3 on Outposts.
+ * ```
+ *
+ * [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ * [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ */
+ grantReadACP?: string
+ /**
+ * Specify access permissions explicitly to allow the grantee to write the ACL
+ * for the applicable object.
+ *
+ * By default, all objects are private. Only the owner has full access control.
+ * When uploading an object, you can use this header to explicitly grant access
+ * permissions to specific Amazon Web Services accounts or groups. This header maps
+ * to specific permissions that Amazon S3 supports in an ACL. For more information,
+ * see [Access Control List (ACL) Overview] in the Amazon S3 User Guide.
+ *
+ * You specify each grantee as a type=value pair, where the type is one of the
+ * following:
+ *
+ * ```
+ * - id – if the value specified is the canonical user ID of an Amazon Web
+ * Services account
+ *
+ * - uri – if you are granting permissions to a predefined group
+ *
+ * - emailAddress – if the value specified is the email address of an Amazon Web
+ * Services account
+ * ```
+ *
+ * Using email addresses to specify a grantee is only supported in the following
+ * ```
+ * Amazon Web Services Regions:
+ *
+ * - US East (N. Virginia)
+ *
+ * - US West (N. California)
+ *
+ * - US West (Oregon)
+ *
+ * - Asia Pacific (Singapore)
+ *
+ * - Asia Pacific (Sydney)
+ *
+ * - Asia Pacific (Tokyo)
+ *
+ * - Europe (Ireland)
+ *
+ * - South America (São Paulo)
+ * ```
+ *
+ * For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+ * ```
+ * Amazon Web Services General Reference.
+ * ```
+ *
+ * For example, the following x-amz-grant-read header grants the Amazon Web
+ * Services accounts identified by account IDs permissions to read object data and
+ * its metadata:
+ *
+ * ```
+ * x-amz-grant-read: id="11112222333", id="444455556666"
+ *
+ * - This functionality is not supported for directory buckets.
+ *
+ * - This functionality is not supported for Amazon S3 on Outposts.
+ * ``` + * + * [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + * [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + */ + grantWriteACP?: string + /** + * A map of metadata to store with the object in S3. + */ + metadata: _TygojaDict + /** + * Specifies whether you want to apply a legal hold to the uploaded object. + * + * This functionality is not supported for directory buckets. + */ + objectLockLegalHoldStatus: types.ObjectLockLegalHoldStatus + /** + * Specifies the Object Lock mode that you want to apply to the uploaded object. + * + * This functionality is not supported for directory buckets. + */ + objectLockMode: types.ObjectLockMode + /** + * Specifies the date and time when you want the Object Lock to expire. + * + * This functionality is not supported for directory buckets. + */ + objectLockRetainUntilDate?: time.Time + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Specifies the algorithm to use when encrypting the object (for example, AES256). + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key for Amazon S3 to use in + * encrypting data. This value is used to store the object and then it is + * discarded; Amazon S3 does not store the encryption key. The key must be + * appropriate for use with the algorithm specified in the + * x-amz-server-side-encryption-customer-algorithm header. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the customer-provided encryption key + * according to RFC 1321. Amazon S3 uses this header for a message integrity check + * to ensure that the encryption key was transmitted without error. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * Specifies the Amazon Web Services KMS Encryption Context to use for object + * encryption. The value of this header is a Base64-encoded string of a UTF-8 + * encoded JSON, which contains the encryption context as key-value pairs. + * + * Directory buckets - You can optionally provide an explicit encryption context + * value. The value must match the default encryption context - the bucket Amazon + * Resource Name (ARN). An additional encryption context value is not supported. + */ + ssekmsEncryptionContext?: string + /** + * Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + * encryption. If the KMS key doesn't exist in the same account that's issuing the + * command, you must use the full Key ARN not the Key ID. 
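+ *
+ * The two accepted identifier forms look roughly like this (the IDs below are
+ * dummy values):
+ *
+ * ```
+ * ssekmsKeyId: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ * ssekmsKeyId: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ * ```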
+ * + * General purpose buckets - If you specify x-amz-server-side-encryption with + * aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key + * Alias) of the KMS key to use. If you specify + * x-amz-server-side-encryption:aws:kms or + * x-amz-server-side-encryption:aws:kms:dsse , but do not provide + * x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web + * Services managed key ( aws/s3 ) to protect the data. + * + * Directory buckets - If you specify x-amz-server-side-encryption with aws:kms , + * you must specify the x-amz-server-side-encryption-aws-kms-key-id header with + * the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key + * to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID + * or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS + * configuration can only support 1 [customer managed key]per directory bucket for the lifetime of the + * bucket. [Amazon Web Services managed key]( aws/s3 ) isn't supported. + * + * [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store this object in Amazon + * S3 (for example, AES256 , aws:kms ). + * + * ``` + * - Directory buckets - For directory buckets, there are only two supported + * options for server-side encryption: server-side encryption with Amazon S3 + * managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + * (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses + * the desired encryption configuration and you don't override the bucket default + * encryption in your CreateSession requests or PUT object requests. Then, new + * objects are automatically encrypted with the desired encryption settings. For + * more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + * the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + * ``` + * + * In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + * ``` + * encryption request headers must match the encryption settings that are specified + * in the CreateSession request. You can't override the values of the encryption + * settings ( x-amz-server-side-encryption , + * x-amz-server-side-encryption-aws-kms-key-id , + * x-amz-server-side-encryption-context , and + * x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + * CreateSession request. You don't need to explicitly specify these encryption + * settings values in Zonal endpoint API calls, and Amazon S3 will use the + * encryption settings values from the CreateSession request to protect new + * objects in the directory bucket. + * ``` + * + * When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + * ``` + * session token refreshes automatically to avoid service interruptions when a + * session expires. The CLI or the Amazon Web Services SDKs use the bucket's + * default encryption configuration for the CreateSession request. It's not + * supported to override the encryption settings values in the CreateSession + * request. 
So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + * request headers must match the default encryption configuration of the directory + * bucket. + * ``` + * + * [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + */ + serverSideEncryption: types.ServerSideEncryption + /** + * By default, Amazon S3 uses the STANDARD Storage Class to store newly created + * objects. The STANDARD storage class provides high durability and high + * availability. Depending on performance needs, you can specify a different + * Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + * + * ``` + * - For directory buckets, only the S3 Express One Zone storage class is + * supported to store newly created objects. + * + * - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + * ``` + * + * [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + */ + storageClass: types.StorageClass + /** + * The tag-set for the object. The tag-set must be encoded as URL Query parameters. + * + * This functionality is not supported for directory buckets. + */ + tagging?: string + /** + * If the bucket is configured as a website, redirects requests for this object to + * another object in the same bucket or to an external URL. Amazon S3 stores the + * value of this header in the object metadata. + * + * This functionality is not supported for directory buckets. + */ + websiteRedirectLocation?: string + } + type _subHZuFd = noSmithyDocumentSerde + interface CreateMultipartUploadOutput extends _subHZuFd { + /** + * If the bucket has a lifecycle rule configured with an action to abort + * incomplete multipart uploads and the prefix in the lifecycle rule matches the + * object name in the request, the response includes this header. The header + * indicates when the initiated multipart upload becomes eligible for an abort + * operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. + * + * The response also includes the x-amz-abort-rule-id header that provides the ID + * of the lifecycle configuration rule that defines the abort action. + * + * This functionality is not supported for directory buckets. + * + * [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + */ + abortDate?: time.Time + /** + * This header is returned along with the x-amz-abort-date header. It identifies + * the applicable lifecycle configuration rule that defines the action to abort + * incomplete multipart uploads. + * + * This functionality is not supported for directory buckets. + */ + abortRuleId?: string + /** + * The name of the bucket to which the multipart upload was initiated. Does not + * return the access point ARN or access point alias if used. + * + * Access points are not supported by directory buckets. 
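+ *
+ * A rough usage sketch (the client value and its wiring are assumed and not
+ * defined in this file; only the field names come from these declarations):
+ *
+ * ```
+ * const out = client.createMultipartUpload({ bucket: "my-bucket", key: "big.bin" })
+ * console.log(out.bucket, out.key, out.uploadId)
+ * ```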
+ */ + bucket?: string + /** + * Indicates whether the multipart upload uses an S3 Bucket Key for server-side + * encryption with Key Management Service (KMS) keys (SSE-KMS). + */ + bucketKeyEnabled?: boolean + /** + * The algorithm that was used to create a checksum of the object. + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * Object key for which the multipart upload was initiated. + */ + key?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to confirm the encryption + * algorithm that's used. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to provide the round-trip + * message integrity verification of the customer-provided encryption key. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * If present, indicates the Amazon Web Services KMS Encryption Context to use for + * object encryption. The value of this header is a Base64-encoded string of a + * UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + */ + ssekmsEncryptionContext?: string + /** + * If present, indicates the ID of the KMS key that was used for object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store this object in Amazon + * S3 (for example, AES256 , aws:kms ). + */ + serverSideEncryption: types.ServerSideEncryption + /** + * ID for the initiated multipart upload. + */ + uploadId?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subNnMiF = noSmithyDocumentSerde + interface CreateSessionInput extends _subNnMiF { + /** + * The name of the bucket that you create a session for. + * + * This member is required. + */ + bucket?: string + /** + * Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + * with server-side encryption using KMS keys (SSE-KMS). + * + * S3 Bucket Keys are always enabled for GET and PUT operations in a directory + * bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy + * SSE-KMS encrypted objects from general purpose buckets to directory buckets, + * from directory buckets to general purpose buckets, or between directory buckets, + * through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a + * copy request is made for a KMS-encrypted object. + * + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops + */ + bucketKeyEnabled?: boolean + /** + * Specifies the Amazon Web Services KMS Encryption Context as an additional + * encryption context to use for object encryption. 
The value of this header is a + * Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption + * context as key-value pairs. This value is stored as object metadata and + * automatically gets passed on to Amazon Web Services KMS for future GetObject + * operations on this object. + * + * General purpose buckets - This value must be explicitly added during CopyObject + * operations if you want an additional encryption context for your object. For + * more information, see [Encryption context]in the Amazon S3 User Guide. + * + * Directory buckets - You can optionally provide an explicit encryption context + * value. The value must match the default encryption context - the bucket Amazon + * Resource Name (ARN). An additional encryption context value is not supported. + * + * [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context + */ + ssekmsEncryptionContext?: string + /** + * If you specify x-amz-server-side-encryption with aws:kms , you must specify the + * x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key + * ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you + * get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key + * alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist + * in the same account that's issuing the command, you must use the full Key ARN, + * not the Key ID. + * + * Your SSE-KMS configuration can only support 1 [customer managed key] per directory bucket for the + * lifetime of the bucket. [Amazon Web Services managed key]( aws/s3 ) isn't supported. + * + * [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm to use when you store objects in the + * directory bucket. + * + * For directory buckets, there are only two supported options for server-side + * encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 + * ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). By default, + * Amazon S3 encrypts data with SSE-S3. For more information, see [Protecting data with server-side encryption]in the Amazon S3 + * User Guide. + * + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Specifies the mode of the session that will be created, either ReadWrite or + * ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is + * capable of executing all the Zonal endpoint API operations on a directory + * bucket. A ReadOnly session is constrained to execute the following Zonal + * endpoint API operations: GetObject , HeadObject , ListObjectsV2 , + * GetObjectAttributes , ListParts , and ListMultipartUploads . + */ + sessionMode: types.SessionMode + } + type _subKUQZg = noSmithyDocumentSerde + interface CreateSessionOutput extends _subKUQZg { + /** + * The established temporary security credentials for the created session. + * + * This member is required. + */ + credentials?: types.SessionCredentials + /** + * Indicates whether to use an S3 Bucket Key for server-side encryption with KMS + * keys (SSE-KMS).
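+ *
+ * Tying the CreateSessionInput members documented above together, an
+ * illustrative literal (the bucket name is a placeholder, the enum string
+ * forms are assumed, and the cast stands in for members omitted for brevity):
+ *
+ * ```
+ * const input = {
+ *   bucket: "example--usw2-az1--x-s3",
+ *   sessionMode: "ReadOnly", // limited to the read-only Zonal operations
+ * } as CreateSessionInput
+ * ```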
+ */ + bucketKeyEnabled?: boolean + /** + * If present, indicates the Amazon Web Services KMS Encryption Context to use for + * object encryption. The value of this header is a Base64-encoded string of a + * UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + * This value is stored as object metadata and automatically gets passed on to + * Amazon Web Services KMS for future GetObject operations on this object. + */ + ssekmsEncryptionContext?: string + /** + * If you specify x-amz-server-side-encryption with aws:kms , this header indicates + * the ID of the KMS symmetric encryption customer managed key that was used for + * object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store objects in the + * directory bucket. + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subWWBVW = noSmithyDocumentSerde + interface DeleteBucketInput extends _subWWBVW { + /** + * Specifies the bucket being deleted. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use path-style requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. Directory bucket names must be + * unique in the chosen Availability Zone. Bucket names must also follow the format + * bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + * ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + * Guide + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + * + * For directory buckets, this header is not supported in this API operation. If + * you specify this header, the request fails with the HTTP status code 501 Not + * Implemented . + */ + expectedBucketOwner?: string + } + type _subKOrqE = noSmithyDocumentSerde + interface DeleteBucketOutput extends _subKOrqE { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subPauEn = noSmithyDocumentSerde + interface DeleteBucketAnalyticsConfigurationInput extends _subPauEn { + /** + * The name of the bucket from which an analytics configuration is deleted. + * + * This member is required. + */ + bucket?: string + /** + * The ID that identifies the analytics configuration. + * + * This member is required. + */ + id?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subyOGOJ = noSmithyDocumentSerde + interface DeleteBucketAnalyticsConfigurationOutput extends _subyOGOJ { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subKrJPX = noSmithyDocumentSerde + interface DeleteBucketCorsInput extends _subKrJPX { + /** + * Specifies the bucket whose cors configuration is being deleted. 
+ * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subUJnEh = noSmithyDocumentSerde + interface DeleteBucketCorsOutput extends _subUJnEh { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subSOoKo = noSmithyDocumentSerde + interface DeleteBucketEncryptionInput extends _subSOoKo { + /** + * The name of the bucket containing the server-side encryption configuration to + * delete. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use path-style requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. Directory bucket names must be + * unique in the chosen Availability Zone. Bucket names must also follow the format + * bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + * ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + * Guide + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + * + * For directory buckets, this header is not supported in this API operation. If + * you specify this header, the request fails with the HTTP status code 501 Not + * Implemented . + */ + expectedBucketOwner?: string + } + type _subfyfpN = noSmithyDocumentSerde + interface DeleteBucketEncryptionOutput extends _subfyfpN { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _submCnCq = noSmithyDocumentSerde + interface DeleteBucketIntelligentTieringConfigurationInput extends _submCnCq { + /** + * The name of the Amazon S3 bucket whose configuration you want to modify or + * retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The ID used to identify the S3 Intelligent-Tiering configuration. + * + * This member is required. + */ + id?: string + } + type _subKbBci = noSmithyDocumentSerde + interface DeleteBucketIntelligentTieringConfigurationOutput extends _subKbBci { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subJcILg = noSmithyDocumentSerde + interface DeleteBucketInventoryConfigurationInput extends _subJcILg { + /** + * The name of the bucket containing the inventory configuration to delete. + * + * This member is required. + */ + bucket?: string + /** + * The ID used to identify the inventory configuration. + * + * This member is required. + */ + id?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). 
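+ *
+ * The bucket-subresource delete inputs above all share this shape; an
+ * illustrative literal (all values are placeholders):
+ *
+ * ```
+ * const input: DeleteBucketInventoryConfigurationInput = {
+ *   bucket: "example-bucket",
+ *   id: "inventory-config-1",
+ *   expectedBucketOwner: "111122223333", // mismatch => 403 Forbidden
+ * }
+ * ```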
+ */ + expectedBucketOwner?: string + } + type _subUovMy = noSmithyDocumentSerde + interface DeleteBucketInventoryConfigurationOutput extends _subUovMy { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subNuiWc = noSmithyDocumentSerde + interface DeleteBucketLifecycleInput extends _subNuiWc { + /** + * The bucket name of the lifecycle to delete. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subacBoP = noSmithyDocumentSerde + interface DeleteBucketLifecycleOutput extends _subacBoP { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subBqDnL = noSmithyDocumentSerde + interface DeleteBucketMetricsConfigurationInput extends _subBqDnL { + /** + * The name of the bucket containing the metrics configuration to delete. + * + * This member is required. + */ + bucket?: string + /** + * The ID used to identify the metrics configuration. The ID has a 64 character + * limit and can only contain letters, numbers, periods, dashes, and underscores. + * + * This member is required. + */ + id?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subRTqPc = noSmithyDocumentSerde + interface DeleteBucketMetricsConfigurationOutput extends _subRTqPc { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subpReTa = noSmithyDocumentSerde + interface DeleteBucketOwnershipControlsInput extends _subpReTa { + /** + * The Amazon S3 bucket whose OwnershipControls you want to delete. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subomweP = noSmithyDocumentSerde + interface DeleteBucketOwnershipControlsOutput extends _subomweP { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subYpTYR = noSmithyDocumentSerde + interface DeleteBucketPolicyInput extends _subYpTYR { + /** + * The bucket name. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use path-style requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. Directory bucket names must be + * unique in the chosen Availability Zone. Bucket names must also follow the format + * bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + * ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + * Guide + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. 
If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + * + * For directory buckets, this header is not supported in this API operation. If + * you specify this header, the request fails with the HTTP status code 501 Not + * Implemented . + */ + expectedBucketOwner?: string + } + type _subSarex = noSmithyDocumentSerde + interface DeleteBucketPolicyOutput extends _subSarex { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subyoKUU = noSmithyDocumentSerde + interface DeleteBucketReplicationInput extends _subyoKUU { + /** + * The bucket name. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subcFLBG = noSmithyDocumentSerde + interface DeleteBucketReplicationOutput extends _subcFLBG { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subFkuCR = noSmithyDocumentSerde + interface DeleteBucketTaggingInput extends _subFkuCR { + /** + * The bucket that has the tag set to be removed. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subXaBzc = noSmithyDocumentSerde + interface DeleteBucketTaggingOutput extends _subXaBzc { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subfMerk = noSmithyDocumentSerde + interface DeleteBucketWebsiteInput extends _subfMerk { + /** + * The bucket name for which you want to remove the website configuration. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subGWwIc = noSmithyDocumentSerde + interface DeleteBucketWebsiteOutput extends _subGWwIc { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subkIyTL = noSmithyDocumentSerde + interface DeleteObjectInput extends _subkIyTL { + /** + * The bucket name of the bucket containing the object. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. 
When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Key name of the object to delete. + * + * This member is required. + */ + key?: string + /** + * Indicates whether S3 Object Lock should bypass Governance-mode restrictions to + * process this operation. To use this header, you must have the + * s3:BypassGovernanceRetention permission. + * + * This functionality is not supported for directory buckets. + */ + bypassGovernanceRetention?: boolean + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The concatenation of the authentication device's serial number, a space, and + * the value that is displayed on your authentication device. Required to + * permanently delete a versioned object if versioning is configured with MFA + * delete enabled. + * + * This functionality is not supported for directory buckets. + */ + mfa?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Version ID used to reference a specific version of the object. + * + * For directory buckets in this API operation, only the null value of the version + * ID is supported. 
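+ *
+ * Combining the members documented above, an illustrative versioned delete
+ * (all values are placeholders; the cast stands in for members omitted for
+ * brevity):
+ *
+ * ```
+ * const input = {
+ *   bucket: "example-bucket",
+ *   key: "docs/report.pdf",
+ *   versionId: "3HL4kqtJlcpXroDTDmJ", // a specific version to delete
+ *   mfa: "arn:aws:iam::111122223333:mfa/user 123456", // serial, space, code
+ *   bypassGovernanceRetention: true, // needs s3:BypassGovernanceRetention
+ * } as DeleteObjectInput
+ * ```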
+ */ + versionId?: string + } + type _subynbWF = noSmithyDocumentSerde + interface DeleteObjectOutput extends _subynbWF { + /** + * Indicates whether the specified object version that was permanently deleted was + * (true) or was not (false) a delete marker before deletion. In a simple DELETE, + * this header indicates whether (true) or not (false) the current version of the + * object is a delete marker. + * + * This functionality is not supported for directory buckets. + */ + deleteMarker?: boolean + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Returns the version ID of the delete marker created as a result of the DELETE + * operation. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subUIqMT = noSmithyDocumentSerde + interface DeleteObjectTaggingInput extends _subUIqMT { + /** + * The bucket name containing the objects from which to remove the tags. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * The key that identifies the object in the bucket from which to remove all tags. + * + * This member is required. + */ + key?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The versionId of the object that the tag-set will be removed from. + */ + versionId?: string + } + type _subhfXHb = noSmithyDocumentSerde + interface DeleteObjectTaggingOutput extends _subhfXHb { + /** + * The versionId of the object the tag-set was removed from. + */ + versionId?: string + /** + * Metadata pertaining to the operation's result. 
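+ *
+ * Recapping the DeleteObjectOutput members above, an illustrative check
+ * (`res` is a hypothetical DeleteObject result):
+ *
+ * ```
+ * const res = {} as DeleteObjectOutput // hypothetical result
+ * if (res.deleteMarker) {
+ *   // a delete marker was created (simple DELETE on a versioned bucket)
+ *   console.log("delete marker version:", res.versionId)
+ * }
+ * ```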
+ */ + resultMetadata: middleware.Metadata + } + type _subUILFI = noSmithyDocumentSerde + interface DeleteObjectsInput extends _subUILFI { + /** + * The bucket name containing the objects to delete. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Container for the request. + * + * This member is required. + */ + delete?: types.Delete + /** + * Specifies whether you want to delete this object even if it has a + * Governance-type Object Lock in place. To use this header, you must have the + * s3:BypassGovernanceRetention permission. + * + * This functionality is not supported for directory buckets. + */ + bypassGovernanceRetention?: boolean + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + * fails the request with the HTTP status code 400 Bad Request . + * + * For the x-amz-checksum-algorithm header, replace algorithm with the + * supported algorithm from the following list: + * + * ``` + * - CRC32 + * + * - CRC32C + * + * - SHA1 + * + * - SHA256 + * ``` + * + * For more information, see [Checking object integrity] in the Amazon S3 User Guide. 
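+ *
+ * For example (illustrative; the types.Delete shape and the enum string form
+ * are assumed, and the cast stands in for members omitted for brevity):
+ *
+ * ```
+ * const input = {
+ *   bucket: "example-bucket",
+ *   delete: { objects: [{ key: "a.txt" }, { key: "b.txt" }] },
+ *   checksumAlgorithm: "SHA256", // pairs with an x-amz-checksum-sha256 trailer
+ * } as DeleteObjectsInput
+ * ```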
+ * + * If the individual checksum value you provide through x-amz-checksum-algorithm + * doesn't match the checksum algorithm you set through + * x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm + * parameter and uses the checksum algorithm that matches the provided value in + * x-amz-checksum-algorithm . + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The concatenation of the authentication device's serial number, a space, and + * the value that is displayed on your authentication device. Required to + * permanently delete a versioned object if versioning is configured with MFA + * delete enabled. + * + * When performing the DeleteObjects operation on an MFA delete enabled bucket, + * which attempts to delete the specified versioned objects, you must include an + * MFA token. If you don't provide an MFA token, the entire request will fail, even + * if there are non-versioned objects that you are trying to delete. If you provide + * an invalid token, whether there are versioned object keys in the request or not, + * the entire Multi-Object Delete request will fail. For information about MFA + * Delete, see [MFA Delete]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete + */ + mfa?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + } + type _subDrSEi = noSmithyDocumentSerde + interface DeleteObjectsOutput extends _subDrSEi { + /** + * Container element for a successful delete. It identifies the object that was + * successfully deleted. + */ + deleted: Array + /** + * Container for a failed delete action that describes the object that Amazon S3 + * attempted to delete and the error it encountered. + */ + errors: Array + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subdkKfs = noSmithyDocumentSerde + interface DeletePublicAccessBlockInput extends _subdkKfs { + /** + * The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + * + * This member is required. 
+ */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subbnEep = noSmithyDocumentSerde + interface DeletePublicAccessBlockOutput extends _subbnEep { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subxadmv = noSmithyDocumentSerde + interface GetBucketAccelerateConfigurationInput extends _subxadmv { + /** + * The name of the bucket for which the accelerate configuration is retrieved. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + } + type _subMJXUk = noSmithyDocumentSerde + interface GetBucketAccelerateConfigurationOutput extends _subMJXUk { + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * The accelerate configuration of the bucket. + */ + status: types.BucketAccelerateStatus + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subymWaB = noSmithyDocumentSerde + interface GetBucketAclInput extends _subymWaB { + /** + * Specifies the S3 bucket whose ACL is being requested. + * + * When you use this API operation with an access point, provide the alias of the + * access point in place of the bucket name. + * + * When you use this API operation with an Object Lambda access point, provide the + * alias of the Object Lambda access point in place of the bucket name. If the + * Object Lambda access point alias in a request is not valid, the error code + * InvalidAccessPointAliasError is returned. For more information about + * InvalidAccessPointAliasError , see [List of Error Codes]. + * + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subZjWFl = noSmithyDocumentSerde + interface GetBucketAclOutput extends _subZjWFl { + /** + * A list of grants. + */ + grants: Array + /** + * Container for the bucket owner's display name and ID. 
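+ *
+ * An illustrative read of the ACL response described above (`acl` is a
+ * hypothetical GetBucketAcl result; the grant and owner field names are
+ * assumed):
+ *
+ * ```
+ * const acl = {} as GetBucketAclOutput // hypothetical result
+ * console.log("owner:", acl.owner?.displayName)
+ * for (const grant of acl.grants || []) {
+ *   console.log(grant.grantee?.type, "->", grant.permission)
+ * }
+ * ```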
+ */ + owner?: types.Owner + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subSTUzj = noSmithyDocumentSerde + interface GetBucketAnalyticsConfigurationInput extends _subSTUzj { + /** + * The name of the bucket from which an analytics configuration is retrieved. + * + * This member is required. + */ + bucket?: string + /** + * The ID that identifies the analytics configuration. + * + * This member is required. + */ + id?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subGjmkO = noSmithyDocumentSerde + interface GetBucketAnalyticsConfigurationOutput extends _subGjmkO { + /** + * The configuration and any analyses for the analytics filter. + */ + analyticsConfiguration?: types.AnalyticsConfiguration + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subBAguP = noSmithyDocumentSerde + interface GetBucketCorsInput extends _subBAguP { + /** + * The bucket name for which to get the cors configuration. + * + * When you use this API operation with an access point, provide the alias of the + * access point in place of the bucket name. + * + * When you use this API operation with an Object Lambda access point, provide the + * alias of the Object Lambda access point in place of the bucket name. If the + * Object Lambda access point alias in a request is not valid, the error code + * InvalidAccessPointAliasError is returned. For more information about + * InvalidAccessPointAliasError , see [List of Error Codes]. + * + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subaoqvd = noSmithyDocumentSerde + interface GetBucketCorsOutput extends _subaoqvd { + /** + * A set of origins and methods (cross-origin access that you want to allow). You + * can add up to 100 rules to the configuration. + */ + corsRules: Array + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subGSqbe = noSmithyDocumentSerde + interface GetBucketEncryptionInput extends _subGSqbe { + /** + * The name of the bucket from which the server-side encryption configuration is + * retrieved. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use path-style requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. Directory bucket names must be + * unique in the chosen Availability Zone. Bucket names must also follow the format + * bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + * ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + * Guide + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * + * This member is required. 
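+ *
+ * An illustrative literal for the directory-bucket case described above (the
+ * bucket name is a placeholder following the required format):
+ *
+ * ```
+ * const input: GetBucketEncryptionInput = {
+ *   bucket: "DOC-EXAMPLE-BUCKET--usw2-az1--x-s3",
+ *   // expectedBucketOwner omitted: not supported for directory buckets
+ * }
+ * ```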
+ */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + * + * For directory buckets, this header is not supported in this API operation. If + * you specify this header, the request fails with the HTTP status code 501 Not + * Implemented . + */ + expectedBucketOwner?: string + } + type _subWdusS = noSmithyDocumentSerde + interface GetBucketEncryptionOutput extends _subWdusS { + /** + * Specifies the default server-side-encryption configuration. + */ + serverSideEncryptionConfiguration?: types.ServerSideEncryptionConfiguration + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subZxizZ = noSmithyDocumentSerde + interface GetBucketIntelligentTieringConfigurationInput extends _subZxizZ { + /** + * The name of the Amazon S3 bucket whose configuration you want to modify or + * retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The ID used to identify the S3 Intelligent-Tiering configuration. + * + * This member is required. + */ + id?: string + } + type _subeeKSR = noSmithyDocumentSerde + interface GetBucketIntelligentTieringConfigurationOutput extends _subeeKSR { + /** + * Container for S3 Intelligent-Tiering configuration. + */ + intelligentTieringConfiguration?: types.IntelligentTieringConfiguration + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _submQByk = noSmithyDocumentSerde + interface GetBucketInventoryConfigurationInput extends _submQByk { + /** + * The name of the bucket containing the inventory configuration to retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The ID used to identify the inventory configuration. + * + * This member is required. + */ + id?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _sublBZFv = noSmithyDocumentSerde + interface GetBucketInventoryConfigurationOutput extends _sublBZFv { + /** + * Specifies the inventory configuration. + */ + inventoryConfiguration?: types.InventoryConfiguration + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _sublFvsi = noSmithyDocumentSerde + interface GetBucketLifecycleConfigurationInput extends _sublFvsi { + /** + * The name of the bucket for which to get the lifecycle information. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subkQQAq = noSmithyDocumentSerde + interface GetBucketLifecycleConfigurationOutput extends _subkQQAq { + /** + * Container for a lifecycle rule. + */ + rules: Array + /** + * Indicates which default minimum object size behavior is applied to the + * lifecycle configuration. + * + * ``` + * - all_storage_classes_128K - Objects smaller than 128 KB will not transition + * to any storage class by default. 
+ * + * - varies_by_storage_class - Objects smaller than 128 KB will transition to + * Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + * all other storage classes will prevent transitions smaller than 128 KB. + * ``` + * + * To customize the minimum object size for any transition you can add a filter + * that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + * of your transition rule. Custom filters always take precedence over the default + * transition behavior. + */ + transitionDefaultMinimumObjectSize: types.TransitionDefaultMinimumObjectSize + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + // @ts-ignore + import smithyxml = xml + // @ts-ignore + import smithyio = io + type _subtPYNo = noSmithyDocumentSerde + interface GetBucketLocationInput extends _subtPYNo { + /** + * The name of the bucket for which to get the location. + * + * When you use this API operation with an access point, provide the alias of the + * access point in place of the bucket name. + * + * When you use this API operation with an Object Lambda access point, provide the + * alias of the Object Lambda access point in place of the bucket name. If the + * Object Lambda access point alias in a request is not valid, the error code + * InvalidAccessPointAliasError is returned. For more information about + * InvalidAccessPointAliasError , see [List of Error Codes]. + * + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subYSLff = noSmithyDocumentSerde + interface GetBucketLocationOutput extends _subYSLff { + /** + * Specifies the Region where the bucket resides. For a list of all the Amazon S3 + * supported location constraints by Region, see [Regions and Endpoints]. Buckets in Region us-east-1 + * have a LocationConstraint of null . + * + * [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + */ + locationConstraint: types.BucketLocationConstraint + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subffqWS = noSmithyDocumentSerde + interface GetBucketLoggingInput extends _subffqWS { + /** + * The bucket name for which to get the logging information. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subLKFCe = noSmithyDocumentSerde + interface GetBucketLoggingOutput extends _subLKFCe { + /** + * Describes where logs are stored and the prefix that Amazon S3 assigns to all + * log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + * Reference. + * + * [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html + */ + loggingEnabled?: types.LoggingEnabled + /** + * Metadata pertaining to the operation's result. 
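+ *
+ * One wrinkle worth restating from GetBucketLocationOutput above: buckets in
+ * Region us-east-1 report a null LocationConstraint, so an illustrative
+ * normalization (`loc` is a hypothetical result) looks like:
+ *
+ * ```
+ * const loc = {} as GetBucketLocationOutput // hypothetical result
+ * const region = loc.locationConstraint || "us-east-1"
+ * ```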
+ */ + resultMetadata: middleware.Metadata + } + type _subXnQXI = noSmithyDocumentSerde + interface GetBucketMetricsConfigurationInput extends _subXnQXI { + /** + * The name of the bucket containing the metrics configuration to retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The ID used to identify the metrics configuration. The ID has a 64 character + * limit and can only contain letters, numbers, periods, dashes, and underscores. + * + * This member is required. + */ + id?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subPlbFZ = noSmithyDocumentSerde + interface GetBucketMetricsConfigurationOutput extends _subPlbFZ { + /** + * Specifies the metrics configuration. + */ + metricsConfiguration?: types.MetricsConfiguration + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subtoemD = noSmithyDocumentSerde + interface GetBucketNotificationConfigurationInput extends _subtoemD { + /** + * The name of the bucket for which to get the notification configuration. + * + * When you use this API operation with an access point, provide the alias of the + * access point in place of the bucket name. + * + * When you use this API operation with an Object Lambda access point, provide the + * alias of the Object Lambda access point in place of the bucket name. If the + * Object Lambda access point alias in a request is not valid, the error code + * InvalidAccessPointAliasError is returned. For more information about + * InvalidAccessPointAliasError , see [List of Error Codes]. + * + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + /** + * A container for specifying the notification configuration of the bucket. If + * this element is empty, notifications are turned off for the bucket. + */ + type _subMPMnb = noSmithyDocumentSerde + interface GetBucketNotificationConfigurationOutput extends _subMPMnb { + /** + * Enables delivery of events to Amazon EventBridge. + */ + eventBridgeConfiguration?: types.EventBridgeConfiguration + /** + * Describes the Lambda functions to invoke and the events for which to invoke + * them. + */ + lambdaFunctionConfigurations: Array + /** + * The Amazon Simple Queue Service queues to publish messages to and the events + * for which to publish messages. + */ + queueConfigurations: Array + /** + * The topic to which notifications are sent and the events for which + * notifications are generated. + */ + topicConfigurations: Array + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subuEFGJ = noSmithyDocumentSerde + interface GetBucketOwnershipControlsInput extends _subuEFGJ { + /** + * The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. 
If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subgMVIl = noSmithyDocumentSerde + interface GetBucketOwnershipControlsOutput extends _subgMVIl { + /** + * The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or + * ObjectWriter) currently in effect for this Amazon S3 bucket. + */ + ownershipControls?: types.OwnershipControls + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subJDaPJ = noSmithyDocumentSerde + interface GetBucketPolicyInput extends _subJDaPJ { + /** + * The bucket name to get the bucket policy for. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use path-style requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. Directory bucket names must be + * unique in the chosen Availability Zone. Bucket names must also follow the format + * bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + * ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + * Guide + * + * Access points - When you use this API operation with an access point, provide + * the alias of the access point in place of the bucket name. + * + * Object Lambda access points - When you use this API operation with an Object + * Lambda access point, provide the alias of the Object Lambda access point in + * place of the bucket name. If the Object Lambda access point alias in a request + * is not valid, the error code InvalidAccessPointAliasError is returned. For more + * information about InvalidAccessPointAliasError , see [List of Error Codes]. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + * + * For directory buckets, this header is not supported in this API operation. If + * you specify this header, the request fails with the HTTP status code 501 Not + * Implemented . + */ + expectedBucketOwner?: string + } + type _subtdLaI = noSmithyDocumentSerde + interface GetBucketPolicyOutput extends _subtdLaI { + /** + * The bucket policy as a JSON document. + */ + policy?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subbnuMF = noSmithyDocumentSerde + interface GetBucketPolicyStatusInput extends _subbnuMF { + /** + * The name of the Amazon S3 bucket whose policy status you want to retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). 
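+ *
+ * The policy document from GetBucketPolicyOutput above arrives as a JSON
+ * string, so an illustrative decode (`out` is a hypothetical result) is:
+ *
+ * ```
+ * const out = {} as GetBucketPolicyOutput // hypothetical result
+ * const policy = JSON.parse(out.policy || "{}")
+ * console.log(policy.Statement?.length ?? 0, "statement(s)")
+ * ```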
+ */ + expectedBucketOwner?: string + } + type _subBRLkC = noSmithyDocumentSerde + interface GetBucketPolicyStatusOutput extends _subBRLkC { + /** + * The policy status for the specified bucket. + */ + policyStatus?: types.PolicyStatus + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subUECqT = noSmithyDocumentSerde + interface GetBucketReplicationInput extends _subUECqT { + /** + * The bucket name for which to get the replication information. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subirNjO = noSmithyDocumentSerde + interface GetBucketReplicationOutput extends _subirNjO { + /** + * A container for replication rules. You can add up to 1,000 rules. The maximum + * size of a replication configuration is 2 MB. + */ + replicationConfiguration?: types.ReplicationConfiguration + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subItMYt = noSmithyDocumentSerde + interface GetBucketRequestPaymentInput extends _subItMYt { + /** + * The name of the bucket for which to get the payment request configuration + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subvFcwV = noSmithyDocumentSerde + interface GetBucketRequestPaymentOutput extends _subvFcwV { + /** + * Specifies who pays for the download and request fees. + */ + payer: types.Payer + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subpdzhk = noSmithyDocumentSerde + interface GetBucketTaggingInput extends _subpdzhk { + /** + * The name of the bucket for which to get the tagging information. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subcxgba = noSmithyDocumentSerde + interface GetBucketTaggingOutput extends _subcxgba { + /** + * Contains the tag set. + * + * This member is required. + */ + tagSet: Array + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subckrrF = noSmithyDocumentSerde + interface GetBucketVersioningInput extends _subckrrF { + /** + * The name of the bucket for which to get the versioning information. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subZmfqn = noSmithyDocumentSerde + interface GetBucketVersioningOutput extends _subZmfqn { + /** + * Specifies whether MFA delete is enabled in the bucket versioning configuration. 
+ * This element is only returned if the bucket has been configured with MFA delete. + * If the bucket has never been so configured, this element is not returned. + */ + mfaDelete: types.MFADeleteStatus + /** + * The versioning state of the bucket. + */ + status: types.BucketVersioningStatus + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subpcLqO = noSmithyDocumentSerde + interface GetBucketWebsiteInput extends _subpcLqO { + /** + * The bucket name for which to get the website configuration. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subpWjbK = noSmithyDocumentSerde + interface GetBucketWebsiteOutput extends _subpWjbK { + /** + * The object key name of the website error document to use for 4XX class errors. + */ + errorDocument?: types.ErrorDocument + /** + * The name of the index document for the website (for example index.html ). + */ + indexDocument?: types.IndexDocument + /** + * Specifies the redirect behavior of all requests to a website endpoint of an + * Amazon S3 bucket. + */ + redirectAllRequestsTo?: types.RedirectAllRequestsTo + /** + * Rules that define when a redirect is applied and the redirect behavior. + */ + routingRules: Array + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subrfljy = noSmithyDocumentSerde + interface GetObjectInput extends _subrfljy { + /** + * The bucket name containing the object. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Object Lambda access points - When you use this action with an Object Lambda + * access point, you must direct requests to the Object Lambda access point + * hostname. The Object Lambda access point hostname takes the form + * AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Key of the object to get. + * + * This member is required. + */ + key?: string + /** + * To retrieve the checksum, this mode must be enabled. + * + * General purpose buckets - In addition, if you enable checksum mode and the + * object is uploaded with a [checksum]and encrypted with an Key Management Service (KMS) + * key, you must have permission to use the kms:Decrypt action to retrieve the + * checksum. + * + * [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html + */ + checksumMode: types.ChecksumMode + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Return the object only if its entity tag (ETag) is the same as the one + * specified in this header; otherwise, return a 412 Precondition Failed error. + * + * If both of the If-Match and If-Unmodified-Since headers are present in the + * request as follows: If-Match condition evaluates to true , and; + * If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + * the data requested. + * + * For more information about conditional requests, see [RFC 7232]. + * + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifMatch?: string + /** + * Return the object only if it has been modified since the specified time; + * otherwise, return a 304 Not Modified error. + * + * If both of the If-None-Match and If-Modified-Since headers are present in the + * request as follows: If-None-Match condition evaluates to false , and; + * If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + * Modified status code. + * + * For more information about conditional requests, see [RFC 7232]. + * + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifModifiedSince?: time.Time + /** + * Return the object only if its entity tag (ETag) is different from the one + * specified in this header; otherwise, return a 304 Not Modified error. + * + * If both of the If-None-Match and If-Modified-Since headers are present in the + * request as follows: If-None-Match condition evaluates to false , and; + * If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + * Modified HTTP status code. + * + * For more information about conditional requests, see [RFC 7232]. + * + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifNoneMatch?: string + /** + * Return the object only if it has not been modified since the specified time; + * otherwise, return a 412 Precondition Failed error. 
+ * + * If both of the If-Match and If-Unmodified-Since headers are present in the + * request as follows: If-Match condition evaluates to true , and; + * If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + * the data requested. + * + * For more information about conditional requests, see [RFC 7232]. + * + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifUnmodifiedSince?: time.Time + /** + * Part number of the object being read. This is a positive integer between 1 and + * 10,000. Effectively performs a 'ranged' GET request for the part specified. + * Useful for downloading just a part of an object. + */ + partNumber?: number + /** + * Downloads the specified byte range of an object. For more information about the + * HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]. + * + * Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + * + * [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range + */ + range?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Sets the Cache-Control header of the response. + */ + responseCacheControl?: string + /** + * Sets the Content-Disposition header of the response. + */ + responseContentDisposition?: string + /** + * Sets the Content-Encoding header of the response. + */ + responseContentEncoding?: string + /** + * Sets the Content-Language header of the response. + */ + responseContentLanguage?: string + /** + * Sets the Content-Type header of the response. + */ + responseContentType?: string + /** + * Sets the Expires header of the response. + */ + responseExpires?: time.Time + /** + * Specifies the algorithm to use when decrypting the object (for example, AES256 ). + * + * If you encrypt an object by using server-side encryption with customer-provided + * encryption keys (SSE-C) when you store the object in Amazon S3, then when you + * GET the object, you must use the following headers: + * + * ``` + * - x-amz-server-side-encryption-customer-algorithm + * + * - x-amz-server-side-encryption-customer-key + * + * - x-amz-server-side-encryption-customer-key-MD5 + * ``` + * + * For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key that you originally provided for + * Amazon S3 to encrypt the data before storing it. This value is used to decrypt + * the object when recovering it and must match the one used when storing the data. 
+ * The key must be appropriate for use with the algorithm specified in the + * x-amz-server-side-encryption-customer-algorithm header. + * + * If you encrypt an object by using server-side encryption with customer-provided + * encryption keys (SSE-C) when you store the object in Amazon S3, then when you + * GET the object, you must use the following headers: + * + * ``` + * - x-amz-server-side-encryption-customer-algorithm + * + * - x-amz-server-side-encryption-customer-key + * + * - x-amz-server-side-encryption-customer-key-MD5 + * ``` + * + * For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the customer-provided encryption key + * according to RFC 1321. Amazon S3 uses this header for a message integrity check + * to ensure that the encryption key was transmitted without error. + * + * If you encrypt an object by using server-side encryption with customer-provided + * encryption keys (SSE-C) when you store the object in Amazon S3, then when you + * GET the object, you must use the following headers: + * + * ``` + * - x-amz-server-side-encryption-customer-algorithm + * + * - x-amz-server-side-encryption-customer-key + * + * - x-amz-server-side-encryption-customer-key-MD5 + * ``` + * + * For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerKeyMD5?: string + /** + * Version ID used to reference a specific version of the object. + * + * By default, the GetObject operation returns the current version of an object. + * To return a different version, use the versionId subresource. + * + * ``` + * - If you include a versionId in your request header, you must have the + * s3:GetObjectVersion permission to access a specific version of an object. The + * s3:GetObject permission is not required in this scenario. + * + * - If you request the current version of an object without a specific versionId + * in the request header, only the s3:GetObject permission is required. The + * s3:GetObjectVersion permission is not required in this scenario. + * + * - Directory buckets - S3 Versioning isn't enabled and supported for directory + * buckets. For this API operation, only the null value of the version ID is + * supported by directory buckets. You can only specify null to the versionId + * query parameter in the request. + * ``` + * + * For more information about versioning, see [PutBucketVersioning]. + * + * [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html + */ + versionId?: string + } + type _subaoaMd = noSmithyDocumentSerde + interface GetObjectOutput extends _subaoaMd { + /** + * Indicates that a range of bytes was specified in the request. + */ + acceptRanges?: string + /** + * Object data. 
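+ *
+ * An illustrative sketch only (the `client` value and its getObject method
+ * are hypothetical, and `readerToString()` is assumed to be available as a
+ * jsvm helper for draining an io.ReadCloser):
+ *
+ * ```
+ * const out = client.getObject({ bucket: "my-bucket", key: "docs/report.txt" })
+ * try {
+ *     console.log(readerToString(out.body))
+ * } finally {
+ *     out.body.close() // close() naming assumed from the generated binding
+ * }
+ * ```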
+ */
+ body: io.ReadCloser
+ /**
+ * Indicates whether the object uses an S3 Bucket Key for server-side encryption
+ * with Key Management Service (KMS) keys (SSE-KMS).
+ */
+ bucketKeyEnabled?: boolean
+ /**
+ * Specifies caching behavior along the request/reply chain.
+ */
+ cacheControl?: string
+ /**
+ * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be
+ * present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ * Amazon S3 User Guide.
+ *
+ * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ */
+ checksumCRC32?: string
+ /**
+ * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be
+ * present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ * Amazon S3 User Guide.
+ *
+ * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ */
+ checksumCRC32C?: string
+ /**
+ * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ * Amazon S3 User Guide.
+ *
+ * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ */
+ checksumSHA1?: string
+ /**
+ * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ * Amazon S3 User Guide.
+ *
+ * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ */
+ checksumSHA256?: string
+ /**
+ * Specifies presentational information for the object.
+ */
+ contentDisposition?: string
+ /**
+ * Indicates what content encodings have been applied to the object and thus what
+ * decoding mechanisms must be applied to obtain the media-type referenced by the
+ * Content-Type header field.
+ */
+ contentEncoding?: string
+ /**
+ * The language the content is in.
+ */
+ contentLanguage?: string
+ /**
+ * Size of the body in bytes.
+ */
+ contentLength?: number
+ /**
+ * The portion of the object returned in the response.
+ */
+ contentRange?: string
+ /**
+ * A standard MIME type describing the format of the object data.
+ */
+ contentType?: string
+ /**
+ * Indicates whether the object retrieved was (true) or was not (false) a Delete
+ * Marker. If false, this response header does not appear in the response.
+ *
+ * ```
+ * - If the current version of the object is a delete marker, Amazon S3 behaves
+ * as if the object was deleted and includes x-amz-delete-marker: true in the
+ * response.
+ *
+ * - If the specified version in the request is a delete marker, the response
+ * returns a 405 Method Not Allowed error and the Last-Modified: timestamp
+ * response header.
+ * ```
+ */
+ deleteMarker?: boolean
+ /**
+ * An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.
+ */
+ eTag?: string
+ /**
+ * If the object expiration is configured (see [PutBucketLifecycleConfiguration]),
+ * the response includes this header. It includes the expiry-date and rule-id
+ * key-value pairs providing object expiration information. The value of the
+ * rule-id is URL-encoded.
+ *
+ * This functionality is not supported for directory buckets.
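+ *
+ * For illustration, a minimal sketch that extracts the expiry-date pair from
+ * the raw header value (the quoted key-value layout is an assumption based on
+ * the description above; `out` is a hypothetical GetObjectOutput):
+ *
+ * ```
+ * const m = (out.expiration || "").match(/expiry-date="([^"]+)"/)
+ * if (m) {
+ *     console.log("object copy expires at:", m[1])
+ * }
+ * ```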
+ * + * [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + */ + expiration?: string + /** + * The date and time at which the object is no longer cacheable. + * + * Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + * the ExpiresString field which contains the unparsed value from the service + * response. + */ + expires?: time.Time + /** + * The unparsed value of the Expires field from the service response. Prefer use + * of this value over the normal Expires response field where possible. + */ + expiresString?: string + /** + * Date and time when the object was last modified. + * + * General purpose buckets - When you specify a versionId of the object in your + * request, if the specified version in the request is a delete marker, the + * response returns a 405 Method Not Allowed error and the Last-Modified: timestamp + * response header. + */ + lastModified?: time.Time + /** + * A map of metadata to store with the object in S3. + * + * Map keys will be normalized to lower-case. + */ + metadata: _TygojaDict + /** + * This is set to the number of metadata entries not returned in the headers that + * are prefixed with x-amz-meta- . This can happen if you create metadata using an + * API like SOAP that supports more flexible metadata than the REST API. For + * example, using SOAP, you can create metadata whose values are not legal HTTP + * headers. + * + * This functionality is not supported for directory buckets. + */ + missingMeta?: number + /** + * Indicates whether this object has an active legal hold. This field is only + * returned if you have permission to view an object's legal hold status. + * + * This functionality is not supported for directory buckets. + */ + objectLockLegalHoldStatus: types.ObjectLockLegalHoldStatus + /** + * The Object Lock mode that's currently in place for this object. + * + * This functionality is not supported for directory buckets. + */ + objectLockMode: types.ObjectLockMode + /** + * The date and time when this object's Object Lock will expire. + * + * This functionality is not supported for directory buckets. + */ + objectLockRetainUntilDate?: time.Time + /** + * The count of parts this object has. This value is only returned if you specify + * partNumber in your request and the object was uploaded as a multipart upload. + */ + partsCount?: number + /** + * Amazon S3 can return this if your request involves a bucket that is either a + * source or destination in a replication rule. + * + * This functionality is not supported for directory buckets. + */ + replicationStatus: types.ReplicationStatus + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Provides information about object restoration action and expiration time of the + * restored object copy. + * + * This functionality is not supported for directory buckets. Only the S3 Express + * One Zone storage class is supported by directory buckets to store objects. + */ + restore?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to confirm the encryption + * algorithm that's used. + * + * This functionality is not supported for directory buckets. 
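+ *
+ * A hedged usage sketch (assuming `out` is a GetObjectOutput from an SSE-C
+ * request that sent the x-amz-server-side-encryption-customer-* headers):
+ *
+ * ```
+ * if (out.sseCustomerAlgorithm && out.sseCustomerAlgorithm !== "AES256") {
+ *     throw new Error("unexpected SSE-C algorithm: " + out.sseCustomerAlgorithm)
+ * }
+ * ```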
+ */ + sseCustomerAlgorithm?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to provide the round-trip + * message integrity verification of the customer-provided encryption key. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * If present, indicates the ID of the KMS key that was used for object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store this object in Amazon + * S3. + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Provides storage class information of the object. Amazon S3 returns this header + * for all objects except for S3 Standard storage class objects. + * + * Directory buckets - Only the S3 Express One Zone storage class is supported by + * directory buckets to store objects. + */ + storageClass: types.StorageClass + /** + * The number of tags, if any, on the object, when you have the relevant + * permission to read object tags. + * + * You can use [GetObjectTagging] to retrieve the tag set associated with an object. + * + * This functionality is not supported for directory buckets. + * + * [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html + */ + tagCount?: number + /** + * Version ID of the object. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + /** + * If the bucket is configured as a website, redirects requests for this object to + * another object in the same bucket or to an external URL. Amazon S3 stores the + * value of this header in the object metadata. + * + * This functionality is not supported for directory buckets. + */ + websiteRedirectLocation?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subcSmrI = noSmithyDocumentSerde + interface GetObjectAclInput extends _subcSmrI { + /** + * The bucket name that contains the object for which to get the ACL information. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * The key of the object for which to get the ACL information. + * + * This member is required. + */ + key?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. 
If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Version ID used to reference a specific version of the object. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + } + type _subAcamt = noSmithyDocumentSerde + interface GetObjectAclOutput extends _subAcamt { + /** + * A list of grants. + */ + grants: Array + /** + * Container for the bucket owner's display name and ID. + */ + owner?: types.Owner + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subxCSXH = noSmithyDocumentSerde + interface GetObjectAttributesInput extends _subxCSXH { + /** + * The name of the bucket that contains the object. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
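+ *
+ * For example (hypothetical values, following the naming formats described
+ * above; `input` is a GetObjectAttributesInput literal):
+ *
+ * ```
+ * input.bucket = "my-bucket"                          // general purpose bucket
+ * input.bucket = "DOC-EXAMPLE-BUCKET--usw2-az1--x-s3" // directory bucket
+ * input.bucket = "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point" // access point ARN
+ * ```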
+ * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * The object key. + * + * This member is required. + */ + key?: string + /** + * Specifies the fields at the root level that you want returned in the response. + * Fields that you do not specify are not returned. + * + * This member is required. + */ + objectAttributes: Array + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Sets the maximum number of parts to return. + */ + maxParts?: number + /** + * Specifies the part after which listing should begin. Only parts with higher + * part numbers will be listed. + */ + partNumberMarker?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Specifies the algorithm to use when encrypting the object (for example, AES256). + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key for Amazon S3 to use in + * encrypting data. This value is used to store the object and then it is + * discarded; Amazon S3 does not store the encryption key. The key must be + * appropriate for use with the algorithm specified in the + * x-amz-server-side-encryption-customer-algorithm header. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + * Amazon S3 uses this header for a message integrity check to ensure that the + * encryption key was transmitted without error. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * The version ID used to reference a specific version of the object. + * + * S3 Versioning isn't enabled and supported for directory buckets. For this API + * operation, only the null value of the version ID is supported by directory + * buckets. You can only specify null to the versionId query parameter in the + * request. + */ + versionId?: string + } + type _subtJwoC = noSmithyDocumentSerde + interface GetObjectAttributesOutput extends _subtJwoC { + /** + * The checksum or digest of the object. + */ + checksum?: types.Checksum + /** + * Specifies whether the object retrieved was ( true ) or was not ( false ) a + * delete marker. 
If false , this response header does not appear in the response. + * + * This functionality is not supported for directory buckets. + */ + deleteMarker?: boolean + /** + * An ETag is an opaque identifier assigned by a web server to a specific version + * of a resource found at a URL. + */ + eTag?: string + /** + * The creation date of the object. + */ + lastModified?: time.Time + /** + * A collection of parts associated with a multipart upload. + */ + objectParts?: types.GetObjectAttributesParts + /** + * The size of the object in bytes. + */ + objectSize?: number + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Provides the storage class information of the object. Amazon S3 returns this + * header for all objects except for S3 Standard storage class objects. + * + * For more information, see [Storage Classes]. + * + * Directory buckets - Only the S3 Express One Zone storage class is supported by + * directory buckets to store objects. + * + * [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + */ + storageClass: types.StorageClass + /** + * The version ID of the object. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subyrFaW = noSmithyDocumentSerde + interface GetObjectLegalHoldInput extends _subyrFaW { + /** + * The bucket name containing the object whose legal hold status you want to + * retrieve. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * The key name for the object whose legal hold status you want to retrieve. + * + * This member is required. + */ + key?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. 
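+ *
+ * A minimal sketch (the "requester" constant is an assumption about the
+ * types.RequestPayer values, and the client call is hypothetical):
+ *
+ * ```
+ * client.getObjectLegalHold({
+ *     bucket: "my-requester-pays-bucket",
+ *     key: "legal/contract.pdf",
+ *     requestPayer: "requester", // assumed enum value
+ * })
+ * ```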
+ * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * The version ID of the object whose legal hold status you want to retrieve. + */ + versionId?: string + } + type _subYTMHo = noSmithyDocumentSerde + interface GetObjectLegalHoldOutput extends _subYTMHo { + /** + * The current legal hold status for the specified object. + */ + legalHold?: types.ObjectLockLegalHold + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _suboPiwy = noSmithyDocumentSerde + interface GetObjectLockConfigurationInput extends _suboPiwy { + /** + * The bucket whose Object Lock configuration you want to retrieve. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subGRZzR = noSmithyDocumentSerde + interface GetObjectLockConfigurationOutput extends _subGRZzR { + /** + * The specified bucket's Object Lock configuration. + */ + objectLockConfiguration?: types.ObjectLockConfiguration + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subBxtRy = noSmithyDocumentSerde + interface GetObjectRetentionInput extends _subBxtRy { + /** + * The bucket name containing the object whose retention settings you want to + * retrieve. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * The key name for the object whose retention settings you want to retrieve. + * + * This member is required. + */ + key?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). 
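+ *
+ * For example (a hedged sketch; the `client` value is hypothetical):
+ *
+ * ```
+ * // fails with 403 Forbidden if "123456789012" does not own the bucket:
+ * const out = client.getObjectRetention({
+ *     bucket: "my-bucket",
+ *     key: "contracts/msa.pdf",
+ *     expectedBucketOwner: "123456789012",
+ * })
+ * console.log(out.retention)
+ * ```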
+ */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * The version ID for the object whose retention settings you want to retrieve. + */ + versionId?: string + } + type _subdWwqE = noSmithyDocumentSerde + interface GetObjectRetentionOutput extends _subdWwqE { + /** + * The container element for an object's retention settings. + */ + retention?: types.ObjectLockRetention + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subMZCpC = noSmithyDocumentSerde + interface GetObjectTaggingInput extends _subMZCpC { + /** + * The bucket name containing the object for which to get the tagging information. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Object key for which to get the tagging information. + * + * This member is required. + */ + key?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. 
For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * The versionId of the object for which to get the tagging information. + */ + versionId?: string + } + type _subZcjmo = noSmithyDocumentSerde + interface GetObjectTaggingOutput extends _subZcjmo { + /** + * Contains the tag set. + * + * This member is required. + */ + tagSet: Array + /** + * The versionId of the object for which you got the tagging information. + */ + versionId?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subpvHHG = noSmithyDocumentSerde + interface GetObjectTorrentInput extends _subpvHHG { + /** + * The name of the bucket containing the object for which to get the torrent files. + * + * This member is required. + */ + bucket?: string + /** + * The object key for which to get the information. + * + * This member is required. + */ + key?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + } + type _subDeCIU = noSmithyDocumentSerde + interface GetObjectTorrentOutput extends _subDeCIU { + /** + * A Bencoded dictionary as defined by the BitTorrent specification + */ + body: io.ReadCloser + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subwDirT = noSmithyDocumentSerde + interface GetPublicAccessBlockInput extends _subwDirT { + /** + * The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want + * to retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subLleHs = noSmithyDocumentSerde + interface GetPublicAccessBlockOutput extends _subLleHs { + /** + * The PublicAccessBlock configuration currently in effect for this Amazon S3 + * bucket. 
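+ *
+ * Illustrative only (the `client` value and its method name are hypothetical):
+ *
+ * ```
+ * const out = client.getPublicAccessBlock({ bucket: "my-bucket" })
+ * if (!out.publicAccessBlockConfiguration) {
+ *     console.log("no PublicAccessBlock configuration is in effect")
+ * }
+ * ```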
+ */ + publicAccessBlockConfiguration?: types.PublicAccessBlockConfiguration + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + // @ts-ignore + import smithytime = time + // @ts-ignore + import smithywaiter = waiter + type _subTJcNB = noSmithyDocumentSerde + interface HeadBucketInput extends _subTJcNB { + /** + * The bucket name. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Object Lambda access points - When you use this API operation with an Object + * Lambda access point, provide the alias of the Object Lambda access point in + * place of the bucket name. If the Object Lambda access point alias in a request + * is not valid, the error code InvalidAccessPointAliasError is returned. For more + * information about InvalidAccessPointAliasError , see [List of Error Codes]. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + * + * This member is required. + */ + bucket?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). 
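+ *
+ * A common use of HeadBucket is probing whether a bucket exists and is
+ * accessible. A hedged sketch (the `client` value and its error shape are
+ * hypothetical):
+ *
+ * ```
+ * try {
+ *     const out = client.headBucket({ bucket: "my-bucket" })
+ *     console.log("bucket region:", out.bucketRegion)
+ * } catch (err) {
+ *     console.log("bucket is missing or not accessible:", err)
+ * }
+ * ```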
+ */
+ expectedBucketOwner?: string
+ }
+ type _submwZVR = noSmithyDocumentSerde
+ interface HeadBucketOutput extends _submwZVR {
+ /**
+ * Indicates whether the bucket name used in the request is an access point alias.
+ *
+ * For directory buckets, the value of this field is false .
+ */
+ accessPointAlias?: boolean
+ /**
+ * The name of the location where the bucket will be created.
+ *
+ * For directory buckets, the AZ ID of the Availability Zone where the bucket is
+ * created. An example AZ ID value is usw2-az1 .
+ *
+ * This functionality is only supported by directory buckets.
+ */
+ bucketLocationName?: string
+ /**
+ * The type of location where the bucket is created.
+ *
+ * This functionality is only supported by directory buckets.
+ */
+ bucketLocationType: types.LocationType
+ /**
+ * The Region where the bucket is located.
+ */
+ bucketRegion?: string
+ /**
+ * Metadata pertaining to the operation's result.
+ */
+ resultMetadata: middleware.Metadata
+ }
+ type _subaRmfR = noSmithyDocumentSerde
+ interface HeadObjectInput extends _subaRmfR {
+ /**
+ * The name of the bucket that contains the object.
+ *
+ * Directory buckets - When you use this operation with a directory bucket, you
+ * must use virtual-hosted-style requests in the format
+ * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
+ * supported. Directory bucket names must be unique in the chosen Availability
+ * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
+ * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
+ * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ *
+ * Access points - When you use this action with an access point, you must provide
+ * the alias of the access point in place of the bucket name or specify the access
+ * point ARN. When using the access point ARN, you must direct requests to the
+ * access point hostname. The access point hostname takes the form
+ * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ * action with an access point through the Amazon Web Services SDKs, you provide
+ * the access point ARN in place of the bucket name. For more information about
+ * access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ *
+ * Access points and Object Lambda access points are not supported by directory
+ * buckets.
+ *
+ * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ * takes the form
+ * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
+ * use this action with S3 on Outposts through the Amazon Web Services SDKs, you
+ * provide the Outposts access point ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ *
+ * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ *
+ * This member is required.
+ */
+ bucket?: string
+ /**
+ * The object key.
+ *
+ * This member is required.
+ */
+ key?: string
+ /**
+ * To retrieve the checksum, this parameter must be enabled.
+ * + * General purpose buckets - If you enable checksum mode and the object is + * uploaded with a [checksum]and encrypted with an Key Management Service (KMS) key, you + * must have permission to use the kms:Decrypt action to retrieve the checksum. + * + * Directory buckets - If you enable ChecksumMode and the object is encrypted with + * Amazon Web Services Key Management Service (Amazon Web Services KMS), you must + * also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM + * identity-based policies and KMS key policies for the KMS key to retrieve the + * checksum of the object. + * + * [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html + */ + checksumMode: types.ChecksumMode + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Return the object only if its entity tag (ETag) is the same as the one + * specified; otherwise, return a 412 (precondition failed) error. + * + * If both of the If-Match and If-Unmodified-Since headers are present in the + * request as follows: + * + * ``` + * - If-Match condition evaluates to true , and; + * + * - If-Unmodified-Since condition evaluates to false ; + * ``` + * + * Then Amazon S3 returns 200 OK and the data requested. + * + * For more information about conditional requests, see [RFC 7232]. + * + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifMatch?: string + /** + * Return the object only if it has been modified since the specified time; + * otherwise, return a 304 (not modified) error. + * + * If both of the If-None-Match and If-Modified-Since headers are present in the + * request as follows: + * + * ``` + * - If-None-Match condition evaluates to false , and; + * + * - If-Modified-Since condition evaluates to true ; + * ``` + * + * Then Amazon S3 returns the 304 Not Modified response code. + * + * For more information about conditional requests, see [RFC 7232]. + * + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifModifiedSince?: time.Time + /** + * Return the object only if its entity tag (ETag) is different from the one + * specified; otherwise, return a 304 (not modified) error. + * + * If both of the If-None-Match and If-Modified-Since headers are present in the + * request as follows: + * + * ``` + * - If-None-Match condition evaluates to false , and; + * + * - If-Modified-Since condition evaluates to true ; + * ``` + * + * Then Amazon S3 returns the 304 Not Modified response code. + * + * For more information about conditional requests, see [RFC 7232]. + * + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifNoneMatch?: string + /** + * Return the object only if it has not been modified since the specified time; + * otherwise, return a 412 (precondition failed) error. + * + * If both of the If-Match and If-Unmodified-Since headers are present in the + * request as follows: + * + * ``` + * - If-Match condition evaluates to true , and; + * + * - If-Unmodified-Since condition evaluates to false ; + * ``` + * + * Then Amazon S3 returns 200 OK and the data requested. + * + * For more information about conditional requests, see [RFC 7232]. + * + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifUnmodifiedSince?: time.Time + /** + * Part number of the object being read. This is a positive integer between 1 and + * 10,000. 
Effectively performs a 'ranged' HEAD request for the part specified.
+ * Useful for querying the size of the part and the number of parts in this
+ * object.
+ */
+ partNumber?: number
+ /**
+ * HeadObject returns only the metadata for an object. If the Range is
+ * satisfiable, only the ContentLength is affected in the response. If the Range
+ * is not satisfiable, S3 returns a 416 - Requested Range Not Satisfiable error.
+ */
+ range?: string
+ /**
+ * Confirms that the requester knows that they will be charged for the request.
+ * Bucket owners need not specify this parameter in their requests. If either the
+ * source or destination S3 bucket has Requester Pays enabled, the requester will
+ * pay for corresponding charges to copy the object. For information about
+ * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ * Guide.
+ *
+ * This functionality is not supported for directory buckets.
+ *
+ * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ */
+ requestPayer: types.RequestPayer
+ /**
+ * Sets the Cache-Control header of the response.
+ */
+ responseCacheControl?: string
+ /**
+ * Sets the Content-Disposition header of the response.
+ */
+ responseContentDisposition?: string
+ /**
+ * Sets the Content-Encoding header of the response.
+ */
+ responseContentEncoding?: string
+ /**
+ * Sets the Content-Language header of the response.
+ */
+ responseContentLanguage?: string
+ /**
+ * Sets the Content-Type header of the response.
+ */
+ responseContentType?: string
+ /**
+ * Sets the Expires header of the response.
+ */
+ responseExpires?: time.Time
+ /**
+ * Specifies the algorithm to use when encrypting the object (for example, AES256).
+ *
+ * This functionality is not supported for directory buckets.
+ */
+ sseCustomerAlgorithm?: string
+ /**
+ * Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * x-amz-server-side-encryption-customer-algorithm header.
+ *
+ * This functionality is not supported for directory buckets.
+ */
+ sseCustomerKey?: string
+ /**
+ * Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.
+ *
+ * This functionality is not supported for directory buckets.
+ */
+ sseCustomerKeyMD5?: string
+ /**
+ * Version ID used to reference a specific version of the object.
+ *
+ * For directory buckets in this API operation, only the null value of the version
+ * ID is supported.
+ */
+ versionId?: string
+ }
+ type _subFwlXE = noSmithyDocumentSerde
+ interface HeadObjectOutput extends _subFwlXE {
+ /**
+ * Indicates that a range of bytes was specified.
+ */
+ acceptRanges?: string
+ /**
+ * The archive state of the head object.
+ *
+ * This functionality is not supported for directory buckets.
+ */
+ archiveStatus: types.ArchiveStatus
+ /**
+ * Indicates whether the object uses an S3 Bucket Key for server-side encryption
+ * with Key Management Service (KMS) keys (SSE-KMS).
+ */
+ bucketKeyEnabled?: boolean
+ /**
+ * Specifies caching behavior along the request/reply chain.
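+ *
+ * For illustration, reading the caching metadata without downloading the body
+ * (the `client` value is hypothetical):
+ *
+ * ```
+ * const head = client.headObject({ bucket: "my-bucket", key: "site/app.css" })
+ * console.log(head.cacheControl, head.contentLength, head.eTag)
+ * ```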
+ */ + cacheControl?: string + /** + * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA256?: string + /** + * Specifies presentational information for the object. + */ + contentDisposition?: string + /** + * Indicates what content encodings have been applied to the object and thus what + * decoding mechanisms must be applied to obtain the media-type referenced by the + * Content-Type header field. + */ + contentEncoding?: string + /** + * The language the content is in. + */ + contentLanguage?: string + /** + * Size of the body in bytes. + */ + contentLength?: number + /** + * A standard MIME type describing the format of the object data. + */ + contentType?: string + /** + * Specifies whether the object retrieved was (true) or was not (false) a Delete + * Marker. 
If false, this response header does not appear in the response. + * + * This functionality is not supported for directory buckets. + */ + deleteMarker?: boolean + /** + * An entity tag (ETag) is an opaque identifier assigned by a web server to a + * specific version of a resource found at a URL. + */ + eTag?: string + /** + * If the object expiration is configured (see [PutBucketLifecycleConfiguration]), + * the response includes this header. It includes the expiry-date and rule-id + * key-value pairs providing object expiration information. The value of the + * rule-id is URL-encoded. + * + * This functionality is not supported for directory buckets. + * + * [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + */ + expiration?: string + /** + * The date and time at which the object is no longer cacheable. + * + * Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + * the ExpiresString field which contains the unparsed value from the service + * response. + */ + expires?: time.Time + /** + * The unparsed value of the Expires field from the service response. Prefer use + * of this value over the normal Expires response field where possible. + */ + expiresString?: string + /** + * Date and time when the object was last modified. + */ + lastModified?: time.Time + /** + * A map of metadata to store with the object in S3. + * + * Map keys will be normalized to lower-case. + */ + metadata: _TygojaDict + /** + * This is set to the number of metadata entries not returned in x-amz-meta + * headers. This can happen if you create metadata using an API like SOAP that + * supports more flexible metadata than the REST API. For example, using SOAP, you + * can create metadata whose values are not legal HTTP headers. + * + * This functionality is not supported for directory buckets. + */ + missingMeta?: number + /** + * Specifies whether a legal hold is in effect for this object. This header is + * only returned if the requester has the s3:GetObjectLegalHold permission. This + * header is not returned if the specified version of this object has never had a + * legal hold applied. For more information about S3 Object Lock, see [Object Lock]. + * + * This functionality is not supported for directory buckets. + * + * [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + */ + objectLockLegalHoldStatus: types.ObjectLockLegalHoldStatus + /** + * The Object Lock mode, if any, that's in effect for this object. This header is + * only returned if the requester has the s3:GetObjectRetention permission. For + * more information about S3 Object Lock, see [Object Lock]. + * + * This functionality is not supported for directory buckets. + * + * [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + */ + objectLockMode: types.ObjectLockMode + /** + * The date and time when the Object Lock retention period expires. This header is + * only returned if the requester has the s3:GetObjectRetention permission. + * + * This functionality is not supported for directory buckets. + */ + objectLockRetainUntilDate?: time.Time + /** + * The count of parts this object has. This value is only returned if you specify + * partNumber in your request and the object was uploaded as a multipart upload. + */ + partsCount?: number
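Because partsCount is only populated when the request specifies partNumber, a common trick is a ranged HEAD for part 1. A minimal sketch, assuming a hypothetical `head` binding that is not part of these declarations:

```ts
// Sketch: learn an object's part count via a "ranged" HEAD for part 1.
// `head` stands in for whatever HeadObject binding is available; it is
// an assumption of this example, not part of the generated types.
interface PartProbe { contentLength?: number; partsCount?: number }

function probeParts(
  head: (input: { bucket: string; key: string; partNumber?: number }) => PartProbe,
): PartProbe {
  // Asking for part 1 returns that part's ContentLength plus PartsCount.
  return head({ bucket: "my-bucket", key: "big-upload.bin", partNumber: 1 });
}
```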
+ /** + * Amazon S3 can return this header if your request involves a bucket that is + * either a source or a destination in a replication rule. + * + * In replication, you have a source bucket on which you configure replication and + * destination bucket or buckets where Amazon S3 stores object replicas. When you + * request an object ( GetObject ) or object metadata ( HeadObject ) from these + * buckets, Amazon S3 will return the x-amz-replication-status header in the + * response as follows: + * + * - If requesting an object from the source bucket, Amazon S3 will return the + * x-amz-replication-status header if the object in your request is eligible for + * replication. For example, suppose that in your replication configuration, you + * specify object prefix TaxDocs requesting Amazon S3 to replicate objects with + * key prefix TaxDocs . Any objects you upload with this key name prefix, for example + * TaxDocs/document1.pdf , are eligible for replication. For any object request + * with this key name prefix, Amazon S3 will return the x-amz-replication-status + * header with value PENDING, COMPLETED or FAILED indicating object replication + * status. + * + * - If requesting an object from a destination bucket, Amazon S3 will return + * the x-amz-replication-status header with value REPLICA if the object in your + * request is a replica that Amazon S3 created and there is no replica modification + * replication in progress. + * + * - When replicating objects to multiple destination buckets, the + * x-amz-replication-status header acts differently. The header of the source + * object will only return a value of COMPLETED when replication is successful to + * all destinations. The header will remain at value PENDING until replication has + * completed for all destinations. If one or more destinations fail replication, + * the header will return FAILED. + * + * For more information, see [Replication]. + * + * This functionality is not supported for directory buckets. + * + * [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + */ + replicationStatus: types.ReplicationStatus + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * If the object is an archived object (an object whose storage class is GLACIER), + * the response includes this header if either the archive restoration is in + * progress (see [RestoreObject]) or an archive copy is already restored. + * + * If an archive copy is already restored, the header value indicates when Amazon + * S3 is scheduled to delete the object copy. For example: + * + * ``` + * x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 + * GMT" + * ``` + * + * If the object restoration is in progress, the header returns the value + * ongoing-request="true" . + * + * For more information about archiving objects, see [Transitioning Objects: General Considerations]. + * + * This functionality is not supported for directory buckets. Only the S3 Express + * One Zone storage class is supported by directory buckets to store objects.
+ * + * [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations + * [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + */ + restore?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to confirm the encryption + * algorithm that's used. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to provide the round-trip + * message integrity verification of the customer-provided encryption key. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * If present, indicates the ID of the KMS key that was used for object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store this object in Amazon + * S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Provides storage class information of the object. Amazon S3 returns this header + * for all objects except for S3 Standard storage class objects. + * + * For more information, see [Storage Classes]. + * + * Directory buckets - Only the S3 Express One Zone storage class is supported by + * directory buckets to store objects. + * + * [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + */ + storageClass: types.StorageClass + /** + * Version ID of the object. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + /** + * If the bucket is configured as a website, redirects requests for this object to + * another object in the same bucket or to an external URL. Amazon S3 stores the + * value of this header in the object metadata. + * + * This functionality is not supported for directory buckets. + */ + websiteRedirectLocation?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subdNWQa = noSmithyDocumentSerde + interface ListBucketAnalyticsConfigurationsInput extends _subdNWQa { + /** + * The name of the bucket from which analytics configurations are retrieved. + * + * This member is required. + */ + bucket?: string + /** + * The ContinuationToken that represents a placeholder from where this request + * should begin. + */ + continuationToken?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subaBiJF = noSmithyDocumentSerde + interface ListBucketAnalyticsConfigurationsOutput extends _subaBiJF { + /** + * The list of analytics configurations for a bucket. + */ + analyticsConfigurationList: Array + /** + * The marker that is used as a starting point for this analytics configuration + * list response. This value is present if it was sent in the request. + */ + continuationToken?: string + /** + * Indicates whether the returned list of analytics configurations is complete. 
A + * value of true indicates that the list is not complete and the + * NextContinuationToken will be provided for a subsequent request. + */ + isTruncated?: boolean + /** + * NextContinuationToken is sent when isTruncated is true, which indicates that + * there are more analytics configurations to list. The next request must include + * this NextContinuationToken . The token is obfuscated and is not a usable value. + */ + nextContinuationToken?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subdKGaA = noSmithyDocumentSerde + interface ListBucketIntelligentTieringConfigurationsInput extends _subdKGaA { + /** + * The name of the Amazon S3 bucket whose configuration you want to modify or + * retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The ContinuationToken that represents a placeholder from where this request + * should begin. + */ + continuationToken?: string + } + type _subnzypp = noSmithyDocumentSerde + interface ListBucketIntelligentTieringConfigurationsOutput extends _subnzypp { + /** + * The ContinuationToken that represents a placeholder from where this request + * should begin. + */ + continuationToken?: string + /** + * The list of S3 Intelligent-Tiering configurations for a bucket. + */ + intelligentTieringConfigurationList: Array + /** + * Indicates whether the returned list of S3 Intelligent-Tiering configurations is + * complete. A value of true indicates that the list is not complete and the + * NextContinuationToken will be provided for a subsequent request. + */ + isTruncated?: boolean + /** + * The marker used to continue this S3 Intelligent-Tiering configuration listing.
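Every ListBucket*Configurations operation here shares the same handshake: send continuationToken, stop when isTruncated is false, otherwise echo nextContinuationToken back. As an illustration only, a generic sketch where `fetchPage` stands in for any of these calls:

```ts
// Sketch of the shared pagination contract: keep requesting while
// isTruncated is true, echoing nextContinuationToken back as the next
// request's continuationToken. `fetchPage` and `Page` are assumptions
// of this example, not generated declarations.
interface Page<T> { isTruncated?: boolean; nextContinuationToken?: string; items: T[] }

function listAll<T>(fetchPage: (token?: string) => Page<T>): T[] {
  const all: T[] = [];
  let token: string | undefined;
  do {
    const page = fetchPage(token);
    all.push(...page.items);
    token = page.isTruncated ? page.nextContinuationToken : undefined;
  } while (token !== undefined);
  return all;
}
```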
Use the + * NextContinuationToken from this response to continue the listing in a subsequent + * request. The continuation token is an opaque value that Amazon S3 understands. + */ + nextContinuationToken?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subuBMnM = noSmithyDocumentSerde + interface ListBucketMetricsConfigurationsInput extends _subuBMnM { + /** + * The name of the bucket containing the metrics configurations to retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The marker that is used to continue a metrics configuration listing that has + * been truncated. Use the NextContinuationToken from a previously truncated list + * response to continue the listing. The continuation token is an opaque value that + * Amazon S3 understands. + */ + continuationToken?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subWAvdQ = noSmithyDocumentSerde + interface ListBucketMetricsConfigurationsOutput extends _subWAvdQ { + /** + * The marker that is used as a starting point for this metrics configuration list + * response. This value is present if it was sent in the request. + */ + continuationToken?: string + /** + * Indicates whether the returned list of metrics configurations is complete. A + * value of true indicates that the list is not complete and the + * NextContinuationToken will be provided for a subsequent request. + */ + isTruncated?: boolean + /** + * The list of metrics configurations for a bucket. + */ + metricsConfigurationList: Array + /** + * The marker used to continue a metrics configuration listing that has been + * truncated. Use the NextContinuationToken from a previously truncated list + * response to continue the listing. The continuation token is an opaque value that + * Amazon S3 understands. + */ + nextContinuationToken?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subDHJJR = noSmithyDocumentSerde + interface ListBucketsInput extends _subDHJJR { + /** + * ContinuationToken indicates to Amazon S3 that the list is being continued on + * this bucket with a token. ContinuationToken is obfuscated and is not a real + * key. You can use this ContinuationToken for pagination of the list results. + * + * Length Constraints: Minimum length of 0. Maximum length of 1024. + * + * Required: No. + */ + continuationToken?: string + /** + * Maximum number of buckets to be returned in response. When the number is more + * than the count of buckets that are owned by an Amazon Web Services account, + * return all the buckets in response. + */ + maxBuckets?: number + } + type _subZmNtO = noSmithyDocumentSerde + interface ListBucketsOutput extends _subZmNtO { + /** + * The list of buckets owned by the requester. + */ + buckets: Array + /** + * ContinuationToken is included in the response when there are more buckets that + * can be listed with pagination. The next ListBuckets request to Amazon S3 can be + * continued with this ContinuationToken . ContinuationToken is obfuscated and is + * not a real bucket. + */ + continuationToken?: string + /** + * The owner of the buckets listed. + */ + owner?: types.Owner + /** + * Metadata pertaining to the operation's result. 
+ */ + resultMetadata: middleware.Metadata + } + type _subymSpX = noSmithyDocumentSerde + interface ListDirectoryBucketsInput extends _subymSpX { + /** + * ContinuationToken indicates to Amazon S3 that the list is being continued on + * buckets in this account with a token. ContinuationToken is obfuscated and is + * not a real bucket name. You can use this ContinuationToken for the pagination + * of the list results. + */ + continuationToken?: string + /** + * Maximum number of buckets to be returned in response. When the number is more + * than the count of buckets that are owned by an Amazon Web Services account, + * return all the buckets in response. + */ + maxDirectoryBuckets?: number + } + type _subHVtMn = noSmithyDocumentSerde + interface ListDirectoryBucketsOutput extends _subHVtMn { + /** + * The list of buckets owned by the requester. + */ + buckets: Array + /** + * If ContinuationToken was sent with the request, it is included in the response. + * You can use the returned ContinuationToken for pagination of the list response. + */ + continuationToken?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subZXicn = noSmithyDocumentSerde + interface ListMultipartUploadsInput extends _subZXicn { + /** + * The name of the bucket to which the multipart upload was initiated. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
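The documented directory-bucket name shape bucket_base_name--az-id--x-s3 can be checked mechanically. The sketch below is a loose, illustrative check only; the real server-side validation rules are stricter than this regex:

```ts
// Sketch: a loose check for the documented directory-bucket name shape
// bucket_base_name--az-id--x-s3 (e.g. DOC-EXAMPLE-BUCKET--usw2-az1--x-s3).
// Illustrative only; S3's actual naming validation is stricter.
function looksLikeDirectoryBucket(name: string): boolean {
  return /^[a-z0-9][a-z0-9-]*--[a-z0-9-]+--x-s3$/i.test(name);
}

console.log(looksLikeDirectoryBucket("DOC-EXAMPLE-BUCKET--usw2-az1--x-s3")); // true
console.log(looksLikeDirectoryBucket("my-regular-bucket"));                  // false
```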
+ * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Character you use to group keys. + * + * All keys that contain the same string between the prefix, if specified, and the + * first occurrence of the delimiter after the prefix are grouped under a single + * result element, CommonPrefixes . If you don't specify the prefix parameter, then + * the substring starts at the beginning of the key. The keys that are grouped + * under CommonPrefixes result element are not returned elsewhere in the response. + * + * Directory buckets - For directory buckets, / is the only supported delimiter. + */ + delimiter?: string + /** + * Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + * encoded only in UTF-8. An object key can contain any Unicode character. However, + * the XML 1.0 parser can't parse certain characters, such as characters with an + * ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + * can add this parameter to request that Amazon S3 encode the keys in the + * response. For more information about characters to avoid in object key names, + * see [Object key naming guidelines]. + * + * When using the URL encoding type, non-ASCII characters that are used in an + * object's key name will be percent-encoded according to UTF-8 code values. For + * example, the object test_file(3).png will appear as test_file%283%29.png . + * + * [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + * [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html + */ + encodingType: types.EncodingType + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Specifies the multipart upload after which listing should begin. + * + * - General purpose buckets - For general purpose buckets, key-marker is an + * object key. Together with upload-id-marker , this parameter specifies the + * multipart upload after which listing should begin. If upload-id-marker is not + * specified, only the keys lexicographically greater than the specified key-marker + * will be included in the list. If upload-id-marker is specified, any multipart + * uploads for a key equal to the key-marker might also be included, provided those + * multipart uploads have upload IDs lexicographically greater than the specified + * upload-id-marker . + * + * - Directory buckets - For directory buckets, key-marker is obfuscated and + * isn't a real object key. The upload-id-marker parameter isn't supported by + * directory buckets. To list the additional multipart uploads, you only need to + * set the value of key-marker to the NextKeyMarker value from the previous + * response. + * + * In the ListMultipartUploads response, the multipart uploads aren't sorted + * lexicographically based on the object keys. + */ + keyMarker?: string + /** + * Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the + * response body. 1,000 is the maximum number of uploads that can be returned in a + * response. + */ + maxUploads?: number + /** + * Lists in-progress uploads only for those keys that begin with the specified + * prefix. You can use prefixes to separate a bucket into different grouping of + * keys. (You can think of using prefix to make groups in the same way that you'd + * use a folder in a file system.) + * + * Directory buckets - For directory buckets, only prefixes that end in a + * delimiter ( / ) are supported. + */ + prefix?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Together with key-marker, specifies the multipart upload after which listing + * should begin. If key-marker is not specified, the upload-id-marker parameter is + * ignored. Otherwise, any multipart uploads for a key equal to the key-marker + * might be included in the list only if they have an upload ID lexicographically + * greater than the specified upload-id-marker . + * + * This functionality is not supported for directory buckets. + */ + uploadIdMarker?: string + } + type _subopNEP = noSmithyDocumentSerde + interface ListMultipartUploadsOutput extends _subopNEP { + /** + * The name of the bucket to which the multipart upload was initiated. Does not + * return the access point ARN or access point alias if used. + */ + bucket?: string + /** + * If you specify a delimiter in the request, then the result returns each + * distinct key prefix containing the delimiter in a CommonPrefixes element. The + * distinct key prefixes are returned in the Prefix child element. + * + * Directory buckets - For directory buckets, only prefixes that end in a + * delimiter ( / ) are supported. + */ + commonPrefixes: Array + /** + * Contains the delimiter you specified in the request. If you don't specify a + * delimiter in your request, this element is absent from the response. + * + * Directory buckets - For directory buckets, / is the only supported delimiter. + */ + delimiter?: string + /** + * Encoding type used by Amazon S3 to encode object keys in the response. + * + * If you specify the encoding-type request parameter, Amazon S3 includes this + * element in the response, and returns encoded key name values in the following + * response elements: + * + * Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . + */ + encodingType: types.EncodingType + /** + * Indicates whether the returned list of multipart uploads is truncated. A value + * of true indicates that the list was truncated. The list can be truncated if the + * number of multipart uploads exceeds the limit allowed or specified by max + * uploads. + */ + isTruncated?: boolean + /** + * The key at or after which the listing began.
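Unlike the token-based listings, truncated multipart-upload listings resume from a marker pair: NextKeyMarker and (for general purpose buckets) NextUploadIdMarker. An illustrative sketch, with `list` standing in for an assumed ListMultipartUploads binding:

```ts
// Sketch: drain all in-progress multipart uploads. On truncation, the
// next request echoes NextKeyMarker/NextUploadIdMarker back as
// key-marker/upload-id-marker. `list` is an assumed binding.
interface UploadsPage {
  isTruncated?: boolean;
  nextKeyMarker?: string;
  nextUploadIdMarker?: string;
  uploads: Array<{ key?: string; uploadId?: string }>;
}

function allUploads(
  list: (keyMarker?: string, uploadIdMarker?: string) => UploadsPage,
): Array<{ key?: string; uploadId?: string }> {
  const out: Array<{ key?: string; uploadId?: string }> = [];
  let km: string | undefined;
  let um: string | undefined;
  do {
    const page = list(km, um);
    out.push(...page.uploads);
    km = page.isTruncated ? page.nextKeyMarker : undefined;
    um = page.isTruncated ? page.nextUploadIdMarker : undefined;
  } while (km !== undefined);
  return out;
}
```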
+ */ + keyMarker?: string + /** + * Maximum number of multipart uploads that could have been included in the + * response. + */ + maxUploads?: number + /** + * When a list is truncated, this element specifies the value that should be used + * for the key-marker request parameter in a subsequent request. + */ + nextKeyMarker?: string + /** + * When a list is truncated, this element specifies the value that should be used + * for the upload-id-marker request parameter in a subsequent request. + * + * This functionality is not supported for directory buckets. + */ + nextUploadIdMarker?: string + /** + * When a prefix is provided in the request, this field contains the specified + * prefix. The result contains only keys starting with the specified prefix. + * + * Directory buckets - For directory buckets, only prefixes that end in a + * delimiter ( / ) are supported. + */ + prefix?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Together with key-marker, specifies the multipart upload after which listing + * should begin. If key-marker is not specified, the upload-id-marker parameter is + * ignored. Otherwise, any multipart uploads for a key equal to the key-marker + * might be included in the list only if they have an upload ID lexicographically + * greater than the specified upload-id-marker . + * + * This functionality is not supported for directory buckets. + */ + uploadIdMarker?: string + /** + * Container for elements related to a particular multipart upload. A response can + * contain zero or more Upload elements. + */ + uploads: Array + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subYLZDQ = noSmithyDocumentSerde + interface ListObjectVersionsInput extends _subYLZDQ { + /** + * The bucket name that contains the objects. + * + * This member is required. + */ + bucket?: string + /** + * A delimiter is a character that you specify to group keys. All keys that + * contain the same string between the prefix and the first occurrence of the + * delimiter are grouped under a single result element in CommonPrefixes . These + * groups are counted as one result against the max-keys limitation. These keys + * are not returned elsewhere in the response. + */ + delimiter?: string + /** + * Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + * encoded only in UTF-8. An object key can contain any Unicode character. However, + * the XML 1.0 parser can't parse certain characters, such as characters with an + * ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + * can add this parameter to request that Amazon S3 encode the keys in the + * response. For more information about characters to avoid in object key names, + * see [Object key naming guidelines]. + * + * When using the URL encoding type, non-ASCII characters that are used in an + * object's key name will be percent-encoded according to UTF-8 code values. For + * example, the object test_file(3).png will appear as test_file%283%29.png . 
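When a request sets encoding-type to url, keys come back percent-encoded exactly as in the example above, and the standard decodeURIComponent built-in is enough to restore them:

```ts
// With encoding-type=url, response keys are percent-encoded per UTF-8
// code values; decodeURIComponent reverses the encoding.
const encodedKey = "test_file%283%29.png";
const key = decodeURIComponent(encodedKey);
console.log(key); // "test_file(3).png"
```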
+ * + * [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + * [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html + */ + encodingType: types.EncodingType + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Specifies the key to start with when listing objects in a bucket. + */ + keyMarker?: string + /** + * Sets the maximum number of keys returned in the response. By default, the + * action returns up to 1,000 key names. The response might contain fewer keys but + * will never contain more. If additional keys satisfy the search criteria, but + * were not returned because max-keys was exceeded, the response contains + * IsTruncated set to true . To return the additional keys, see key-marker and + * version-id-marker . + */ + maxKeys?: number + /** + * Specifies the optional fields that you want returned in the response. Fields + * that you do not specify are not returned. + */ + optionalObjectAttributes: Array + /** + * Use this parameter to select only those keys that begin with the specified + * prefix. You can use prefixes to separate a bucket into different groupings of + * keys. (You can think of using prefix to make groups in the same way that you'd + * use a folder in a file system.) You can use prefix with delimiter to roll up + * numerous objects into a single result under CommonPrefixes . + */ + prefix?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Specifies the object version you want to start listing from. + */ + versionIdMarker?: string + } + type _subHQVKY = noSmithyDocumentSerde + interface ListObjectVersionsOutput extends _subHQVKY { + /** + * All of the keys rolled up into a common prefix count as a single return when + * calculating the number of returns. + */ + commonPrefixes: Array + /** + * Container for an object that is a delete marker. + */ + deleteMarkers: Array + /** + * The delimiter grouping the included keys. A delimiter is a character that you + * specify to group keys. All keys that contain the same string between the prefix + * and the first occurrence of the delimiter are grouped under a single result + * element in CommonPrefixes . These groups are counted as one result against the + * max-keys limitation. These keys are not returned elsewhere in the response. + */ + delimiter?: string + /** + * Encoding type used by Amazon S3 to encode object key names in the XML response.
+ * + * If you specify the encoding-type request parameter, Amazon S3 includes this + * element in the response, and returns encoded key name values in the following + * response elements: + * + * KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . + */ + encodingType: types.EncodingType + /** + * A flag that indicates whether Amazon S3 returned all of the results that + * satisfied the search criteria. If your results were truncated, you can make a + * follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker + * response parameters as a starting place in another request to return the rest of + * the results. + */ + isTruncated?: boolean + /** + * Marks the last key returned in a truncated response. + */ + keyMarker?: string + /** + * Specifies the maximum number of objects to return. + */ + maxKeys?: number + /** + * The bucket name. + */ + name?: string + /** + * When the number of responses exceeds the value of MaxKeys , NextKeyMarker + * specifies the first key not returned that satisfies the search criteria. Use + * this value for the key-marker request parameter in a subsequent request. + */ + nextKeyMarker?: string + /** + * When the number of responses exceeds the value of MaxKeys , NextVersionIdMarker + * specifies the first object version not returned that satisfies the search + * criteria. Use this value for the version-id-marker request parameter in a + * subsequent request. + */ + nextVersionIdMarker?: string + /** + * Selects objects that start with the value supplied by this parameter. + */ + prefix?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Marks the last version of the key returned in a truncated response. + */ + versionIdMarker?: string + /** + * Container for version information. + */ + versions: Array + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subpZPkc = noSmithyDocumentSerde + interface ListObjectsInput extends _subpZPkc { + /** + * The name of the bucket containing the objects. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. 
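A truncated version listing resumes from both NextKeyMarker and NextVersionIdMarker, as the field descriptions above spell out. An illustrative sketch, where `listVersions` is an assumed binding over ListObjectVersions:

```ts
// Sketch: fetch the follow-up page of a truncated version listing,
// resuming from NextKeyMarker and NextVersionIdMarker.
interface VersionsPage {
  isTruncated?: boolean;
  nextKeyMarker?: string;
  nextVersionIdMarker?: string;
  versions: Array<{ key?: string; versionId?: string }>;
  deleteMarkers: Array<{ key?: string; versionId?: string }>;
}

function nextPage(
  listVersions: (keyMarker?: string, versionIdMarker?: string) => VersionsPage,
  prev: VersionsPage,
): VersionsPage | undefined {
  if (!prev.isTruncated) return undefined; // nothing more to fetch
  return listVersions(prev.nextKeyMarker, prev.nextVersionIdMarker);
}
```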
+ * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * A delimiter is a character that you use to group keys. + */ + delimiter?: string + /** + * Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + * encoded only in UTF-8. An object key can contain any Unicode character. However, + * the XML 1.0 parser can't parse certain characters, such as characters with an + * ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + * can add this parameter to request that Amazon S3 encode the keys in the + * response. For more information about characters to avoid in object key names, + * see [Object key naming guidelines]. + * + * When using the URL encoding type, non-ASCII characters that are used in an + * object's key name will be percent-encoded according to UTF-8 code values. For + * example, the object test_file(3).png will appear as test_file%283%29.png . + * + * [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + * [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html + */ + encodingType: types.EncodingType + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + * listing after this specified key. Marker can be any key in the bucket. + */ + marker?: string + /** + * Sets the maximum number of keys returned in the response. By default, the + * action returns up to 1,000 key names. The response might contain fewer keys but + * will never contain more. + */ + maxKeys?: number + /** + * Specifies the optional fields that you want returned in the response. Fields + * that you do not specify are not returned. + */ + optionalObjectAttributes: Array + /** + * Limits the response to keys that begin with the specified prefix. + */ + prefix?: string + /** + * Confirms that the requester knows that she or he will be charged for the list + * objects request. Bucket owners need not specify this parameter in their + * requests. + */ + requestPayer: types.RequestPayer + } + type _subOSJeO = noSmithyDocumentSerde + interface ListObjectsOutput extends _subOSJeO { + /** + * All of the keys (up to 1,000) rolled up in a common prefix count as a single + * return when calculating the number of returns. + * + * A response can contain CommonPrefixes only if you specify a delimiter. 
+ * + * CommonPrefixes contains all (if there are any) keys between Prefix and the next + * occurrence of the string specified by the delimiter. + * + * CommonPrefixes lists keys that act like subdirectories in the directory + * specified by Prefix . + * + * For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in + * notes/summer/july , the common prefix is notes/summer/ . All of the keys that + * roll up into a common prefix count as a single return when calculating the + * number of returns. + */ + commonPrefixes: Array + /** + * Metadata about each object returned. + */ + contents: Array + /** + * Causes keys that contain the same string between the prefix and the first + * occurrence of the delimiter to be rolled up into a single result element in the + * CommonPrefixes collection. These rolled-up keys are not returned elsewhere in + * the response. Each rolled-up result counts as only one return against the + * MaxKeys value. + */ + delimiter?: string + /** + * Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + * encoded only in UTF-8. An object key can contain any Unicode character. However, + * the XML 1.0 parser can't parse certain characters, such as characters with an + * ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + * can add this parameter to request that Amazon S3 encode the keys in the + * response. For more information about characters to avoid in object key names, + * see [Object key naming guidelines]. + * + * When using the URL encoding type, non-ASCII characters that are used in an + * object's key name will be percent-encoded according to UTF-8 code values. For + * example, the object test_file(3).png will appear as test_file%283%29.png . + * + * [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + * [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html + */ + encodingType: types.EncodingType + /** + * A flag that indicates whether Amazon S3 returned all of the results that + * satisfied the search criteria. + */ + isTruncated?: boolean + /** + * Indicates where in the bucket listing begins. Marker is included in the + * response if it was sent with the request. + */ + marker?: string + /** + * The maximum number of keys returned in the response body. + */ + maxKeys?: number + /** + * The bucket name. + */ + name?: string + /** + * When the response is truncated (the IsTruncated element value in the response + * is true ), you can use the key name in this field as the marker parameter in + * the subsequent request to get the next set of objects. Amazon S3 lists objects + * in alphabetical order. + * + * This element is returned only if you have the delimiter request parameter + * specified. If the response does not include the NextMarker element and it is + * truncated, you can use the value of the last Key element in the response as the + * marker parameter in the subsequent request to get the next set of object keys. + */ + nextMarker?: string + /** + * Keys that begin with the indicated prefix. + */ + prefix?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. 
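The v1 ListObjects contract above has a subtlety worth encoding: NextMarker is only returned when a delimiter was specified, and a truncated response without it is continued from the last returned Key. A small illustrative sketch of that fallback (the local `V1Page` shape is an assumption of the example):

```ts
// Sketch: choose the next `marker` for a ListObjects (v1) request.
// NextMarker is only present when a delimiter was specified; otherwise
// the last Key of a truncated page is the documented fallback.
interface V1Page {
  isTruncated?: boolean;
  nextMarker?: string;
  contents: Array<{ key?: string }>;
}

function nextMarkerOf(page: V1Page): string | undefined {
  if (!page.isTruncated) return undefined;
  return page.nextMarker ?? page.contents[page.contents.length - 1]?.key;
}
```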
+ */ + resultMetadata: middleware.Metadata + } + type _subYNZGG = noSmithyDocumentSerde + interface ListObjectsV2Input extends _subYNZGG { + /** + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * ContinuationToken indicates to Amazon S3 that the list is being continued on + * this bucket with a token. ContinuationToken is obfuscated and is not a real + * key. You can use this ContinuationToken for pagination of the list results. + */ + continuationToken?: string + /** + * A delimiter is a character that you use to group keys. + * + * - Directory buckets - For directory buckets, / is the only supported delimiter. + * + * - Directory buckets - When you query ListObjectsV2 with a delimiter during + * in-progress multipart uploads, the CommonPrefixes response parameter contains + * the prefixes that are associated with the in-progress multipart uploads. For + * more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + * + * [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html + */ + delimiter?: string + /** + * Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + * encoded only in UTF-8. An object key can contain any Unicode character.
However, + * the XML 1.0 parser can't parse certain characters, such as characters with an + * ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + * can add this parameter to request that Amazon S3 encode the keys in the + * response. For more information about characters to avoid in object key names, + * see [Object key naming guidelines]. + * + * When using the URL encoding type, non-ASCII characters that are used in an + * object's key name will be percent-encoded according to UTF-8 code values. For + * example, the object test_file(3).png will appear as test_file%283%29.png . + * + * [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + * [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html + */ + encodingType: types.EncodingType + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The owner field is not present in ListObjectsV2 by default. If you want to + * return the owner field with each key in the result, then set the FetchOwner + * field to true . + * + * Directory buckets - For directory buckets, the bucket owner is returned as the + * object owner for all objects. + */ + fetchOwner?: boolean + /** + * Sets the maximum number of keys returned in the response. By default, the + * action returns up to 1,000 key names. The response might contain fewer keys but + * will never contain more. + */ + maxKeys?: number + /** + * Specifies the optional fields that you want returned in the response. Fields + * that you do not specify are not returned. + * + * This functionality is not supported for directory buckets. + */ + optionalObjectAttributes: Array + /** + * Limits the response to keys that begin with the specified prefix. + * + * Directory buckets - For directory buckets, only prefixes that end in a + * delimiter ( / ) are supported. + */ + prefix?: string + /** + * Confirms that the requester knows that she or he will be charged for the list + * objects request in V2 style. Bucket owners need not specify this parameter in + * their requests. + * + * This functionality is not supported for directory buckets. + */ + requestPayer: types.RequestPayer + /** + * StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + * listing after this specified key. StartAfter can be any key in the bucket. + * + * This functionality is not supported for directory buckets. + */ + startAfter?: string + } + type _subMMqpa = noSmithyDocumentSerde + interface ListObjectsV2Output extends _subMMqpa { + /** + * All of the keys (up to 1,000) that share the same prefix are grouped together. + * When counting the total numbers of returns by this API operation, this group of + * keys is considered as one item. + * + * A response can contain CommonPrefixes only if you specify a delimiter. + * + * CommonPrefixes contains all (if there are any) keys between Prefix and the next + * occurrence of the string specified by a delimiter. + * + * CommonPrefixes lists keys that act like subdirectories in the directory + * specified by Prefix . + * + * For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in + * notes/summer/july , the common prefix is notes/summer/ . 
All of the keys that + * roll up into a common prefix count as a single return when calculating the + * number of returns. + * + * - Directory buckets - For directory buckets, only prefixes that end in a + * delimiter ( / ) are supported. + * + * - Directory buckets - When you query ListObjectsV2 with a delimiter during + * in-progress multipart uploads, the CommonPrefixes response parameter contains + * the prefixes that are associated with the in-progress multipart uploads. For + * more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + * + * [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html + */ + commonPrefixes: Array + /** + * Metadata about each object returned. + */ + contents: Array + /** + * If ContinuationToken was sent with the request, it is included in the + * response. You can use the returned ContinuationToken for pagination of the list + * results. + */ + continuationToken?: string + /** + * Causes keys that contain the same string between the prefix and the first + * occurrence of the delimiter to be rolled up into a single result element in the + * CommonPrefixes collection. These rolled-up keys are not returned elsewhere in + * the response. Each rolled-up result counts as only one return against the + * MaxKeys value. + * + * Directory buckets - For directory buckets, / is the only supported delimiter. + */ + delimiter?: string + /** + * Encoding type used by Amazon S3 to encode object key names in the XML response. + * + * If you specify the encoding-type request parameter, Amazon S3 includes this + * element in the response, and returns encoded key name values in the following + * response elements: + * + * Delimiter, Prefix, Key, and StartAfter . + */ + encodingType: types.EncodingType + /** + * Set to false if all of the results were returned. Set to true if more keys are + * available to return. If the number of results exceeds that specified by MaxKeys + * , all of the results might not be returned. + */ + isTruncated?: boolean + /** + * KeyCount is the number of keys returned with this request. KeyCount will always + * be less than or equal to the MaxKeys field. For example, if you ask for 50 + * keys, your result will include 50 keys or fewer. + */ + keyCount?: number + /** + * Sets the maximum number of keys returned in the response. By default, the + * action returns up to 1,000 key names. The response might contain fewer keys but + * will never contain more. + */ + maxKeys?: number + /** + * The bucket name. + */ + name?: string + /** + * NextContinuationToken is sent when isTruncated is true, which means there are + * more keys in the bucket that can be listed. The next list requests to Amazon S3 + * can be continued with this NextContinuationToken . NextContinuationToken is + * obfuscated and is not a real key. + */ + nextContinuationToken?: string + /** + * Keys that begin with the indicated prefix. + * + * Directory buckets - For directory buckets, only prefixes that end in a + * delimiter ( / ) are supported. + */ + prefix?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * If StartAfter was sent with the request, it is included in the response.
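ListObjectsV2 replaces the v1 marker with continuation tokens, and commonPrefixes plays the role of subdirectories when a delimiter is used. An illustrative sketch of listing one "directory" level, where `listV2` is an assumed binding:

```ts
// Sketch: list one "directory" level with delimiter "/" using the V2
// continuation-token contract. `listV2` and `V2Page` are assumptions
// of this example, not generated declarations.
interface V2Page {
  isTruncated?: boolean;
  nextContinuationToken?: string;
  contents: Array<{ key?: string }>;
  commonPrefixes: Array<{ prefix?: string }>;
}

function listLevel(
  listV2: (prefix: string, delimiter: string, token?: string) => V2Page,
  prefix: string,
): { files: string[]; dirs: string[] } {
  const files: string[] = [];
  const dirs: string[] = [];
  let token: string | undefined;
  do {
    const page = listV2(prefix, "/", token);
    for (const o of page.contents) if (o.key) files.push(o.key);
    for (const p of page.commonPrefixes) if (p.prefix) dirs.push(p.prefix); // e.g. notes/summer/
    token = page.isTruncated ? page.nextContinuationToken : undefined;
  } while (token !== undefined);
  return { files, dirs };
}
```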
+ * + * This functionality is not supported for directory buckets. + */ + startAfter?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subLIeSp = noSmithyDocumentSerde + interface ListPartsInput extends _subLIeSp { + /** + * The name of the bucket to which the parts are being uploaded. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Object key for which the multipart upload was initiated. + * + * This member is required. + */ + key?: string + /** + * Upload ID identifying the multipart upload whose parts are being listed. + * + * This member is required. + */ + uploadId?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Sets the maximum number of parts to return. + */ + maxParts?: number + /** + * Specifies the part after which listing should begin. Only parts with higher + * part numbers will be listed. + */ + partNumberMarker?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. 
If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * The server-side encryption (SSE) algorithm used to encrypt the object. This + * parameter is needed only when the object was created using a checksum algorithm. + * For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerAlgorithm?: string + /** + * The server-side encryption (SSE) customer managed key. This parameter is needed + * only when the object was created using a checksum algorithm. For more + * information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerKey?: string + /** + * The MD5 server-side encryption (SSE) customer managed key. This parameter is + * needed only when the object was created using a checksum algorithm. For more + * information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerKeyMD5?: string + } + type _subVoJFh = noSmithyDocumentSerde + interface ListPartsOutput extends _subVoJFh { + /** + * If the bucket has a lifecycle rule configured with an action to abort + * incomplete multipart uploads and the prefix in the lifecycle rule matches the + * object name in the request, then the response includes this header indicating + * when the initiated multipart upload will become eligible for abort operation. + * For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. + * + * The response will also include the x-amz-abort-rule-id header that will provide + * the ID of the lifecycle configuration rule that defines this action. + * + * This functionality is not supported for directory buckets. + * + * [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + */ + abortDate?: time.Time + /** + * This header is returned along with the x-amz-abort-date header. It identifies + * applicable lifecycle configuration rule that defines the action to abort + * incomplete multipart uploads. + * + * This functionality is not supported for directory buckets. + */ + abortRuleId?: string + /** + * The name of the bucket to which the multipart upload was initiated. Does not + * return the access point ARN or access point alias if used. + */ + bucket?: string + /** + * The algorithm that was used to create a checksum of the object. 
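+ *
+ * As a sketch (hypothetical values, not part of the upstream docs), a minimal
+ * ListPartsInput for an in-progress multipart upload could be shaped like:
+ *
+ * ```
+ * const input: ListPartsInput = {
+ *   bucket: "example-bucket",
+ *   key: "videos/intro.mp4",       // object key of the multipart upload
+ *   uploadId: "EXAMPLE-UPLOAD-ID", // as returned when the upload was created
+ *   maxParts: 100,
+ * }
+ * ```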
+ */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * Container element that identifies who initiated the multipart upload. If the + * initiator is an Amazon Web Services account, this element provides the same + * information as the Owner element. If the initiator is an IAM User, this element + * provides the user ARN and display name. + */ + initiator?: types.Initiator + /** + * Indicates whether the returned list of parts is truncated. A true value + * indicates that the list was truncated. A list can be truncated if the number of + * parts exceeds the limit returned in the MaxParts element. + */ + isTruncated?: boolean + /** + * Object key for which the multipart upload was initiated. + */ + key?: string + /** + * Maximum number of parts that were allowed in the response. + */ + maxParts?: number + /** + * When a list is truncated, this element specifies the last part in the list, as + * well as the value to use for the part-number-marker request parameter in a + * subsequent request. + */ + nextPartNumberMarker?: string + /** + * Container element that identifies the object owner, after the object is + * created. If multipart upload is initiated by an IAM user, this element provides + * the parent account ID and display name. + * + * Directory buckets - The bucket owner is returned as the object owner for all + * the parts. + */ + owner?: types.Owner + /** + * Specifies the part after which listing should begin. Only parts with higher + * part numbers will be listed. + */ + partNumberMarker?: string + /** + * Container for elements related to a particular part. A response can contain + * zero or more Part elements. + */ + parts: Array + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * The class of storage used to store the uploaded object. + * + * Directory buckets - Only the S3 Express One Zone storage class is supported by + * directory buckets to store objects. + */ + storageClass: types.StorageClass + /** + * Upload ID identifying the multipart upload whose parts are being listed. + */ + uploadId?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subdvmub = noSmithyDocumentSerde + interface PutBucketAccelerateConfigurationInput extends _subdvmub { + /** + * Container for setting the transfer acceleration state. + * + * This member is required. + */ + accelerateConfiguration?: types.AccelerateConfiguration + /** + * The name of the bucket for which the accelerate configuration is set. + * + * This member is required. + */ + bucket?: string + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The account ID of the expected bucket owner. 
If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subIvJxE = noSmithyDocumentSerde + interface PutBucketAccelerateConfigurationOutput extends _subIvJxE { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subYpAFl = noSmithyDocumentSerde + interface PutBucketAclInput extends _subYpAFl { + /** + * The bucket to which to apply the ACL. + * + * This member is required. + */ + bucket?: string + /** + * The canned ACL to apply to the bucket. + */ + acl: types.BucketCannedACL + /** + * Contains the elements that set the ACL permissions for an object per grantee. + */ + accessControlPolicy?: types.AccessControlPolicy + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the data. This header must be used as + * a message integrity check to verify that the request body was not corrupted in + * transit. For more information, go to [RFC 1864.] + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Allows grantee the read, write, read ACP, and write ACP permissions on the + * bucket. + */ + grantFullControl?: string + /** + * Allows grantee to list the objects in the bucket. + */ + grantRead?: string + /** + * Allows grantee to read the bucket ACL. + */ + grantReadACP?: string + /** + * Allows grantee to create new objects in the bucket. + * + * For the bucket and object owners of existing objects, also allows deletions and + * overwrites of those objects. + */ + grantWrite?: string + /** + * Allows grantee to write the ACL for the applicable bucket. + */ + grantWriteACP?: string + } + type _sublhrtH = noSmithyDocumentSerde + interface PutBucketAclOutput extends _sublhrtH { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subZSIfw = noSmithyDocumentSerde + interface PutBucketAnalyticsConfigurationInput extends _subZSIfw { + /** + * The configuration and any analyses for the analytics filter. + * + * This member is required. + */ + analyticsConfiguration?: types.AnalyticsConfiguration + /** + * The name of the bucket to which an analytics configuration is stored. + * + * This member is required. 
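+ *
+ * For the PutBucketAclInput documented above, a sketch with hypothetical
+ * values: a canned ACL and explicit grant headers are alternative ways to
+ * fill the input, for example:
+ *
+ * ```
+ * const aclInput: PutBucketAclInput = {
+ *   bucket: "example-bucket",
+ *   // either a canned ACL (the cast is illustrative; the enum shape is assumed)...
+ *   acl: "private" as types.BucketCannedACL,
+ *   // ...or explicit grants such as grantRead / grantWriteACP instead
+ * }
+ * ```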
+ */ + bucket?: string + /** + * The ID that identifies the analytics configuration. + * + * This member is required. + */ + id?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subRNcDs = noSmithyDocumentSerde + interface PutBucketAnalyticsConfigurationOutput extends _subRNcDs { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subMrBSr = noSmithyDocumentSerde + interface PutBucketCorsInput extends _subMrBSr { + /** + * Specifies the bucket impacted by the cors configuration. + * + * This member is required. + */ + bucket?: string + /** + * Describes the cross-origin access configuration for objects in an Amazon S3 + * bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. + * + * [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + * + * This member is required. + */ + corsConfiguration?: types.CORSConfiguration + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the data. This header must be used as + * a message integrity check to verify that the request body was not corrupted in + * transit. For more information, go to [RFC 1864.] + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subGcPQY = noSmithyDocumentSerde + interface PutBucketCorsOutput extends _subGcPQY { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subWpkys = noSmithyDocumentSerde + interface PutBucketEncryptionInput extends _subWpkys { + /** + * Specifies default encryption for a bucket using server-side encryption with + * different key options. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use path-style requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. Directory bucket names must be + * unique in the chosen Availability Zone. 
Bucket names must also follow the format + * bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + * ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + * Guide + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * + * This member is required. + */ + bucket?: string + /** + * Specifies the default server-side-encryption configuration. + * + * This member is required. + */ + serverSideEncryptionConfiguration?: types.ServerSideEncryptionConfiguration + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + * default checksum algorithm that's used for performance. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the server-side encryption + * configuration. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * This functionality is not supported for directory buckets. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + * + * For directory buckets, this header is not supported in this API operation. If + * you specify this header, the request fails with the HTTP status code 501 Not + * Implemented . + */ + expectedBucketOwner?: string + } + type _subQzgEL = noSmithyDocumentSerde + interface PutBucketEncryptionOutput extends _subQzgEL { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subusqpL = noSmithyDocumentSerde + interface PutBucketIntelligentTieringConfigurationInput extends _subusqpL { + /** + * The name of the Amazon S3 bucket whose configuration you want to modify or + * retrieve. + * + * This member is required. + */ + bucket?: string + /** + * The ID used to identify the S3 Intelligent-Tiering configuration. + * + * This member is required. + */ + id?: string + /** + * Container for S3 Intelligent-Tiering configuration. + * + * This member is required. + */ + intelligentTieringConfiguration?: types.IntelligentTieringConfiguration + } + type _subCYkED = noSmithyDocumentSerde + interface PutBucketIntelligentTieringConfigurationOutput extends _subCYkED { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subEDQDU = noSmithyDocumentSerde + interface PutBucketInventoryConfigurationInput extends _subEDQDU { + /** + * The name of the bucket where the inventory configuration will be stored. + * + * This member is required. 
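+ *
+ * For the PutBucketEncryptionInput documented above, a rough sketch (the
+ * nested field names inside ServerSideEncryptionConfiguration are assumed to
+ * follow the same lower-camel-case generation convention and are not verified
+ * here): a default SSE-KMS configuration could look like:
+ *
+ * ```
+ * const encInput: PutBucketEncryptionInput = {
+ *   bucket: "example-bucket",
+ *   serverSideEncryptionConfiguration: {
+ *     rules: [{
+ *       applyServerSideEncryptionByDefault: {
+ *         sseAlgorithm: "aws:kms",        // assumed field names
+ *         kmsMasterKeyID: "alias/my-key", // hypothetical key alias
+ *       },
+ *       bucketKeyEnabled: true,
+ *     }],
+ *   } as types.ServerSideEncryptionConfiguration,
+ * }
+ * ```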
+ */ + bucket?: string + /** + * The ID used to identify the inventory configuration. + * + * This member is required. + */ + id?: string + /** + * Specifies the inventory configuration. + * + * This member is required. + */ + inventoryConfiguration?: types.InventoryConfiguration + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subrBuEk = noSmithyDocumentSerde + interface PutBucketInventoryConfigurationOutput extends _subrBuEk { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subNvNOa = noSmithyDocumentSerde + interface PutBucketLifecycleConfigurationInput extends _subNvNOa { + /** + * The name of the bucket for which to set the configuration. + * + * This member is required. + */ + bucket?: string + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Container for lifecycle rules. You can add as many as 1,000 rules. + */ + lifecycleConfiguration?: types.BucketLifecycleConfiguration + /** + * Indicates which default minimum object size behavior is applied to the + * lifecycle configuration. + * + * ``` + * - all_storage_classes_128K - Objects smaller than 128 KB will not transition + * to any storage class by default. + * + * - varies_by_storage_class - Objects smaller than 128 KB will transition to + * Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + * all other storage classes will prevent transitions smaller than 128 KB. + * ``` + * + * To customize the minimum object size for any transition you can add a filter + * that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + * of your transition rule. Custom filters always take precedence over the default + * transition behavior. + */ + transitionDefaultMinimumObjectSize: types.TransitionDefaultMinimumObjectSize + } + type _subGWwQT = noSmithyDocumentSerde + interface PutBucketLifecycleConfigurationOutput extends _subGWwQT { + /** + * Indicates which default minimum object size behavior is applied to the + * lifecycle configuration. + * + * ``` + * - all_storage_classes_128K - Objects smaller than 128 KB will not transition + * to any storage class by default. + * + * - varies_by_storage_class - Objects smaller than 128 KB will transition to + * Glacier Flexible Retrieval or Glacier Deep Archive storage classes. 
By default, + * all other storage classes will prevent transitions smaller than 128 KB. + * ``` + * + * To customize the minimum object size for any transition you can add a filter + * that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + * of your transition rule. Custom filters always take precedence over the default + * transition behavior. + */ + transitionDefaultMinimumObjectSize: types.TransitionDefaultMinimumObjectSize + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subuRdDU = noSmithyDocumentSerde + interface PutBucketLoggingInput extends _subuRdDU { + /** + * The name of the bucket for which to set the logging parameters. + * + * This member is required. + */ + bucket?: string + /** + * Container for logging status information. + * + * This member is required. + */ + bucketLoggingStatus?: types.BucketLoggingStatus + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The MD5 hash of the PutBucketLogging request body. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subVqzPz = noSmithyDocumentSerde + interface PutBucketLoggingOutput extends _subVqzPz { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subAuGIi = noSmithyDocumentSerde + interface PutBucketMetricsConfigurationInput extends _subAuGIi { + /** + * The name of the bucket for which the metrics configuration is set. + * + * This member is required. + */ + bucket?: string + /** + * The ID used to identify the metrics configuration. The ID has a 64 character + * limit and can only contain letters, numbers, periods, dashes, and underscores. + * + * This member is required. + */ + id?: string + /** + * Specifies the metrics configuration. + * + * This member is required. + */ + metricsConfiguration?: types.MetricsConfiguration + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subZZnAv = noSmithyDocumentSerde + interface PutBucketMetricsConfigurationOutput extends _subZZnAv { + /** + * Metadata pertaining to the operation's result. 
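+ *
+ * For the PutBucketLifecycleConfiguration types documented above, an
+ * illustrative sketch (rule fields are assumed, not verified): pairing the
+ * default minimum-object-size behavior with a custom size filter:
+ *
+ * ```
+ * const lcInput: PutBucketLifecycleConfigurationInput = {
+ *   bucket: "example-bucket",
+ *   transitionDefaultMinimumObjectSize:
+ *     "varies_by_storage_class" as types.TransitionDefaultMinimumObjectSize,
+ *   lifecycleConfiguration: {
+ *     rules: [
+ *       // a rule whose filter sets objectSizeGreaterThan to override the default
+ *     ],
+ *   } as types.BucketLifecycleConfiguration,
+ * }
+ * ```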
+ */ + resultMetadata: middleware.Metadata + } + type _subVChPt = noSmithyDocumentSerde + interface PutBucketNotificationConfigurationInput extends _subVChPt { + /** + * The name of the bucket. + * + * This member is required. + */ + bucket?: string + /** + * A container for specifying the notification configuration of the bucket. If + * this element is empty, notifications are turned off for the bucket. + * + * This member is required. + */ + notificationConfiguration?: types.NotificationConfiguration + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or + * false value. + */ + skipDestinationValidation?: boolean + } + type _subzhWTS = noSmithyDocumentSerde + interface PutBucketNotificationConfigurationOutput extends _subzhWTS { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subUkynR = noSmithyDocumentSerde + interface PutBucketOwnershipControlsInput extends _subUkynR { + /** + * The name of the Amazon S3 bucket whose OwnershipControls you want to set. + * + * This member is required. + */ + bucket?: string + /** + * The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or + * ObjectWriter) that you want to apply to this Amazon S3 bucket. + * + * This member is required. + */ + ownershipControls?: types.OwnershipControls + /** + * The MD5 hash of the OwnershipControls request body. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subgncqn = noSmithyDocumentSerde + interface PutBucketOwnershipControlsOutput extends _subgncqn { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subfjRvj = noSmithyDocumentSerde + interface PutBucketPolicyInput extends _subfjRvj { + /** + * The name of the bucket. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use path-style requests in the format + * https://s3express-control.region_code.amazonaws.com/bucket-name . + * Virtual-hosted-style requests aren't supported. Directory bucket names must be + * unique in the chosen Availability Zone. Bucket names must also follow the format + * bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + * ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + * Guide + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * + * This member is required. + */ + bucket?: string + /** + * The bucket policy as a JSON document. + * + * For directory buckets, the only IAM action supported in the bucket policy is + * s3express:CreateSession . + * + * This member is required. + */ + policy?: string + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. 
This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + * fails the request with the HTTP status code 400 Bad Request . + * + * For the x-amz-checksum-algorithm header, replace algorithm with the + * supported algorithm from the following list: + * + * ``` + * - CRC32 + * + * - CRC32C + * + * - SHA1 + * + * - SHA256 + * ``` + * + * For more information, see [Checking object integrity] in the Amazon S3 User Guide. + * + * If the individual checksum value you provide through x-amz-checksum-algorithm + * doesn't match the checksum algorithm you set through + * x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm + * parameter and uses the checksum algorithm that matches the provided value in + * x-amz-checksum-algorithm . + * + * For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + * default checksum algorithm that's used for performance. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * Set this parameter to true to confirm that you want to remove your permissions + * to change this bucket policy in the future. + * + * This functionality is not supported for directory buckets. + */ + confirmRemoveSelfBucketAccess?: boolean + /** + * The MD5 hash of the request body. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * This functionality is not supported for directory buckets. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + * + * For directory buckets, this header is not supported in this API operation. If + * you specify this header, the request fails with the HTTP status code 501 Not + * Implemented . + */ + expectedBucketOwner?: string + } + type _subJNMQZ = noSmithyDocumentSerde + interface PutBucketPolicyOutput extends _subJNMQZ { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subhBUWh = noSmithyDocumentSerde + interface PutBucketReplicationInput extends _subhBUWh { + /** + * The name of the bucket + * + * This member is required. + */ + bucket?: string + /** + * A container for replication rules. You can add up to 1,000 rules. The maximum + * size of a replication configuration is 2 MB. + * + * This member is required. + */ + replicationConfiguration?: types.ReplicationConfiguration + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. 
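+ *
+ * (Sketch for the PutBucketPolicyInput documented above; values are
+ * hypothetical.) The policy document is passed as a JSON string:
+ *
+ * ```
+ * const policyInput: PutBucketPolicyInput = {
+ *   bucket: "example-bucket",
+ *   policy: JSON.stringify({
+ *     Version: "2012-10-17",
+ *     Statement: [{
+ *       Effect: "Deny",
+ *       Principal: "*",
+ *       Action: "s3:*",
+ *       Resource: "arn:aws:s3:::example-bucket/*",
+ *       Condition: { Bool: { "aws:SecureTransport": "false" } },
+ *     }],
+ *   }),
+ * }
+ * ```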
+ * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the data. You must use this header as + * a message integrity check to verify that the request body was not corrupted in + * transit. For more information, see [RFC 1864]. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * A token to allow Object Lock to be enabled for an existing bucket. + */ + token?: string + } + type _subhwzAr = noSmithyDocumentSerde + interface PutBucketReplicationOutput extends _subhwzAr { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subyrzSF = noSmithyDocumentSerde + interface PutBucketRequestPaymentInput extends _subyrzSF { + /** + * The bucket name. + * + * This member is required. + */ + bucket?: string + /** + * Container for Payer. + * + * This member is required. + */ + requestPaymentConfiguration?: types.RequestPaymentConfiguration + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the data. You must use this header as + * a message integrity check to verify that the request body was not corrupted in + * transit. For more information, see [RFC 1864]. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _sublqlWX = noSmithyDocumentSerde + interface PutBucketRequestPaymentOutput extends _sublqlWX { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subsIsNs = noSmithyDocumentSerde + interface PutBucketTaggingInput extends _subsIsNs { + /** + * The bucket name. + * + * This member is required. + */ + bucket?: string + /** + * Container for the TagSet and Tag elements. + * + * This member is required. 
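+ *
+ * As a sketch for the PutBucketTaggingInput documented above (the Tagging
+ * field names are assumed from the generation convention, not verified):
+ *
+ * ```
+ * const tagInput: PutBucketTaggingInput = {
+ *   bucket: "example-bucket",
+ *   tagging: {
+ *     tagSet: [{ key: "env", value: "prod" }], // assumed field names
+ *   } as types.Tagging,
+ * }
+ * ```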
+ */ + tagging?: types.Tagging + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the data. You must use this header as + * a message integrity check to verify that the request body was not corrupted in + * transit. For more information, see [RFC 1864]. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subJklpP = noSmithyDocumentSerde + interface PutBucketTaggingOutput extends _subJklpP { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subZTOJl = noSmithyDocumentSerde + interface PutBucketVersioningInput extends _subZTOJl { + /** + * The bucket name. + * + * This member is required. + */ + bucket?: string + /** + * Container for setting the versioning state. + * + * This member is required. + */ + versioningConfiguration?: types.VersioningConfiguration + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the data. You must use this header as + * a message integrity check to verify that the request body was not corrupted in + * transit. For more information, see [RFC 1864]. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied).
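+ *
+ * For illustration only (the VersioningConfiguration field names are
+ * assumed): enabling versioning, optionally together with the mfa value
+ * described below, could be shaped like:
+ *
+ * ```
+ * const verInput: PutBucketVersioningInput = {
+ *   bucket: "example-bucket",
+ *   versioningConfiguration: {
+ *     status: "Enabled", // assumed field name and value
+ *   } as types.VersioningConfiguration,
+ * }
+ * ```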
+ */ + expectedBucketOwner?: string + /** + * The concatenation of the authentication device's serial number, a space, and + * the value that is displayed on your authentication device. + */ + mfa?: string + } + type _subGTuLM = noSmithyDocumentSerde + interface PutBucketVersioningOutput extends _subGTuLM { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subnRJwE = noSmithyDocumentSerde + interface PutBucketWebsiteInput extends _subnRJwE { + /** + * The bucket name. + * + * This member is required. + */ + bucket?: string + /** + * Container for the request. + * + * This member is required. + */ + websiteConfiguration?: types.WebsiteConfiguration + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the data. You must use this header as + * a message integrity check to verify that the request body was not corrupted in + * transit. For more information, see [RFC 1864]. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _submkdXl = noSmithyDocumentSerde + interface PutBucketWebsiteOutput extends _submkdXl { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subeJRpI = noSmithyDocumentSerde + interface PutObjectInput extends _subeJRpI { + /** + * The bucket name to which the PUT action was initiated. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Object key for which the PUT action was initiated. + * + * This member is required. + */ + key?: string + /** + * The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon + * S3 User Guide. + * + * When adding a new object, you can use headers to grant ACL-based permissions to + * individual Amazon Web Services accounts or to predefined groups defined by + * Amazon S3. These permissions are then added to the ACL on the object. By + * default, all objects are private. Only the owner has full access control. For + * more information, see [Access Control List (ACL) Overview]and [Managing ACLs Using the REST API] in the Amazon S3 User Guide. + * + * If the bucket that you're uploading objects to uses the bucket owner enforced + * setting for S3 Object Ownership, ACLs are disabled and no longer affect + * permissions. Buckets that use this setting only accept PUT requests that don't + * specify an ACL or PUT requests that specify bucket owner full control ACLs, such + * as the bucket-owner-full-control canned ACL or an equivalent form of this ACL + * expressed in the XML format. PUT requests that contain other ACLs (for example, + * custom grants to certain Amazon Web Services accounts) fail and return a 400 + * error with the error code AccessControlListNotSupported . For more information, + * see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 User Guide. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + * + * [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html + * [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + * [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + * [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + */ + acl: types.ObjectCannedACL + /** + * Object data. 
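+ *
+ * A minimal PutObjectInput sketch (hypothetical values; body accepts any
+ * io.Reader-compatible value, obtained however the host environment provides
+ * one):
+ *
+ * ```
+ * declare const reader: io.Reader // assumed to exist for this sketch
+ * const putInput: PutObjectInput = {
+ *   bucket: "example-bucket",
+ *   key: "docs/readme.txt",
+ *   body: reader,
+ *   contentType: "text/plain",
+ * }
+ * ```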
+ */ + body: io.Reader + /** + * Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + * with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + * + * General purpose buckets - Setting this header to true causes Amazon S3 to use + * an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this + * header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. + * + * Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + * operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't + * supported, when you copy SSE-KMS encrypted objects from general purpose buckets + * to directory buckets, from directory buckets to general purpose buckets, or + * between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + * call to KMS every time a copy request is made for a KMS-encrypted object. + * + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops + */ + bucketKeyEnabled?: boolean + /** + * Can be used to specify caching behavior along the request/reply chain. For more + * information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]. + * + * [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + */ + cacheControl?: string + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + * fails the request with the HTTP status code 400 Bad Request . + * + * For the x-amz-checksum-algorithm header, replace algorithm with the + * supported algorithm from the following list: + * + * ``` + * - CRC32 + * + * - CRC32C + * + * - SHA1 + * + * - SHA256 + * ``` + * + * For more information, see [Checking object integrity] in the Amazon S3 User Guide. + * + * If the individual checksum value you provide through x-amz-checksum-algorithm + * doesn't match the checksum algorithm you set through + * x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm + * parameter and uses the checksum algorithm that matches the provided value in + * x-amz-checksum-algorithm . + * + * For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + * default checksum algorithm that's used for performance. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. 
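+ *
+ * (Sketch, hypothetical values.) Supplying a precomputed checksum together
+ * with the matching algorithm documented above:
+ *
+ * ```
+ * const checked: Partial<PutObjectInput> = {
+ *   checksumAlgorithm: "CRC32" as types.ChecksumAlgorithm,
+ *   checksumCRC32: "AAAAAA==", // base64 CRC-32 placeholder, not a real digest
+ * }
+ * ```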
+ * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see + * [Checking object integrity]in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32C?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA1?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA256?: string + /** + * Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4]. + * + * [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4 + */ + contentDisposition?: string + /** + * Specifies what content encodings have been applied to the object and thus what + * decoding mechanisms must be applied to obtain the media-type referenced by the + * Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]. + * + * [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding + */ + contentEncoding?: string + /** + * The language the content is in. + */ + contentLanguage?: string + /** + * Size of the body in bytes. This parameter is useful when the size of the body + * cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]. + * + * [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length + */ + contentLength?: number + /** + * The base64-encoded 128-bit MD5 digest of the message (without the headers) + * according to RFC 1864. This header can be used as a message integrity check to + * verify that the data is the same data that was originally sent. Although it is + * optional, we recommend using the Content-MD5 mechanism as an end-to-end + * integrity check. For more information about REST request authentication, see [REST Authentication]. + * + * The Content-MD5 header is required for any request to upload an object with a + * retention period configured using Amazon S3 Object Lock. For more information + * about Amazon S3 Object Lock, see [Amazon S3 Object Lock Overview]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. 
+ * + * [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + * [Amazon S3 Object Lock Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html + */ + contentMD5?: string + /** + * A standard MIME type describing the format of the contents. For more + * information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]. + * + * [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type + */ + contentType?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The date and time at which the object is no longer cacheable. For more + * information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]. + * + * [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 + */ + expires?: time.Time + /** + * Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + */ + grantFullControl?: string + /** + * Allows grantee to read the object data and its metadata. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + */ + grantRead?: string + /** + * Allows grantee to read the object ACL. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + */ + grantReadACP?: string + /** + * Allows grantee to write the ACL for the applicable object. + * + * ``` + * - This functionality is not supported for directory buckets. + * + * - This functionality is not supported for Amazon S3 on Outposts. + * ``` + */ + grantWriteACP?: string + /** + * Uploads the object only if the object key name does not already exist in the + * bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. + * + * If a conflicting operation occurs during the upload S3 returns a 409 + * ConditionalRequestConflict response. On a 409 failure you should retry the + * upload. + * + * Expects the '*' (asterisk) character. + * + * For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + * User Guide. + * + * [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + * [RFC 7232]: https://tools.ietf.org/html/rfc7232 + */ + ifNoneMatch?: string + /** + * A map of metadata to store with the object in S3. + */ + metadata: _TygojaDict + /** + * Specifies whether a legal hold will be applied to this object. For more + * information about S3 Object Lock, see [Object Lock]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. + * + * [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + */ + objectLockLegalHoldStatus: types.ObjectLockLegalHoldStatus + /** + * The Object Lock mode that you want to apply to this object. + * + * This functionality is not supported for directory buckets. 
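+ *
+ * (Sketch, illustrative only.) A create-only upload using the If-None-Match
+ * semantics documented above:
+ *
+ * ```
+ * const createOnly: Partial<PutObjectInput> = {
+ *   ifNoneMatch: "*", // fail with 412 Precondition Failed if the key already exists
+ * }
+ * ```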
+ */ + objectLockMode: types.ObjectLockMode + /** + * The date and time when you want this object's Object Lock to expire. Must be + * formatted as a timestamp parameter. + * + * This functionality is not supported for directory buckets. + */ + objectLockRetainUntilDate?: time.Time + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Specifies the algorithm to use when encrypting the object (for example, AES256 ). + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key for Amazon S3 to use in + * encrypting data. This value is used to store the object and then it is + * discarded; Amazon S3 does not store the encryption key. The key must be + * appropriate for use with the algorithm specified in the + * x-amz-server-side-encryption-customer-algorithm header. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + * Amazon S3 uses this header for a message integrity check to ensure that the + * encryption key was transmitted without error. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * Specifies the Amazon Web Services KMS Encryption Context as an additional + * encryption context to use for object encryption. The value of this header is a + * Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption + * context as key-value pairs. This value is stored as object metadata and + * automatically gets passed on to Amazon Web Services KMS for future GetObject + * operations on this object. + * + * General purpose buckets - This value must be explicitly added during CopyObject + * operations if you want an additional encryption context for your object. For + * more information, see [Encryption context]in the Amazon S3 User Guide. + * + * Directory buckets - You can optionally provide an explicit encryption context + * value. The value must match the default encryption context - the bucket Amazon + * Resource Name (ARN). An additional encryption context value is not supported. + * + * [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context + */ + ssekmsEncryptionContext?: string + /** + * Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + * encryption. If the KMS key doesn't exist in the same account that's issuing the + * command, you must use the full Key ARN not the Key ID. + * + * General purpose buckets - If you specify x-amz-server-side-encryption with + * aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key + * Alias) of the KMS key to use. 
If you specify + * x-amz-server-side-encryption:aws:kms or + * x-amz-server-side-encryption:aws:kms:dsse , but do not provide + * x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web + * Services managed key ( aws/s3 ) to protect the data. + * + * Directory buckets - If you specify x-amz-server-side-encryption with aws:kms , + * you must specify the x-amz-server-side-encryption-aws-kms-key-id header with + * the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key + * to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID + * or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS + * configuration can only support 1 [customer managed key]per directory bucket for the lifetime of the + * bucket. [Amazon Web Services managed key]( aws/s3 ) isn't supported. + * + * [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm that was used when you store this object + * in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + * + * ``` + * - General purpose buckets - You have four mutually exclusive options to + * protect data using server-side encryption in Amazon S3, depending on how you + * choose to manage the encryption keys. Specifically, the encryption key options + * are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + * DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with + * server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You + * can optionally tell Amazon S3 to encrypt data at rest by using server-side + * encryption with other key options. For more information, see [Using Server-Side Encryption]in the Amazon S3 + * User Guide. + * + * - Directory buckets - For directory buckets, there are only two supported + * options for server-side encryption: server-side encryption with Amazon S3 + * managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + * (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses + * the desired encryption configuration and you don't override the bucket default + * encryption in your CreateSession requests or PUT object requests. Then, new + * objects are automatically encrypted with the desired encryption settings. For + * more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + * the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + * ``` + * + * In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + * encryption request headers must match the encryption settings that are specified + * in the CreateSession request. You can't override the values of the encryption + * settings ( x-amz-server-side-encryption , + * x-amz-server-side-encryption-aws-kms-key-id , + * x-amz-server-side-encryption-context , and + * x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + * CreateSession request.
You don't need to explicitly specify these encryption + * settings values in Zonal endpoint API calls, and Amazon S3 will use the + * encryption settings values from the CreateSession request to protect new + * objects in the directory bucket. + * + * When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + * session token refreshes automatically to avoid service interruptions when a + * session expires. The CLI or the Amazon Web Services SDKs use the bucket's + * default encryption configuration for the CreateSession request. It's not + * supported to override the encryption settings values in the CreateSession + * request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + * request headers must match the default encryption configuration of the directory + * bucket. + * + * [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html + * [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + * [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + */ + serverSideEncryption: types.ServerSideEncryption + /** + * By default, Amazon S3 uses the STANDARD Storage Class to store newly created + * objects. The STANDARD storage class provides high durability and high + * availability. Depending on performance needs, you can specify a different + * Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + * + * ``` + * - For directory buckets, only the S3 Express One Zone storage class is + * supported to store newly created objects. + * + * - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + * ``` + * + * [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + */ + storageClass: types.StorageClass + /** + * The tag-set for the object. The tag-set must be encoded as URL Query + * parameters. (For example, "Key1=Value1") + * + * This functionality is not supported for directory buckets. + */ + tagging?: string + /** + * If the bucket is configured as a website, redirects requests for this object to + * another object in the same bucket or to an external URL. Amazon S3 stores the + * value of this header in the object metadata. For information about object + * metadata, see [Object Key and Metadata]in the Amazon S3 User Guide. + * + * In the following example, the request header sets the redirect to an object + * (anotherPage.html) in the same bucket: + * + * ``` + * x-amz-website-redirect-location: /anotherPage.html + * ``` + * + * In the following example, the request header sets the object redirect to + * another website: + * + * ``` + * x-amz-website-redirect-location: http://www.example.com/ + * ``` + * + * For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the + * Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets.
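+ * + * A minimal sketch of the same two redirects expressed on this input type + * (illustrative only; the values mirror the header examples above): + * + * ``` + * const toObject: Partial<PutObjectInput> = { websiteRedirectLocation: "/anotherPage.html" } + * const toSite: Partial<PutObjectInput> = { websiteRedirectLocation: "http://www.example.com/" } + * ```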
+ * + * [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + * [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + * [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html + */ + websiteRedirectLocation?: string + } + type _subPlEPr = noSmithyDocumentSerde + interface PutObjectOutput extends _subPlEPr { + /** + * Indicates whether the uploaded object uses an S3 Bucket Key for server-side + * encryption with Key Management Service (KMS) keys (SSE-KMS). + */ + bucketKeyEnabled?: boolean + /** + * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA256?: string + /** + * Entity tag for the uploaded object. 
+ * + * General purpose buckets - To ensure that data is not corrupted traversing the + * network, for objects where the ETag is the MD5 digest of the object, you can + * calculate the MD5 while putting an object to Amazon S3 and compare the returned + * ETag to the calculated MD5 value. + * + * Directory buckets - The ETag for the object in a directory bucket isn't the MD5 + * digest of the object. + */ + eTag?: string + /** + * If the expiration is configured for the object (see [PutBucketLifecycleConfiguration]) in the Amazon S3 User + * Guide, the response includes this header. It includes the expiry-date and + * rule-id key-value pairs that provide information about object expiration. The + * value of the rule-id is URL-encoded. + * + * This functionality is not supported for directory buckets. + * + * [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + */ + expiration?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to confirm the encryption + * algorithm that's used. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to provide the round-trip + * message integrity verification of the customer-provided encryption key. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * If present, indicates the Amazon Web Services KMS Encryption Context to use for + * object encryption. The value of this header is a Base64-encoded string of a + * UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + * This value is stored as object metadata and automatically gets passed on to + * Amazon Web Services KMS for future GetObject operations on this object. + */ + ssekmsEncryptionContext?: string + /** + * If present, indicates the ID of the KMS key that was used for object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store this object in Amazon + * S3. + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Version ID of the object. + * + * If you enable versioning for a bucket, Amazon S3 automatically generates a + * unique version ID for the object being stored. Amazon S3 returns this ID in the + * response. When you enable versioning for a bucket, if Amazon S3 receives + * multiple write requests for the same object simultaneously, it stores all of the + * objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User + * Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning]. + * + * This functionality is not supported for directory buckets. + * + * [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html + * [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html + */ + versionId?: string + /** + * Metadata pertaining to the operation's result. 
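+ * + * A sketch of reading commonly used fields from this output (illustrative only; + * the putObject call shape is hypothetical and may differ in the actual client + * binding): + * + * ``` + * const out: PutObjectOutput = client.putObject(input) // hypothetical call + * console.log(out.eTag, out.versionId)                 // versionId is set only on versioned buckets + * ```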
+ */ + resultMetadata: middleware.Metadata + } + type _subGFZDA = noSmithyDocumentSerde + interface PutObjectAclInput extends _subGFZDA { + /** + * The bucket name that contains the object to which you want to attach the ACL. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Key for which the PUT action was initiated. + * + * This member is required. + */ + key?: string + /** + * The canned ACL to apply to the object. For more information, see [Canned ACL]. + * + * [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + */ + acl: types.ObjectCannedACL + /** + * Contains the elements that set the ACL permissions for an object per grantee. + */ + accessControlPolicy?: types.AccessControlPolicy + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The base64-encoded 128-bit MD5 digest of the data. This header must be used as + * a message integrity check to verify that the request body was not corrupted in + * transit. For more information, go to [RFC 1864]. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + * + * [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner.
If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Allows grantee the read, write, read ACP, and write ACP permissions on the + * bucket. + * + * This functionality is not supported for Amazon S3 on Outposts. + */ + grantFullControl?: string + /** + * Allows grantee to list the objects in the bucket. + * + * This functionality is not supported for Amazon S3 on Outposts. + */ + grantRead?: string + /** + * Allows grantee to read the bucket ACL. + * + * This functionality is not supported for Amazon S3 on Outposts. + */ + grantReadACP?: string + /** + * Allows grantee to create new objects in the bucket. + * + * For the bucket and object owners of existing objects, also allows deletions and + * overwrites of those objects. + */ + grantWrite?: string + /** + * Allows grantee to write the ACL for the applicable bucket. + * + * This functionality is not supported for Amazon S3 on Outposts. + */ + grantWriteACP?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Version ID used to reference a specific version of the object. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + } + type _subhYLJz = noSmithyDocumentSerde + interface PutObjectAclOutput extends _subhYLJz { + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subyfpPe = noSmithyDocumentSerde + interface PutObjectLegalHoldInput extends _subyfpPe { + /** + * The bucket name containing the object that you want to place a legal hold on. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * The key name for the object that you want to place a legal hold on. + * + * This member is required. 
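+ * + * A minimal sketch of a complete request (illustrative only; the bucket and key + * values are hypothetical, and the legalHold shape assumes the generated + * types.ObjectLockLegalHold exposes a string-backed status field): + * + * ``` + * const hold: Partial<PutObjectLegalHoldInput> = { + *   bucket: "example-bucket", + *   key: "reports/january.pdf", + *   legalHold: { status: "ON" }, // assumed values: "ON" | "OFF" + * } + * ```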
+ */ + key?: string + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The MD5 hash for the request body. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Container element for the legal hold configuration you want to apply to the + * specified object. + */ + legalHold?: types.ObjectLockLegalHold + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * The version ID of the object that you want to place a legal hold on. + */ + versionId?: string + } + type _subPWMsH = noSmithyDocumentSerde + interface PutObjectLegalHoldOutput extends _subPWMsH { + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subpFViB = noSmithyDocumentSerde + interface PutObjectLockConfigurationInput extends _subpFViB { + /** + * The bucket whose Object Lock configuration you want to create or replace. + * + * This member is required. + */ + bucket?: string + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. 
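+ * + * For example (illustrative only; assumes the generated enum accepts its string + * value): + * + * ``` + * // Ask Amazon S3 to verify the request payload with a SHA-256 checksum. + * const withChecksum: Partial<PutObjectLockConfigurationInput> = { checksumAlgorithm: "SHA256" } + * ```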
+ * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The MD5 hash for the request body. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The Object Lock configuration that you want to apply to the specified bucket. + */ + objectLockConfiguration?: types.ObjectLockConfiguration + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * A token to allow Object Lock to be enabled for an existing bucket. + */ + token?: string + } + type _subCfBIC = noSmithyDocumentSerde + interface PutObjectLockConfigurationOutput extends _subCfBIC { + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subQDqpO = noSmithyDocumentSerde + interface PutObjectRetentionInput extends _subQDqpO { + /** + * The bucket name that contains the object you want to apply this Object + * Retention configuration to. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * The key name for the object that you want to apply this Object Retention + * configuration to. + * + * This member is required. + */ + key?: string + /** + * Indicates whether this action should bypass Governance-mode restrictions. + */ + bypassGovernanceRetention?: boolean + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. 
When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The MD5 hash for the request body. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * The container element for the Object Retention configuration. + */ + retention?: types.ObjectLockRetention + /** + * The version ID for the object that you want to apply this Object Retention + * configuration to. + */ + versionId?: string + } + type _subditGI = noSmithyDocumentSerde + interface PutObjectRetentionOutput extends _subditGI { + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _suboiOeh = noSmithyDocumentSerde + interface PutObjectTaggingInput extends _suboiOeh { + /** + * The bucket name containing the object. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Name of the object key. + * + * This member is required. + */ + key?: string + /** + * Container for the TagSet and Tag elements + * + * This member is required. + */ + tagging?: types.Tagging + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The MD5 hash for the request body. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * The versionId of the object that the tag-set will be added to. + */ + versionId?: string + } + type _subuVSga = noSmithyDocumentSerde + interface PutObjectTaggingOutput extends _subuVSga { + /** + * The versionId of the object the tag-set was added to. + */ + versionId?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subwMCvj = noSmithyDocumentSerde + interface PutPublicAccessBlockInput extends _subwMCvj { + /** + * The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want + * to set. + * + * This member is required. + */ + bucket?: string + /** + * The PublicAccessBlock configuration that you want to apply to this Amazon S3 + * bucket. You can enable the configuration options in any combination. 
For more + * information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in + * the Amazon S3 User Guide. + * + * [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status + * + * This member is required. + */ + publicAccessBlockConfiguration?: types.PublicAccessBlockConfiguration + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The MD5 hash of the PutPublicAccessBlock request body. + * + * For requests made using the Amazon Web Services Command Line Interface (CLI) or + * Amazon Web Services SDKs, this field is calculated automatically. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + } + type _subKeqew = noSmithyDocumentSerde + interface PutPublicAccessBlockOutput extends _subKeqew { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subkObCn = noSmithyDocumentSerde + interface RestoreObjectInput extends _subkObCn { + /** + * The bucket name containing the object to restore. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Object key for which the action was initiated. 
+ * + * This member is required. + */ + key?: string + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Container for restore job parameters. + */ + restoreRequest?: types.RestoreRequest + /** + * VersionId used to reference a specific version of the object. + */ + versionId?: string + } + type _subHBstk = noSmithyDocumentSerde + interface RestoreObjectOutput extends _subHBstk { + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * Indicates the path in the provided S3 output location where Select results will + * be restored to. + */ + restoreOutputPath?: string + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + // @ts-ignore + import smithysync = sync + /** + * Amazon S3 Select is no longer available to new customers. Existing + * customers of Amazon S3 Select can continue to use the feature as usual. [Learn more] + * + * Request to filter the contents of an Amazon S3 object based on a simple + * Structured Query Language (SQL) statement. In the request, along with the SQL + * expression, you must specify a data serialization format (JSON or CSV) of the + * object. Amazon S3 uses this to parse object data into records. It returns only + * records that match the specified SQL expression. You must also specify the data + * serialization format for the response. For more information, see [S3Select API Documentation].
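+ * + * A sketch of a typical request (illustrative only; the serialization shapes + * assume the generated types mirror the Go SDK field names in camelCase): + * + * ``` + * const query: Partial<SelectObjectContentInput> = { + *   bucket: "example-bucket", // hypothetical + *   key: "data.csv", + *   expression: "SELECT s.name FROM S3Object s WHERE s.age > '30'", + *   expressionType: "SQL", // assumes a string-backed enum + *   inputSerialization: { csv: { fileHeaderInfo: "USE" } }, + *   outputSerialization: { json: {} }, + * } + * ```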
+ * + * [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + * [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html + */ + type _subjzKEZ = noSmithyDocumentSerde + interface SelectObjectContentInput extends _subjzKEZ { + /** + * The S3 bucket. + * + * This member is required. + */ + bucket?: string + /** + * The expression that is used to query the object. + * + * This member is required. + */ + expression?: string + /** + * The type of the provided expression (for example, SQL). + * + * This member is required. + */ + expressionType: types.ExpressionType + /** + * Describes the format of the data in the object that is being queried. + * + * This member is required. + */ + inputSerialization?: types.InputSerialization + /** + * The object key. + * + * This member is required. + */ + key?: string + /** + * Describes the format of the data that you want Amazon S3 to return in response. + * + * This member is required. + */ + outputSerialization?: types.OutputSerialization + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Specifies if periodic request progress information should be enabled. + */ + requestProgress?: types.RequestProgress + /** + * The server-side encryption (SSE) algorithm used to encrypt the object. This + * parameter is needed only when the object was created using a checksum algorithm. + * For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerAlgorithm?: string + /** + * The server-side encryption (SSE) customer managed key. This parameter is needed + * only when the object was created using a checksum algorithm. For more + * information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerKey?: string + /** + * The MD5 server-side encryption (SSE) customer managed key. This parameter is + * needed only when the object was created using a checksum algorithm. For more + * information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + * + * [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + */ + sseCustomerKeyMD5?: string + /** + * Specifies the byte range of the object to get the records from. A record is + * processed when its first byte is contained by the range. This parameter is + * optional, but when specified, it must not be empty. See RFC 2616, Section + * 14.35.1 about how to specify the start and end of the range. + * + * ScanRange may be used in the following ways: + * + * ``` + * - { start: 50, end: 100 } - process only the records starting between the bytes 50 and 100 + * (inclusive, counting from zero) + * + * - { start: 50 } - process only the records starting after the byte 50 + * + * - { end: 50 } - process only the records within the last 50 bytes of the file.
+ * ``` + */ + scanRange?: types.ScanRange + } + type _subGliMM = noSmithyDocumentSerde + interface SelectObjectContentOutput extends _subGliMM { + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + interface SelectObjectContentOutput { + /** + * GetStream returns the type to interact with the event stream. + */ + getStream(): (SelectObjectContentEventStream) + } + type _subXWhvG = noSmithyDocumentSerde + interface UploadPartInput extends _subXWhvG { + /** + * The name of the bucket to which the multipart upload was initiated. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Object key for which the multipart upload was initiated. + * + * This member is required. + */ + key?: string + /** + * Part number of part being uploaded. This is a positive integer between 1 and + * 10,000. + * + * This member is required. + */ + partNumber?: number + /** + * Upload ID identifying the multipart upload whose part is being uploaded. + * + * This member is required. + */ + uploadId?: string + /** + * Object data. + */ + body: io.Reader + /** + * Indicates the algorithm used to create the checksum for the object when you use + * the SDK. This header will not provide any additional functionality if you don't + * use the SDK. 
When you send this header, there must be a corresponding + * x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + * request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * If you provide an individual checksum, Amazon S3 ignores any provided + * ChecksumAlgorithm parameter. + * + * This checksum algorithm must be the same for all parts and it must match the + * checksum value supplied in the CreateMultipartUpload request. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumAlgorithm: types.ChecksumAlgorithm + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see + * [Checking object integrity]in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32C?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA1?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA256?: string + /** + * Size of the body in bytes. This parameter is useful when the size of the body + * cannot be determined automatically. + */ + contentLength?: number + /** + * The base64-encoded 128-bit MD5 digest of the part data. This parameter is + * auto-populated when using the command from the CLI. This parameter is required + * if object lock parameters are specified. + * + * This functionality is not supported for directory buckets. + */ + contentMD5?: string + /** + * The account ID of the expected bucket owner. If the account ID that you provide + * does not match the actual owner of the bucket, the request fails with the HTTP + * status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests.
If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Specifies the algorithm to use when encrypting the object (for example, AES256). + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key for Amazon S3 to use in + * encrypting data. This value is used to store the object and then it is + * discarded; Amazon S3 does not store the encryption key. The key must be + * appropriate for use with the algorithm specified in the + * x-amz-server-side-encryption-customer-algorithm header . This must be the same + * encryption key specified in the initiate multipart upload request. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + * Amazon S3 uses this header for a message integrity check to ensure that the + * encryption key was transmitted without error. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + } + type _subuZGom = noSmithyDocumentSerde + interface UploadPartOutput extends _subuZGom { + /** + * Indicates whether the multipart upload uses an S3 Bucket Key for server-side + * encryption with Key Management Service (KMS) keys (SSE-KMS). + */ + bucketKeyEnabled?: boolean + /** + * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. 
When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA256?: string + /** + * Entity tag for the uploaded object. + */ + eTag?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to confirm the encryption + * algorithm that's used. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to provide the round-trip + * message integrity verification of the customer-provided encryption key. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * If present, indicates the ID of the KMS key that was used for object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store this object in Amazon + * S3 (for example, AES256 , aws:kms ). + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subIrsiu = noSmithyDocumentSerde + interface UploadPartCopyInput extends _subIrsiu { + /** + * The bucket name. + * + * Directory buckets - When you use this operation with a directory bucket, you + * must use virtual-hosted-style requests in the format + * Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + * supported. Directory bucket names must be unique in the chosen Availability + * Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + * example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + * naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + * + * Access points - When you use this action with an access point, you must provide + * the alias of the access point in place of the bucket name or specify the access + * point ARN. When using the access point ARN, you must direct requests to the + * access point hostname. 
The access point hostname takes the form + * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + * action with an access point through the Amazon Web Services SDKs, you provide + * the access point ARN in place of the bucket name. For more information about + * access point ARNs, see [Using access points]in the Amazon S3 User Guide. + * + * Access points and Object Lambda access points are not supported by directory + * buckets. + * + * S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + * direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + * takes the form + * AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + * use this action with S3 on Outposts through the Amazon Web Services SDKs, you + * provide the Outposts access point ARN in place of the bucket name. For more + * information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + * + * [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + * [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + * [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + * + * This member is required. + */ + bucket?: string + /** + * Specifies the source object for the copy operation. You specify the value in + * one of two formats, depending on whether you want to access the source object + * through an [access point]: + * + * ``` + * - For objects not accessed through an access point, specify the name of the + * source bucket and key of the source object, separated by a slash (/). For + * example, to copy the object reports/january.pdf from the bucket + * awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must + * be URL-encoded. + * + * - For objects accessed through access points, specify the Amazon Resource + * Name (ARN) of the object as accessed through the access point, in the format + * arn:aws:s3:::accesspoint//object/ . For example, to copy the object + * reports/january.pdf through access point my-access-point owned by account + * 123456789012 in Region us-west-2 , use the URL encoding of + * arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf + * . The value must be URL encoded. + * + * - Amazon S3 supports copy operations using Access points only when the source + * and destination buckets are in the same Amazon Web Services Region. + * + * - Access points are not supported by directory buckets. + * ``` + * + * Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + * ``` + * ARN of the object as accessed in the format + * arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + * reports/january.pdf through outpost my-outpost owned by account 123456789012 + * in Region us-west-2 , use the URL encoding of + * arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf + * . The value must be URL-encoded. + * ``` + * + * If your bucket has versioning enabled, you could have multiple versions of the + * same object. By default, x-amz-copy-source identifies the current version of + * the source object to copy. 
To copy a specific version of the source object, append ?versionId=
+ * to the x-amz-copy-source request header (for example,
+ * x-amz-copy-source:
+ * /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
+ * ).
+ *
+ * If the current version is a delete marker and you don't specify a versionId in
+ * the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error,
+ * because the object does not exist. If you specify versionId in the
+ * x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an
+ * HTTP 400 Bad Request error, because you are not allowed to specify a delete
+ * marker as a version for the x-amz-copy-source .
+ *
+ * Directory buckets - S3 Versioning isn't enabled or supported for directory
+ * buckets.
+ *
+ * [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
+ *
+ * This member is required.
+ */
+ copySource?: string
+ /**
+ * Object key for which the multipart upload was initiated.
+ *
+ * This member is required.
+ */
+ key?: string
+ /**
+ * Part number of the part being copied. This is a positive integer between 1 and
+ * 10,000.
+ *
+ * This member is required.
+ */
+ partNumber?: number
+ /**
+ * Upload ID identifying the multipart upload whose part is being copied.
+ *
+ * This member is required.
+ */
+ uploadId?: string
+ /**
+ * Copies the object if its entity tag (ETag) matches the specified tag.
+ *
+ * If both of the x-amz-copy-source-if-match and
+ * x-amz-copy-source-if-unmodified-since headers are present in the request as
+ * follows:
+ *
+ * x-amz-copy-source-if-match condition evaluates to true , and;
+ *
+ * x-amz-copy-source-if-unmodified-since condition evaluates to false ;
+ *
+ * Amazon S3 returns 200 OK and copies the data.
+ */
+ copySourceIfMatch?: string
+ /**
+ * Copies the object if it has been modified since the specified time.
+ *
+ * If both of the x-amz-copy-source-if-none-match and
+ * x-amz-copy-source-if-modified-since headers are present in the request as
+ * follows:
+ *
+ * x-amz-copy-source-if-none-match condition evaluates to false , and;
+ *
+ * x-amz-copy-source-if-modified-since condition evaluates to true ;
+ *
+ * Amazon S3 returns a 412 Precondition Failed response code.
+ */
+ copySourceIfModifiedSince?: time.Time
+ /**
+ * Copies the object if its entity tag (ETag) is different than the specified ETag.
+ *
+ * If both of the x-amz-copy-source-if-none-match and
+ * x-amz-copy-source-if-modified-since headers are present in the request as
+ * follows:
+ *
+ * x-amz-copy-source-if-none-match condition evaluates to false , and;
+ *
+ * x-amz-copy-source-if-modified-since condition evaluates to true ;
+ *
+ * Amazon S3 returns a 412 Precondition Failed response code.
+ */
+ copySourceIfNoneMatch?: string
+ /**
+ * Copies the object if it hasn't been modified since the specified time.
+ *
+ * If both of the x-amz-copy-source-if-match and
+ * x-amz-copy-source-if-unmodified-since headers are present in the request as
+ * follows:
+ *
+ * x-amz-copy-source-if-match condition evaluates to true , and;
+ *
+ * x-amz-copy-source-if-unmodified-since condition evaluates to false ;
+ *
+ * Amazon S3 returns 200 OK and copies the data.
+ */
+ copySourceIfUnmodifiedSince?: time.Time
+ /**
+ * The range of bytes to copy from the source object. The range value must use the
+ * form bytes=first-last, where the first and last are the zero-based byte offsets
+ * to copy.
For example, bytes=0-9 indicates that you want to copy the first 10 + * bytes of the source. You can copy a range only if the source object is greater + * than 5 MB. + */ + copySourceRange?: string + /** + * Specifies the algorithm to use when decrypting the source object (for example, + * AES256 ). + * + * This functionality is not supported when the source object is in a directory + * bucket. + */ + copySourceSSECustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + * the source object. The encryption key provided in this header must be one that + * was used when the source object was created. + * + * This functionality is not supported when the source object is in a directory + * bucket. + */ + copySourceSSECustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + * Amazon S3 uses this header for a message integrity check to ensure that the + * encryption key was transmitted without error. + * + * This functionality is not supported when the source object is in a directory + * bucket. + */ + copySourceSSECustomerKeyMD5?: string + /** + * The account ID of the expected destination bucket owner. If the account ID that + * you provide does not match the actual owner of the destination bucket, the + * request fails with the HTTP status code 403 Forbidden (access denied). + */ + expectedBucketOwner?: string + /** + * The account ID of the expected source bucket owner. If the account ID that you + * provide does not match the actual owner of the source bucket, the request fails + * with the HTTP status code 403 Forbidden (access denied). + */ + expectedSourceBucketOwner?: string + /** + * Confirms that the requester knows that they will be charged for the request. + * Bucket owners need not specify this parameter in their requests. If either the + * source or destination S3 bucket has Requester Pays enabled, the requester will + * pay for corresponding charges to copy the object. For information about + * downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + * Guide. + * + * This functionality is not supported for directory buckets. + * + * [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + */ + requestPayer: types.RequestPayer + /** + * Specifies the algorithm to use when encrypting the object (for example, AES256). + * + * This functionality is not supported when the destination bucket is a directory + * bucket. + */ + sseCustomerAlgorithm?: string + /** + * Specifies the customer-provided encryption key for Amazon S3 to use in + * encrypting data. This value is used to store the object and then it is + * discarded; Amazon S3 does not store the encryption key. The key must be + * appropriate for use with the algorithm specified in the + * x-amz-server-side-encryption-customer-algorithm header. This must be the same + * encryption key specified in the initiate multipart upload request. + * + * This functionality is not supported when the destination bucket is a directory + * bucket. + */ + sseCustomerKey?: string + /** + * Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + * Amazon S3 uses this header for a message integrity check to ensure that the + * encryption key was transmitted without error. + * + * This functionality is not supported when the destination bucket is a directory + * bucket. 
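+ *
+ * As an illustrative, non-authoritative sketch (the bucket, key, and upload ID
+ * values below are placeholders), a complete part-copy input combining the
+ * fields documented above could look like:
+ *
+ * ```
+ * const input: UploadPartCopyInput = {
+ *     bucket:          "destination-bucket",
+ *     key:             "backups/archive.bin",
+ *     uploadId:        "example-upload-id",
+ *     partNumber:      1,
+ *     copySource:      "source-bucket/backups/archive.bin", // URL-encoded bucket/key
+ *     copySourceRange: "bytes=0-5242879", // first 5 MiB of the source object
+ * }
+ * ```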
+ */ + sseCustomerKeyMD5?: string + } + type _subCjJBx = noSmithyDocumentSerde + interface UploadPartCopyOutput extends _subCjJBx { + /** + * Indicates whether the multipart upload uses an S3 Bucket Key for server-side + * encryption with Key Management Service (KMS) keys (SSE-KMS). + */ + bucketKeyEnabled?: boolean + /** + * Container for all response elements. + */ + copyPartResult?: types.CopyPartResult + /** + * The version of the source object that was copied, if you have enabled + * versioning on the source bucket. + * + * This functionality is not supported when the source object is in a directory + * bucket. + */ + copySourceVersionId?: string + /** + * If present, indicates that the requester was successfully charged for the + * request. + * + * This functionality is not supported for directory buckets. + */ + requestCharged: types.RequestCharged + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to confirm the encryption + * algorithm that's used. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerAlgorithm?: string + /** + * If server-side encryption with a customer-provided encryption key was + * requested, the response will include this header to provide the round-trip + * message integrity verification of the customer-provided encryption key. + * + * This functionality is not supported for directory buckets. + */ + sseCustomerKeyMD5?: string + /** + * If present, indicates the ID of the KMS key that was used for object encryption. + */ + ssekmsKeyId?: string + /** + * The server-side encryption algorithm used when you store this object in Amazon + * S3 (for example, AES256 , aws:kms ). + */ + serverSideEncryption: types.ServerSideEncryption + /** + * Metadata pertaining to the operation's result. + */ + resultMetadata: middleware.Metadata + } + type _subPezLW = noSmithyDocumentSerde + interface WriteGetObjectResponseInput extends _subPezLW { + /** + * Route prefix to the HTTP URL generated. + * + * This member is required. + */ + requestRoute?: string + /** + * A single use encrypted token that maps WriteGetObjectResponse to the end user + * GetObject request. + * + * This member is required. + */ + requestToken?: string + /** + * Indicates that a range of bytes was specified. + */ + acceptRanges?: string + /** + * The object data. + */ + body: io.Reader + /** + * Indicates whether the object stored in Amazon S3 uses an S3 bucket key for + * server-side encryption with Amazon Web Services KMS (SSE-KMS). + */ + bucketKeyEnabled?: boolean + /** + * Specifies caching behavior along the request/reply chain. + */ + cacheControl?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This specifies the + * base64-encoded, 32-bit CRC-32 checksum of the object returned by the Object + * Lambda function. This may not match the checksum for the object stored in Amazon + * S3. Amazon S3 will perform validation of the checksum values only when the + * original GetObject request required checksum validation. For more information + * about checksums, see [Checking object integrity]in the Amazon S3 User Guide. + * + * Only one checksum header can be specified at a time. If you supply multiple + * checksum headers, this request will fail. 
+ * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This specifies the + * base64-encoded, 32-bit CRC-32C checksum of the object returned by the Object + * Lambda function. This may not match the checksum for the object stored in Amazon + * S3. Amazon S3 will perform validation of the checksum values only when the + * original GetObject request required checksum validation. For more information + * about checksums, see [Checking object integrity]in the Amazon S3 User Guide. + * + * Only one checksum header can be specified at a time. If you supply multiple + * checksum headers, this request will fail. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32C?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This specifies the + * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda + * function. This may not match the checksum for the object stored in Amazon S3. + * Amazon S3 will perform validation of the checksum values only when the original + * GetObject request required checksum validation. For more information about + * checksums, see [Checking object integrity]in the Amazon S3 User Guide. + * + * Only one checksum header can be specified at a time. If you supply multiple + * checksum headers, this request will fail. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA1?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This specifies the + * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object + * Lambda function. This may not match the checksum for the object stored in Amazon + * S3. Amazon S3 will perform validation of the checksum values only when the + * original GetObject request required checksum validation. For more information + * about checksums, see [Checking object integrity]in the Amazon S3 User Guide. + * + * Only one checksum header can be specified at a time. If you supply multiple + * checksum headers, this request will fail. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA256?: string + /** + * Specifies presentational information for the object. + */ + contentDisposition?: string + /** + * Specifies what content encodings have been applied to the object and thus what + * decoding mechanisms must be applied to obtain the media-type referenced by the + * Content-Type header field. + */ + contentEncoding?: string + /** + * The language the content is in. + */ + contentLanguage?: string + /** + * The size of the content body in bytes. + */ + contentLength?: number + /** + * The portion of the object returned in the response. + */ + contentRange?: string + /** + * A standard MIME type describing the format of the object data. + */ + contentType?: string + /** + * Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false ) + * a delete marker. 
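+ *
+ * For orientation only, a minimal Object Lambda success response built from this
+ * input type might set just a handful of fields. The `event` shape below is a
+ * hypothetical placeholder, not part of this API:
+ *
+ * ```
+ * const response: Partial<WriteGetObjectResponseInput> = {
+ *     requestRoute: event.getObjectContext.outputRoute, // placeholder event shape
+ *     requestToken: event.getObjectContext.outputToken,
+ *     statusCode:   200,
+ *     contentType:  "application/json",
+ * }
+ * ```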
+ */
+ deleteMarker?: boolean
+ /**
+ * An opaque identifier assigned by a web server to a specific version of a
+ * resource found at a URL.
+ */
+ eTag?: string
+ /**
+ * A string that uniquely identifies an error condition. Returned in the <Code>
+ * tag of the error XML response for a corresponding GetObject call. Cannot be
+ * used with a successful StatusCode header or when the transformed object is
+ * provided in the body. All error codes from S3 are sentence-cased. The regular
+ * expression (regex) value is "^[A-Z][a-zA-Z]+$" .
+ */
+ errorCode?: string
+ /**
+ * Contains a generic description of the error condition. Returned in the
+ * <Message> tag of the error XML response for a corresponding GetObject call.
+ * Cannot be used with a successful StatusCode header or when the transformed
+ * object is provided in the body.
+ */
+ errorMessage?: string
+ /**
+ * If the object expiration is configured (see PUT Bucket lifecycle), the response
+ * includes this header. It includes the expiry-date and rule-id key-value pairs
+ * that provide the object expiration information. The value of the rule-id is
+ * URL-encoded.
+ */
+ expiration?: string
+ /**
+ * The date and time at which the object is no longer cacheable.
+ */
+ expires?: time.Time
+ /**
+ * The date and time that the object was last modified.
+ */
+ lastModified?: time.Time
+ /**
+ * A map of metadata to store with the object in S3.
+ */
+ metadata: _TygojaDict
+ /**
+ * Set to the number of metadata entries not returned in x-amz-meta headers. This
+ * can happen if you create metadata using an API like SOAP that supports more
+ * flexible metadata than the REST API. For example, using SOAP, you can create
+ * metadata whose values are not legal HTTP headers.
+ */
+ missingMeta?: number
+ /**
+ * Indicates whether an object stored in Amazon S3 has an active legal hold.
+ */
+ objectLockLegalHoldStatus: types.ObjectLockLegalHoldStatus
+ /**
+ * Indicates whether an object stored in Amazon S3 has Object Lock enabled. For
+ * more information about S3 Object Lock, see [Object Lock].
+ *
+ * [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html
+ */
+ objectLockMode: types.ObjectLockMode
+ /**
+ * The date and time when Object Lock is configured to expire.
+ */
+ objectLockRetainUntilDate?: time.Time
+ /**
+ * The count of parts this object has.
+ */
+ partsCount?: number
+ /**
+ * Indicates if the request involves a bucket that is either a source or
+ * destination in a Replication rule. For more information about S3 Replication,
+ * see [Replication].
+ *
+ * [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html
+ */
+ replicationStatus: types.ReplicationStatus
+ /**
+ * If present, indicates that the requester was successfully charged for the
+ * request.
+ *
+ * This functionality is not supported for directory buckets.
+ */
+ requestCharged: types.RequestCharged
+ /**
+ * Provides information about the object restoration operation and the expiration
+ * time of the restored object copy.
+ */
+ restore?: string
+ /**
+ * Encryption algorithm used if server-side encryption with a customer-provided
+ * encryption key was specified for the object stored in Amazon S3.
+ */
+ sseCustomerAlgorithm?: string
+ /**
+ * 128-bit MD5 digest of the customer-provided encryption key used in Amazon S3 to
+ * encrypt data stored in S3. For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)].
+ *
+ * [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
+ */
+ sseCustomerKeyMD5?: string
+ /**
+ * If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web
+ * Services Key Management Service (Amazon Web Services KMS) symmetric encryption
+ * customer managed key that was used for the object stored in Amazon S3.
+ */
+ ssekmsKeyId?: string
+ /**
+ * The server-side encryption algorithm used when storing the requested object in
+ * Amazon S3 (for example, AES256, aws:kms ).
+ */
+ serverSideEncryption: types.ServerSideEncryption
+ /**
+ * The integer status code for an HTTP response of a corresponding GetObject
+ * request. The following is a list of status codes.
+ *
+ * ```
+ * - 200 - OK
+ *
+ * - 206 - Partial Content
+ *
+ * - 304 - Not Modified
+ *
+ * - 400 - Bad Request
+ *
+ * - 401 - Unauthorized
+ *
+ * - 403 - Forbidden
+ *
+ * - 404 - Not Found
+ *
+ * - 405 - Method Not Allowed
+ *
+ * - 409 - Conflict
+ *
+ * - 411 - Length Required
+ *
+ * - 412 - Precondition Failed
+ *
+ * - 416 - Range Not Satisfiable
+ *
+ * - 500 - Internal Server Error
+ *
+ * - 503 - Service Unavailable
+ * ```
+ */
+ statusCode?: number
+ /**
+ * Provides storage class information of the object. Amazon S3 returns this header
+ * for all objects except for S3 Standard storage class objects.
+ *
+ * For more information, see [Storage Classes].
+ *
+ * [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ */
+ storageClass: types.StorageClass
+ /**
+ * The number of tags, if any, on the object.
+ */
+ tagCount?: number
+ /**
+ * An ID used to reference a specific version of the object.
+ */
+ versionId?: string
+ }
+ type _subqFpGs = noSmithyDocumentSerde
+ interface WriteGetObjectResponseOutput extends _subqFpGs {
+ /**
+ * Metadata pertaining to the operation's result.
+ */
+ resultMetadata: middleware.Metadata
+ }
+ // @ts-ignore
+ import internalcontext = context
+ // @ts-ignore
+ import awsxml = xml
+ // @ts-ignore
+ import internalendpoints = endpoints
+ // @ts-ignore
+ import smithyendpoints = endpoints
+ interface Options {
+ /**
+ * Set of options to modify how an operation is invoked. These apply to all
+ * operations invoked for this client. Use functional options on operation call to
+ * modify this list for per operation behavior.
+ */
+ apiOptions: Array<(_arg0: middleware.Stack) => void>
+ /**
+ * The optional application specific identifier appended to the User-Agent header.
+ */
+ appID: string
+ /**
+ * This endpoint will be given as input to an EndpointResolverV2. It is used for
+ * providing a custom base endpoint that is subject to modifications by the
+ * processing EndpointResolverV2.
+ */
+ baseEndpoint?: string
+ /**
+ * Configures the events that will be sent to the configured logger.
+ */
+ clientLogMode: aws.ClientLogMode
+ /**
+ * The ContentLength threshold in bytes for an HTTP PUT request to receive the
+ * {Expect: 100-continue} header. Setting it to -1 disables adding the Expect
+ * header to requests; setting it to 0 sets the threshold to the default of 2MB.
+ */
+ continueHeaderThresholdBytes: number
+ /**
+ * The credentials object to use when signing requests.
+ */
+ credentials: aws.CredentialsProvider
+ /**
+ * The configuration DefaultsMode that the SDK should use when constructing the
+ * client's initial default settings.
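+ *
+ * As a non-authoritative sketch (the "standard" value below mirrors the Go
+ * SDK's aws.DefaultsModeStandard constant and is an assumption, as is the
+ * combination of fields shown), a few commonly tuned options could be
+ * expressed as:
+ *
+ * ```
+ * const opts: Partial<Options> = {
+ *     region:           "us-east-1",
+ *     usePathStyle:     true, // https://s3.amazonaws.com/BUCKET/KEY addressing
+ *     retryMaxAttempts: 5,
+ *     defaultsMode:     "standard",
+ * }
+ * ```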
+ */
+ defaultsMode: aws.DefaultsMode
+ /**
+ * Allows you to disable the S3 Multi-Region Access Points feature.
+ */
+ disableMultiRegionAccessPoints: boolean
+ /**
+ * Disables this client's usage of Session Auth for S3Express buckets and reverts
+ * to using conventional SigV4 for those.
+ */
+ disableS3ExpressSessionAuth?: boolean
+ /**
+ * The endpoint options to be used when attempting to resolve an endpoint.
+ */
+ endpointOptions: EndpointResolverOptions
+ /**
+ * The service endpoint resolver.
+ *
+ * Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+ * value for this field will likely prevent you from using any endpoint-related
+ * service features released after the introduction of EndpointResolverV2 and
+ * BaseEndpoint.
+ *
+ * To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ * the client option BaseEndpoint instead.
+ */
+ endpointResolver: EndpointResolver
+ /**
+ * Resolves the endpoint used for a particular service operation. This should be
+ * used over the deprecated EndpointResolver.
+ */
+ endpointResolverV2: EndpointResolverV2
+ /**
+ * The credentials provider for S3Express requests.
+ */
+ expressCredentials: ExpressCredentialsProvider
+ /**
+ * Signature Version 4 (SigV4) Signer
+ */
+ httpSignerV4: HTTPSignerV4
+ /**
+ * The logger writer interface to write logging messages to.
+ */
+ logger: logging.Logger
+ /**
+ * The client meter provider.
+ */
+ meterProvider: metrics.MeterProvider
+ /**
+ * The region to send requests to. (Required)
+ */
+ region: string
+ /**
+ * RetryMaxAttempts specifies the maximum number of attempts an API client will
+ * make when calling an operation that fails with a retryable error. A value of 0
+ * is ignored, and will not be used to configure the API client created default
+ * retryer, or modify per operation call's retry max attempts.
+ *
+ * If specified in an operation call's functional options with a value that is
+ * different than the constructed client's Options, the Client's Retryer will be
+ * wrapped to use the operation's specific RetryMaxAttempts value.
+ */
+ retryMaxAttempts: number
+ /**
+ * RetryMode specifies the retry mode the API client will be created with, if
+ * Retryer option is not also specified.
+ *
+ * When creating new API clients, this member will only be used if the Retryer
+ * Options member is nil. This value will be ignored if Retryer is not nil.
+ *
+ * Currently does not support per-operation call overrides; it may in the future.
+ */
+ retryMode: aws.RetryMode
+ /**
+ * Retryer guides how HTTP requests should be retried in case of recoverable
+ * failures. When nil, the API client will use a default retryer. The kind of
+ * default retry created by the API client can be changed with the RetryMode
+ * option.
+ */
+ retryer: aws.Retryer
+ /**
+ * The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+ * to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+ * should not populate this structure programmatically, or rely on the values here
+ * within your applications.
+ */
+ runtimeEnvironment: aws.RuntimeEnvironment
+ /**
+ * The client tracer provider.
+ */
+ tracerProvider: tracing.TracerProvider
+ /**
+ * Allows you to enable ARN region support for the service.
+ */
+ useARNRegion: boolean
+ /**
+ * Allows you to enable the S3 Accelerate feature. All operations compatible with
+ * S3 Accelerate will use the accelerate endpoint for requests. Requests not
+ * compatible will fall back to normal S3 requests.
The bucket must be enabled for + * accelerate to be used with S3 client with accelerate enabled. If the bucket is + * not enabled for accelerate an error will be returned. The bucket name must be + * DNS compatible to work with accelerate. + */ + useAccelerate: boolean + /** + * Allows you to enable dual-stack endpoint support for the service. + * + * Deprecated: Set dual-stack by setting UseDualStackEndpoint on + * EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint + * field is set it overrides this field value. + */ + useDualstack: boolean + /** + * Allows you to enable the client to use path-style addressing, i.e., + * https://s3.amazonaws.com/BUCKET/KEY . By default, the S3 client will use virtual + * hosted bucket addressing when possible( https://BUCKET.s3.amazonaws.com/KEY ). + */ + usePathStyle: boolean + /** + * The HTTP client to invoke API calls with. Defaults to client's default HTTP + * implementation if nil. + */ + httpClient: HTTPClient + /** + * The auth scheme resolver which determines how to authenticate for each + * operation. + */ + authSchemeResolver: AuthSchemeResolver + /** + * The list of auth schemes supported by the client. + */ + authSchemes: Array + } + interface Options { + /** + * Copy creates a clone where the APIOptions list is deep copied. + */ + copy(): Options + } + interface Options { + getIdentityResolver(schemeID: string): smithyauth.IdentityResolver + } + // @ts-ignore + import v4 = signer } /** @@ -16446,8 +34146,8 @@ namespace models { */ validate(): void } - type _subWyIIM = BaseModel - interface Log extends _subWyIIM { + type _subOHEED = BaseModel + interface Log extends _subOHEED { data: types.JsonMap message: string level: number @@ -16455,8 +34155,8 @@ namespace models { interface Log { tableName(): string } - type _subXUePJ = BaseModel - interface Param extends _subXUePJ { + type _subpuOmJ = BaseModel + interface Param extends _subpuOmJ { key: string value: types.JsonRaw } @@ -16985,6 +34685,14 @@ namespace oauth2 { * mechanisms for that TokenSource will not be used. */ expiry: time.Time + /** + * ExpiresIn is the OAuth2 wire format "expires_in" field, + * which specifies how many seconds later the token expires, + * relative to an unknown time base approximately around "now". + * It is the application's responsibility to populate + * `Expiry` from `ExpiresIn` when required. + */ + expiresIn: number } interface Token { /** @@ -17209,87 +34917,6 @@ namespace daos { import validation = ozzo_validation } -namespace hook { - /** - * Hook defines a concurrent safe structure for handling event hooks - * (aka. callbacks propagation). - */ - interface Hook { - } - interface Hook { - /** - * PreAdd registers a new handler to the hook by prepending it to the existing queue. - * - * Returns an autogenerated hook id that could be used later to remove the hook with Hook.Remove(id). - */ - preAdd(fn: Handler): string - } - interface Hook { - /** - * Add registers a new handler to the hook by appending it to the existing queue. - * - * Returns an autogenerated hook id that could be used later to remove the hook with Hook.Remove(id). - */ - add(fn: Handler): string - } - interface Hook { - /** - * Remove removes a single hook handler by its id. - */ - remove(id: string): void - } - interface Hook { - /** - * RemoveAll removes all registered handlers. - */ - removeAll(): void - } - interface Hook { - /** - * Trigger executes all registered hook handlers one by one - * with the specified `data` as an argument. 
- * - * Optionally, this method allows also to register additional one off - * handlers that will be temporary appended to the handlers queue. - * - * The execution stops when: - * - hook.StopPropagation is returned in one of the handlers - * - any non-nil error is returned in one of the handlers - */ - trigger(data: T, ...oneOffHandlers: Handler[]): void - } - /** - * TaggedHook defines a proxy hook which register handlers that are triggered only - * if the TaggedHook.tags are empty or includes at least one of the event data tag(s). - */ - type _subhfFNE = mainHook - interface TaggedHook extends _subhfFNE { - } - interface TaggedHook { - /** - * CanTriggerOn checks if the current TaggedHook can be triggered with - * the provided event data tags. - */ - canTriggerOn(tags: Array): boolean - } - interface TaggedHook { - /** - * PreAdd registers a new handler to the hook by prepending it to the existing queue. - * - * The fn handler will be called only if the event data tags satisfy h.CanTriggerOn. - */ - preAdd(fn: Handler): string - } - interface TaggedHook { - /** - * Add registers a new handler to the hook by appending it to the existing queue. - * - * The fn handler will be called only if the event data tags satisfy h.CanTriggerOn. - */ - add(fn: Handler): string - } -} - /** * Package slog provides structured logging, * in which log records include a message, @@ -17654,8 +35281,10 @@ namespace hook { * Now computeExpensiveValue will only be called when the line is enabled. * * The built-in handlers acquire a lock before calling [io.Writer.Write] - * to ensure that each record is written in one piece. User-defined - * handlers are responsible for their own locking. + * to ensure that exactly one [Record] is written at a time in its entirety. + * Although each log record has a timestamp, + * the built-in handlers do not use that time to sort the written records. + * User-defined handlers are responsible for their own locking and sorting. 
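+ *
+ * In the PocketBase jsvm this package mostly surfaces through the app logger.
+ * A hedged sketch, assuming the runtime exposes it as $app.logger(); per-call
+ * attributes are passed as alternating key/value arguments:
+ *
+ * ```
+ * $app.logger().info(
+ *     "order processed",
+ *     "orderId", 123,
+ *     "durationMs", 45.7,
+ * )
+ * ```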
* * # Writing a handler * @@ -17838,12 +35467,12 @@ namespace core { httpContext: echo.Context error: Error } - type _subIqXTj = BaseModelEvent - interface ModelEvent extends _subIqXTj { + type _subyaEim = BaseModelEvent + interface ModelEvent extends _subyaEim { dao?: daos.Dao } - type _subYBsYP = BaseCollectionEvent - interface MailerRecordEvent extends _subYBsYP { + type _subskeUa = BaseCollectionEvent + interface MailerRecordEvent extends _subskeUa { mailClient: mailer.Mailer message?: mailer.Message record?: models.Record @@ -17883,50 +35512,50 @@ namespace core { oldSettings?: settings.Settings newSettings?: settings.Settings } - type _subLkmRK = BaseCollectionEvent - interface RecordsListEvent extends _subLkmRK { + type _subwpEYD = BaseCollectionEvent + interface RecordsListEvent extends _subwpEYD { httpContext: echo.Context records: Array<(models.Record | undefined)> result?: search.Result } - type _subrYldn = BaseCollectionEvent - interface RecordViewEvent extends _subrYldn { + type _subtaOqT = BaseCollectionEvent + interface RecordViewEvent extends _subtaOqT { httpContext: echo.Context record?: models.Record } - type _subShsfQ = BaseCollectionEvent - interface RecordCreateEvent extends _subShsfQ { + type _subxKFWr = BaseCollectionEvent + interface RecordCreateEvent extends _subxKFWr { httpContext: echo.Context record?: models.Record uploadedFiles: _TygojaDict } - type _subBQgiv = BaseCollectionEvent - interface RecordUpdateEvent extends _subBQgiv { + type _subciQwE = BaseCollectionEvent + interface RecordUpdateEvent extends _subciQwE { httpContext: echo.Context record?: models.Record uploadedFiles: _TygojaDict } - type _subwKNJA = BaseCollectionEvent - interface RecordDeleteEvent extends _subwKNJA { + type _sublAvhJ = BaseCollectionEvent + interface RecordDeleteEvent extends _sublAvhJ { httpContext: echo.Context record?: models.Record } - type _subNVvZz = BaseCollectionEvent - interface RecordAuthEvent extends _subNVvZz { + type _subxEibi = BaseCollectionEvent + interface RecordAuthEvent extends _subxEibi { httpContext: echo.Context record?: models.Record token: string meta: any } - type _subxBJeP = BaseCollectionEvent - interface RecordAuthWithPasswordEvent extends _subxBJeP { + type _subVKrAt = BaseCollectionEvent + interface RecordAuthWithPasswordEvent extends _subVKrAt { httpContext: echo.Context record?: models.Record identity: string password: string } - type _subUSiim = BaseCollectionEvent - interface RecordAuthWithOAuth2Event extends _subUSiim { + type _subZBmyR = BaseCollectionEvent + interface RecordAuthWithOAuth2Event extends _subZBmyR { httpContext: echo.Context providerName: string providerClient: auth.Provider @@ -17934,49 +35563,49 @@ namespace core { oAuth2User?: auth.AuthUser isNewRecord: boolean } - type _subdXIjq = BaseCollectionEvent - interface RecordAuthRefreshEvent extends _subdXIjq { + type _subhzcxC = BaseCollectionEvent + interface RecordAuthRefreshEvent extends _subhzcxC { httpContext: echo.Context record?: models.Record } - type _subvFage = BaseCollectionEvent - interface RecordRequestPasswordResetEvent extends _subvFage { + type _subiLmJt = BaseCollectionEvent + interface RecordRequestPasswordResetEvent extends _subiLmJt { httpContext: echo.Context record?: models.Record } - type _subojVuh = BaseCollectionEvent - interface RecordConfirmPasswordResetEvent extends _subojVuh { + type _subGFxjK = BaseCollectionEvent + interface RecordConfirmPasswordResetEvent extends _subGFxjK { httpContext: echo.Context record?: models.Record } - type _subhRgnz = 
BaseCollectionEvent - interface RecordRequestVerificationEvent extends _subhRgnz { + type _subzficU = BaseCollectionEvent + interface RecordRequestVerificationEvent extends _subzficU { httpContext: echo.Context record?: models.Record } - type _subOEyNW = BaseCollectionEvent - interface RecordConfirmVerificationEvent extends _subOEyNW { + type _subohIST = BaseCollectionEvent + interface RecordConfirmVerificationEvent extends _subohIST { httpContext: echo.Context record?: models.Record } - type _subJiOHg = BaseCollectionEvent - interface RecordRequestEmailChangeEvent extends _subJiOHg { + type _subCykoA = BaseCollectionEvent + interface RecordRequestEmailChangeEvent extends _subCykoA { httpContext: echo.Context record?: models.Record } - type _suboVOGS = BaseCollectionEvent - interface RecordConfirmEmailChangeEvent extends _suboVOGS { + type _subnopwl = BaseCollectionEvent + interface RecordConfirmEmailChangeEvent extends _subnopwl { httpContext: echo.Context record?: models.Record } - type _subQMnYt = BaseCollectionEvent - interface RecordListExternalAuthsEvent extends _subQMnYt { + type _subFAnbA = BaseCollectionEvent + interface RecordListExternalAuthsEvent extends _subFAnbA { httpContext: echo.Context record?: models.Record externalAuths: Array<(models.ExternalAuth | undefined)> } - type _subAmSHR = BaseCollectionEvent - interface RecordUnlinkExternalAuthEvent extends _subAmSHR { + type _subfYBlb = BaseCollectionEvent + interface RecordUnlinkExternalAuthEvent extends _subfYBlb { httpContext: echo.Context record?: models.Record externalAuth?: models.ExternalAuth @@ -18030,33 +35659,33 @@ namespace core { collections: Array<(models.Collection | undefined)> result?: search.Result } - type _subsmzBX = BaseCollectionEvent - interface CollectionViewEvent extends _subsmzBX { + type _subjPeMq = BaseCollectionEvent + interface CollectionViewEvent extends _subjPeMq { httpContext: echo.Context } - type _subErkii = BaseCollectionEvent - interface CollectionCreateEvent extends _subErkii { + type _subXjljn = BaseCollectionEvent + interface CollectionCreateEvent extends _subXjljn { httpContext: echo.Context } - type _subwhxGp = BaseCollectionEvent - interface CollectionUpdateEvent extends _subwhxGp { + type _subnVfEX = BaseCollectionEvent + interface CollectionUpdateEvent extends _subnVfEX { httpContext: echo.Context } - type _subLGwgi = BaseCollectionEvent - interface CollectionDeleteEvent extends _subLGwgi { + type _subMtvMx = BaseCollectionEvent + interface CollectionDeleteEvent extends _subMtvMx { httpContext: echo.Context } interface CollectionsImportEvent { httpContext: echo.Context collections: Array<(models.Collection | undefined)> } - type _subJwbPo = BaseModelEvent - interface FileTokenEvent extends _subJwbPo { + type _subzChAu = BaseModelEvent + interface FileTokenEvent extends _subzChAu { httpContext: echo.Context token: string } - type _subcppDF = BaseCollectionEvent - interface FileDownloadEvent extends _subcppDF { + type _subKnVrU = BaseCollectionEvent + interface FileDownloadEvent extends _subKnVrU { httpContext: echo.Context record?: models.Record fileField?: schema.SchemaField @@ -18114,44 +35743,18 @@ namespace cobra { } } -namespace migrate { - interface Migration { - file: string - up: (db: dbx.Builder) => void - down: (db: dbx.Builder) => void - } -} - /** - * Package url parses URLs and implements query escaping. + * Package bufio implements buffered I/O. 
It wraps an io.Reader or io.Writer + * object, creating another object (Reader or Writer) that also implements + * the interface but provides buffering and some help for textual I/O. */ -namespace url { +namespace bufio { /** - * The Userinfo type is an immutable encapsulation of username and - * password details for a [URL]. An existing Userinfo value is guaranteed - * to have a username set (potentially empty, as allowed by RFC 2396), - * and optionally a password. + * ReadWriter stores pointers to a [Reader] and a [Writer]. + * It implements [io.ReadWriter]. */ - interface Userinfo { - } - interface Userinfo { - /** - * Username returns the username. - */ - username(): string - } - interface Userinfo { - /** - * Password returns the password in case it is set, and whether it is set. - */ - password(): [string, boolean] - } - interface Userinfo { - /** - * String returns the encoded userinfo information in the standard form - * of "username[:password]". - */ - string(): string + type _subNCmDN = Reader&Writer + interface ReadWriter extends _subNCmDN { } } @@ -18203,16 +35806,19 @@ namespace url { * listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C * library routines such as getaddrinfo and getnameinfo. * - * By default the pure Go resolver is used, because a blocked DNS request consumes - * only a goroutine, while a blocked C call consumes an operating system thread. + * On Unix the pure Go resolver is preferred over the cgo resolver, because a blocked DNS + * request consumes only a goroutine, while a blocked C call consumes an operating system thread. * When cgo is available, the cgo-based resolver is used instead under a variety of * conditions: on systems that do not let programs make direct DNS requests (OS X), * when the LOCALDOMAIN environment variable is present (even if empty), * when the RES_OPTIONS or HOSTALIASES environment variable is non-empty, * when the ASR_CONFIG environment variable is non-empty (OpenBSD only), * when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the - * Go resolver does not implement, and when the name being looked up ends in .local - * or is an mDNS name. + * Go resolver does not implement. + * + * On all systems (except Plan 9), when the cgo resolver is being used + * this package applies a concurrent cgo lookup limit to prevent the system + * from running out of system threads. Currently, it is limited to 500 concurrent lookups. * * The resolver decision can be overridden by setting the netdns value of the * GODEBUG environment variable (see package runtime) to go or cgo, as in: @@ -18230,6 +35836,12 @@ namespace url { * To force a particular resolver while also printing debugging information, * join the two settings by a plus sign, as in GODEBUG=netdns=go+1. * + * The Go resolver will send an EDNS0 additional header with a DNS request, + * to signal a willingness to accept a larger DNS packet size. + * This can reportedly cause sporadic failures with the DNS server run + * by some modems and routers. Setting GODEBUG=netedns0=0 will disable + * sending the additional header. + * * On macOS, if Go code that uses the net package is built with * -buildmode=c-archive, linking the resulting archive into a C program * requires passing -lresolv when linking the C code. @@ -18254,63 +35866,6 @@ namespace net { } } -/** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. 
- */ -namespace types { - /** - * JsonRaw defines a json value type that is safe for db read/write. - */ - interface JsonRaw extends Array{} - interface JsonRaw { - /** - * String returns the current JsonRaw instance as a json encoded string. - */ - string(): string - } - interface JsonRaw { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface JsonRaw { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(b: string|Array): void - } - interface JsonRaw { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface JsonRaw { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current JsonRaw instance. - */ - scan(value: any): void - } -} - -/** - * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer - * object, creating another object (Reader or Writer) that also implements - * the interface but provides buffering and some help for textual I/O. - */ -namespace bufio { - /** - * ReadWriter stores pointers to a [Reader] and a [Writer]. - * It implements [io.ReadWriter]. - */ - type _subjuTbM = Reader&Writer - interface ReadWriter extends _subjuTbM { - } -} - /** * Package multipart implements MIME multipart parsing, as defined in RFC * 2046. @@ -18323,8 +35878,8 @@ namespace bufio { * To protect against malicious inputs, this package sets limits on the size * of the MIME data it processes. * - * Reader.NextPart and Reader.NextRawPart limit the number of headers in a - * part to 10000 and Reader.ReadForm limits the total number of headers in all + * [Reader.NextPart] and [Reader.NextRawPart] limit the number of headers in a + * part to 10000 and [Reader.ReadForm] limits the total number of headers in all * FileHeaders to 10000. * These limits may be adjusted with the GODEBUG=multipartmaxheaders= * setting. @@ -18333,11 +35888,6 @@ namespace bufio { * This limit may be adjusted with the GODEBUG=multipartmaxparts= * setting. */ -/** - * Copyright 2023 The Go Authors. All rights reserved. - * Use of this source code is governed by a BSD-style - * license that can be found in the LICENSE file. - */ namespace multipart { /** * A Part represents a single part in a multipart body. @@ -18359,7 +35909,7 @@ namespace multipart { } interface Part { /** - * FileName returns the filename parameter of the Part's Content-Disposition + * FileName returns the filename parameter of the [Part]'s Content-Disposition * header. If not empty, the filename is passed through filepath.Base (which is * platform dependent) before being returned. */ @@ -18532,6 +36082,3439 @@ namespace mailer { } } +namespace logging { + /** + * Classification is the type of the log entry's classification name. + */ + interface Classification extends String{} +} + +/** + * Package tracing defines tracing APIs to be used by Smithy clients. + */ +namespace tracing { + /** + * TracerProvider is the entry point for creating client traces. + */ + interface TracerProvider { + [key:string]: any; + tracer(scope: string, ...opts: TracerOption[]): Tracer + } +} + +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { + /** + * JsonRaw defines a json value type that is safe for db read/write. + */ + interface JsonRaw extends Array{} + interface JsonRaw { + /** + * String returns the current JsonRaw instance as a json encoded string. 
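+ *
+ * A minimal jsvm sketch (`raw` is a placeholder for a types.JsonRaw value
+ * obtained elsewhere, e.g. from a deserialized db column):
+ *
+ * ```
+ * raw.unmarshalJSON('{"title":"test"}')
+ * console.log(raw.string()) // {"title":"test"}
+ * ```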
+ */ + string(): string + } + interface JsonRaw { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface JsonRaw { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface JsonRaw { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface JsonRaw { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current JsonRaw instance. + */ + scan(value: any): void + } +} + +namespace search { + /** + * Result defines the returned search result structure. + */ + interface Result { + page: number + perPage: number + totalItems: number + totalPages: number + items: any + } +} + +namespace hook { + /** + * Handler defines a hook handler function. + */ + interface Handler {(e: T): void } + /** + * wrapped local Hook embedded struct to limit the public API surface. + */ + type _suboKSlp = Hook + interface mainHook extends _suboKSlp { + } +} + +/** + * Package metrics defines the metrics APIs used by Smithy clients. + */ +namespace metrics { + /** + * MeterProvider is the entry point for creating a Meter. + */ + interface MeterProvider { + [key:string]: any; + meter(scope: string, ...opts: MeterOption[]): Meter + } +} + +/** + * Package auth defines protocol-agnostic authentication types for smithy + * clients. + */ +namespace auth { + /** + * IdentityResolver defines the interface through which an Identity is + * retrieved. + */ + interface IdentityResolver { + [key:string]: any; + getIdentity(_arg0: context.Context, _arg1: smithy.Properties): Identity + } +} + +/** + * Package middleware provides transport agnostic middleware for decorating SDK + * handlers. + * + * The Smithy middleware stack provides ordered behavior to be invoked on an + * underlying handler. The stack is separated into steps that are invoked in a + * static order. A step is a collection of middleware that are injected into a + * ordered list defined by the user. The user may add, insert, swap, and remove a + * step's middleware. When the stack is invoked the step middleware become static, + * and their order cannot be modified. + * + * A stack and its step middleware are **not** safe to modify concurrently. + * + * A stack will use the ordered list of middleware to decorate a underlying + * handler. A handler could be something like an HTTP Client that round trips an + * API operation over HTTP. + * + * Smithy Middleware Stack + * + * A Stack is a collection of middleware that wrap a handler. The stack can be + * broken down into discreet steps. Each step may contain zero or more middleware + * specific to that stack's step. + * + * A Stack Step is a predefined set of middleware that are invoked in a static + * order by the Stack. These steps represent fixed points in the middleware stack + * for organizing specific behavior, such as serialize and build. A Stack Step is + * composed of zero or more middleware that are specific to that step. A step may + * define its own set of input/output parameters the generic input/output + * parameters are cast from. A step calls its middleware recursively, before + * calling the next step in the stack returning the result or error of the step + * middleware decorating the underlying handler. + * + * * Initialize: Prepares the input, and sets any default parameters as needed, + * (e.g. idempotency token, and presigned URLs). 
+ * + * * Serialize: Serializes the prepared input into a data structure that can be + * consumed by the target transport's message, (e.g. REST-JSON serialization). + * + * * Build: Adds additional metadata to the serialized transport message, (e.g. + * HTTP's Content-Length header, or body checksum). Decorations and + * modifications to the message should be copied to all message attempts. + * + * * Finalize: Performs final preparations needed before sending the message. The + * message should already be complete by this stage, and is only alternated to + * meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request + * signing). + * + * * Deserialize: Reacts to the handler's response returned by the recipient of + * the request message. Deserializes the response into a structured type or + * error above stacks can react to. + * + * Adding Middleware to a Stack Step + * + * Middleware can be added to a step front or back, or relative, by name, to an + * existing middleware in that stack. If a middleware does not have a name, a + * unique name will be generated at the middleware and be added to the step. + * + * ``` + * // Create middleware stack + * stack := middleware.NewStack() + * + * // Add middleware to stack steps + * stack.Initialize.Add(paramValidationMiddleware, middleware.After) + * stack.Serialize.Add(marshalOperationFoo, middleware.After) + * stack.Deserialize.Add(unmarshalOperationFoo, middleware.After) + * + * // Invoke middleware on handler. + * resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler) + * ``` + */ +namespace middleware { + /** + * Metadata provides storing and reading metadata values. Keys may be any + * comparable value type. Get and set will panic if key is not a comparable + * value type. + * + * Metadata uses lazy initialization, and Set method must be called as an + * addressable value, or pointer. Not doing so may cause key/value pair to not + * be set. + */ + interface Metadata { + } + interface Metadata { + /** + * Get attempts to retrieve the value the key points to. Returns nil if the + * key was not found. + * + * Panics if key type is not comparable. + */ + get(key: { + }): { + } + } + interface Metadata { + /** + * Clone creates a shallow copy of Metadata entries, returning a new Metadata + * value with the original entries copied into it. + */ + clone(): Metadata + } + interface Metadata { + /** + * Set stores the value pointed to by the key. If a value already exists at + * that key it will be replaced with the new value. + * + * Set method must be called as an addressable value, or pointer. If Set is not + * called as an addressable value or pointer, the key value pair being set may + * be lost. + * + * Panics if the key type is not comparable. + */ + set(key: { + }, value: { + }): void + } + interface Metadata { + /** + * Has returns whether the key exists in the metadata. + * + * Panics if the key type is not comparable. + */ + has(key: { + }): boolean + } + /** + * Handler provides the interface for performing the logic to obtain an output, + * or error for the given input. + */ + interface Handler { + [key:string]: any; + /** + * Handle performs logic to obtain an output for the given input. Handler + * should be decorated with middleware to perform input specific behavior. + */ + handle(ctx: context.Context, input: { + }): [{ + }, Metadata] + } + /** + * BuildStep provides the ordered grouping of BuildMiddleware to be invoked on + * a handler. 
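+ *
+ * A speculative sketch of step manipulation against this interface (the
+ * `stack`, `checksumMW`, and `traceMW` names, the lowercase `build` field, and
+ * the middleware.After/Before position values are assumptions mirroring the Go
+ * API, not confirmed bindings):
+ *
+ * ```
+ * stack.build.add(checksumMW, middleware.After)               // append to the step
+ * stack.build.insert(traceMW, "checksum", middleware.Before)  // insert relative to an id
+ * const removed = stack.build.remove("trace")                 // drop by id
+ * ```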
+ */ + interface BuildStep { + } + interface BuildStep { + /** + * ID returns the unique name of the step as a middleware. + */ + id(): string + } + interface BuildStep { + /** + * HandleMiddleware invokes the middleware by decorating the next handler + * provided. Returns the result of the middleware and handler being invoked. + * + * Implements Middleware interface. + */ + handleMiddleware(ctx: context.Context, _arg10: { + }, next: Handler): [{ + }, Metadata] + } + interface BuildStep { + /** + * Get retrieves the middleware identified by id. If the middleware is not present, returns false. + */ + get(id: string): [BuildMiddleware, boolean] + } + interface BuildStep { + /** + * Add injects the middleware to the relative position of the middleware group. + * Returns an error if the middleware already exists. + */ + add(m: BuildMiddleware, pos: RelativePosition): void + } + interface BuildStep { + /** + * Insert injects the middleware relative to an existing middleware id. + * Returns an error if the original middleware does not exist, or the middleware + * being added already exists. + */ + insert(m: BuildMiddleware, relativeTo: string, pos: RelativePosition): void + } + interface BuildStep { + /** + * Swap removes the middleware by id, replacing it with the new middleware. + * Returns the middleware removed, or an error if the middleware to be removed + * doesn't exist. + */ + swap(id: string, m: BuildMiddleware): BuildMiddleware + } + interface BuildStep { + /** + * Remove removes the middleware by id. Returns error if the middleware + * doesn't exist. + */ + remove(id: string): BuildMiddleware + } + interface BuildStep { + /** + * List returns a list of the middleware in the step. + */ + list(): Array + } + interface BuildStep { + /** + * Clear removes all middleware in the step. + */ + clear(): void + } + /** + * DeserializeStep provides the ordered grouping of DeserializeMiddleware to be + * invoked on a handler. + */ + interface DeserializeStep { + } + interface DeserializeStep { + /** + * ID returns the unique ID of the step as a middleware. + */ + id(): string + } + interface DeserializeStep { + /** + * HandleMiddleware invokes the middleware by decorating the next handler + * provided. Returns the result of the middleware and handler being invoked. + * + * Implements Middleware interface. + */ + handleMiddleware(ctx: context.Context, _arg10: { + }, next: Handler): [{ + }, Metadata] + } + interface DeserializeStep { + /** + * Get retrieves the middleware identified by id. If the middleware is not present, returns false. + */ + get(id: string): [DeserializeMiddleware, boolean] + } + interface DeserializeStep { + /** + * Add injects the middleware to the relative position of the middleware group. + * Returns an error if the middleware already exists. + */ + add(m: DeserializeMiddleware, pos: RelativePosition): void + } + interface DeserializeStep { + /** + * Insert injects the middleware relative to an existing middleware ID. + * Returns error if the original middleware does not exist, or the middleware + * being added already exists. + */ + insert(m: DeserializeMiddleware, relativeTo: string, pos: RelativePosition): void + } + interface DeserializeStep { + /** + * Swap removes the middleware by id, replacing it with the new middleware. + * Returns the middleware removed, or error if the middleware to be removed + * doesn't exist. + */ + swap(id: string, m: DeserializeMiddleware): DeserializeMiddleware + } + interface DeserializeStep { + /** + * Remove removes the middleware by id. 
Returns error if the middleware + * doesn't exist. + */ + remove(id: string): DeserializeMiddleware + } + interface DeserializeStep { + /** + * List returns a list of the middleware in the step. + */ + list(): Array + } + interface DeserializeStep { + /** + * Clear removes all middleware in the step. + */ + clear(): void + } + /** + * FinalizeStep provides the ordered grouping of FinalizeMiddleware to be + * invoked on a handler. + */ + interface FinalizeStep { + } + interface FinalizeStep { + /** + * ID returns the unique id of the step as a middleware. + */ + id(): string + } + interface FinalizeStep { + /** + * HandleMiddleware invokes the middleware by decorating the next handler + * provided. Returns the result of the middleware and handler being invoked. + * + * Implements Middleware interface. + */ + handleMiddleware(ctx: context.Context, _arg10: { + }, next: Handler): [{ + }, Metadata] + } + interface FinalizeStep { + /** + * Get retrieves the middleware identified by id. If the middleware is not present, returns false. + */ + get(id: string): [FinalizeMiddleware, boolean] + } + interface FinalizeStep { + /** + * Add injects the middleware to the relative position of the middleware group. + * Returns an error if the middleware already exists. + */ + add(m: FinalizeMiddleware, pos: RelativePosition): void + } + interface FinalizeStep { + /** + * Insert injects the middleware relative to an existing middleware ID. + * Returns error if the original middleware does not exist, or the middleware + * being added already exists. + */ + insert(m: FinalizeMiddleware, relativeTo: string, pos: RelativePosition): void + } + interface FinalizeStep { + /** + * Swap removes the middleware by id, replacing it with the new middleware. + * Returns the middleware removed, or error if the middleware to be removed + * doesn't exist. + */ + swap(id: string, m: FinalizeMiddleware): FinalizeMiddleware + } + interface FinalizeStep { + /** + * Remove removes the middleware by id. Returns error if the middleware + * doesn't exist. + */ + remove(id: string): FinalizeMiddleware + } + interface FinalizeStep { + /** + * List returns a list of the middleware in the step. + */ + list(): Array + } + interface FinalizeStep { + /** + * Clear removes all middleware in the step. + */ + clear(): void + } + /** + * InitializeStep provides the ordered grouping of InitializeMiddleware to be + * invoked on a handler. + */ + interface InitializeStep { + } + interface InitializeStep { + /** + * ID returns the unique ID of the step as a middleware. + */ + id(): string + } + interface InitializeStep { + /** + * HandleMiddleware invokes the middleware by decorating the next handler + * provided. Returns the result of the middleware and handler being invoked. + * + * Implements Middleware interface. + */ + handleMiddleware(ctx: context.Context, _arg10: { + }, next: Handler): [{ + }, Metadata] + } + interface InitializeStep { + /** + * Get retrieves the middleware identified by id. If the middleware is not present, returns false. + */ + get(id: string): [InitializeMiddleware, boolean] + } + interface InitializeStep { + /** + * Add injects the middleware to the relative position of the middleware group. + * Returns an error if the middleware already exists. + */ + add(m: InitializeMiddleware, pos: RelativePosition): void + } + interface InitializeStep { + /** + * Insert injects the middleware relative to an existing middleware ID. 
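+ *
+ * A hypothetical usage sketch (all names are placeholders; `myMiddleware`
+ * is assumed to implement InitializeMiddleware and `pos` to be a valid
+ * RelativePosition):
+ *
+ * ```
+ * declare const step: InitializeStep
+ * declare const myMiddleware: InitializeMiddleware
+ * declare const pos: RelativePosition
+ * step.insert(myMiddleware, "existingId", pos)
+ * ```
+ *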
+ * Returns error if the original middleware does not exist, or the middleware
+ * being added already exists.
+ */
+ insert(m: InitializeMiddleware, relativeTo: string, pos: RelativePosition): void
+ }
+ interface InitializeStep {
+ /**
+ * Swap removes the middleware by id, replacing it with the new middleware.
+ * Returns the middleware removed, or error if the middleware to be removed
+ * doesn't exist.
+ */
+ swap(id: string, m: InitializeMiddleware): InitializeMiddleware
+ }
+ interface InitializeStep {
+ /**
+ * Remove removes the middleware by id. Returns error if the middleware
+ * doesn't exist.
+ */
+ remove(id: string): InitializeMiddleware
+ }
+ interface InitializeStep {
+ /**
+ * List returns a list of the middleware in the step.
+ */
+ list(): Array
+ }
+ interface InitializeStep {
+ /**
+ * Clear removes all middleware in the step.
+ */
+ clear(): void
+ }
+ /**
+ * SerializeStep provides the ordered grouping of SerializeMiddleware to be
+ * invoked on a handler.
+ */
+ interface SerializeStep {
+ }
+ interface SerializeStep {
+ /**
+ * ID returns the unique ID of the step as a middleware.
+ */
+ id(): string
+ }
+ interface SerializeStep {
+ /**
+ * HandleMiddleware invokes the middleware by decorating the next handler
+ * provided. Returns the result of the middleware and handler being invoked.
+ *
+ * Implements Middleware interface.
+ */
+ handleMiddleware(ctx: context.Context, _arg10: {
+ }, next: Handler): [{
+ }, Metadata]
+ }
+ interface SerializeStep {
+ /**
+ * Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+ */
+ get(id: string): [SerializeMiddleware, boolean]
+ }
+ interface SerializeStep {
+ /**
+ * Add injects the middleware to the relative position of the middleware group.
+ * Returns an error if the middleware already exists.
+ */
+ add(m: SerializeMiddleware, pos: RelativePosition): void
+ }
+ interface SerializeStep {
+ /**
+ * Insert injects the middleware relative to an existing middleware ID.
+ * Returns error if the original middleware does not exist, or the middleware
+ * being added already exists.
+ */
+ insert(m: SerializeMiddleware, relativeTo: string, pos: RelativePosition): void
+ }
+ interface SerializeStep {
+ /**
+ * Swap removes the middleware by id, replacing it with the new middleware.
+ * Returns the middleware removed, or error if the middleware to be removed
+ * doesn't exist.
+ */
+ swap(id: string, m: SerializeMiddleware): SerializeMiddleware
+ }
+ interface SerializeStep {
+ /**
+ * Remove removes the middleware by id. Returns error if the middleware
+ * doesn't exist.
+ */
+ remove(id: string): SerializeMiddleware
+ }
+ interface SerializeStep {
+ /**
+ * List returns a list of the middleware in the step.
+ */
+ list(): Array
+ }
+ interface SerializeStep {
+ /**
+ * Clear removes all middleware in the step.
+ */
+ clear(): void
+ }
+}
+
+/**
+ * Package http provides the HTTP transport client and request/response types
+ * needed to round trip API operation calls with a service.
+ */
+namespace http {
+ // @ts-ignore
+ import smithy = smithy_go
+ /**
+ * AuthScheme defines an HTTP authentication scheme.
+ */
+ interface AuthScheme {
+ [key:string]: any;
+ schemeID(): string
+ identityResolver(_arg0: auth.IdentityResolverOptions): auth.IdentityResolver
+ signer(): Signer
+ }
+ // @ts-ignore
+ import iointernal = io
+ // @ts-ignore
+ import smithytime = time
+}
+
+namespace subscriptions {
+ /**
+ * Message defines a client's channel data.
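+ *
+ * A minimal shape sketch (illustrative only; the event name and payload
+ * below are placeholders, not part of the generated API):
+ *
+ * ```
+ * const msg: subscriptions.Message = {
+ *     name: "customEvent",
+ *     data: JSON.stringify({ hello: "world" }),
+ * }
+ * ```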
+ */
+ interface Message {
+ name: string
+ data: string|Array
+ }
+ /**
+ * Client is an interface for a generic subscription client.
+ */
+ interface Client {
+ [key:string]: any;
+ /**
+ * Id returns the unique id of the client.
+ */
+ id(): string
+ /**
+ * Channel returns the client's communication channel.
+ */
+ channel(): undefined
+ /**
+ * Subscriptions returns a shallow copy of the client subscriptions matching the prefixes.
+ * If no prefix is specified, returns all subscriptions.
+ */
+ subscriptions(...prefixes: string[]): _TygojaDict
+ /**
+ * Subscribe subscribes the client to the provided subscriptions list.
+ *
+ * Each subscription can also have "options" (json serialized SubscriptionOptions) as query parameter.
+ *
+ * Example:
+ *
+ * ```
+ * Subscribe(
+ *     "subscriptionA",
+ *     `subscriptionB?options={"query":{"a":1},"headers":{"x_token":"abc"}}`,
+ * )
+ * ```
+ */
+ subscribe(...subs: string[]): void
+ /**
+ * Unsubscribe unsubscribes the client from the provided subscriptions list.
+ */
+ unsubscribe(...subs: string[]): void
+ /**
+ * HasSubscription checks if the client is subscribed to `sub`.
+ */
+ hasSubscription(sub: string): boolean
+ /**
+ * Set stores any value to the client's context.
+ */
+ set(key: string, value: any): void
+ /**
+ * Unset removes a single value from the client's context.
+ */
+ unset(key: string): void
+ /**
+ * Get retrieves the key value from the client's context.
+ */
+ get(key: string): any
+ /**
+ * Discard marks the client as "discarded", meaning that it
+ * shouldn't be used anymore for sending new messages.
+ *
+ * It is safe to call Discard() multiple times.
+ */
+ discard(): void
+ /**
+ * IsDiscarded indicates whether the client has been "discarded"
+ * and should no longer be used.
+ */
+ isDiscarded(): boolean
+ /**
+ * Send sends the specified message to the client's channel (if not discarded).
+ */
+ send(m: Message): void
+ }
+}
+
+namespace types {
+ interface ArchiveStatus extends String{}
+ interface ArchiveStatus {
+ /**
+ * Values returns all known values for ArchiveStatus. Note that this can be
+ * expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface BucketAccelerateStatus extends String{}
+ interface BucketAccelerateStatus {
+ /**
+ * Values returns all known values for BucketAccelerateStatus. Note that this can
+ * be expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface BucketCannedACL extends String{}
+ interface BucketCannedACL {
+ /**
+ * Values returns all known values for BucketCannedACL. Note that this can be
+ * expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface BucketLocationConstraint extends String{}
+ interface BucketLocationConstraint {
+ /**
+ * Values returns all known values for BucketLocationConstraint. Note that this
+ * can be expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface BucketVersioningStatus extends String{}
+ interface BucketVersioningStatus {
+ /**
+ * Values returns all known values for BucketVersioningStatus.
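+ * For example (an illustrative sketch; the call style mirrors the other
+ * generated bindings and `status` is a placeholder):
+ *
+ * ```
+ * declare const status: types.BucketVersioningStatus
+ * const known = status.values() // e.g. ["Enabled", "Suspended"]
+ * ```
+ *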
Note that this can + * be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ChecksumAlgorithm extends String{} + interface ChecksumAlgorithm { + /** + * Values returns all known values for ChecksumAlgorithm. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ChecksumMode extends String{} + interface ChecksumMode { + /** + * Values returns all known values for ChecksumMode. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface EncodingType extends String{} + interface EncodingType { + /** + * Values returns all known values for EncodingType. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ExpressionType extends String{} + interface ExpressionType { + /** + * Values returns all known values for ExpressionType. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface LocationType extends String{} + interface LocationType { + /** + * Values returns all known values for LocationType. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface MetadataDirective extends String{} + interface MetadataDirective { + /** + * Values returns all known values for MetadataDirective. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface MFADeleteStatus extends String{} + interface MFADeleteStatus { + /** + * Values returns all known values for MFADeleteStatus. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ObjectAttributes extends String{} + interface ObjectAttributes { + /** + * Values returns all known values for ObjectAttributes. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ObjectCannedACL extends String{} + interface ObjectCannedACL { + /** + * Values returns all known values for ObjectCannedACL. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ObjectLockLegalHoldStatus extends String{} + interface ObjectLockLegalHoldStatus { + /** + * Values returns all known values for ObjectLockLegalHoldStatus. Note that this + * can be expanded in the future, and so it is only as up to date as the client. 
+ * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ObjectLockMode extends String{} + interface ObjectLockMode { + /** + * Values returns all known values for ObjectLockMode. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ObjectOwnership extends String{} + interface ObjectOwnership { + /** + * Values returns all known values for ObjectOwnership. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface OptionalObjectAttributes extends String{} + interface OptionalObjectAttributes { + /** + * Values returns all known values for OptionalObjectAttributes. Note that this + * can be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface Payer extends String{} + interface Payer { + /** + * Values returns all known values for Payer. Note that this can be expanded in + * the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ReplicationStatus extends String{} + interface ReplicationStatus { + /** + * Values returns all known values for ReplicationStatus. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface RequestCharged extends String{} + interface RequestCharged { + /** + * Values returns all known values for RequestCharged. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface RequestPayer extends String{} + interface RequestPayer { + /** + * Values returns all known values for RequestPayer. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ServerSideEncryption extends String{} + interface ServerSideEncryption { + /** + * Values returns all known values for ServerSideEncryption. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface SessionMode extends String{} + interface SessionMode { + /** + * Values returns all known values for SessionMode. Note that this can be expanded + * in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface StorageClass extends String{} + interface StorageClass { + /** + * Values returns all known values for StorageClass. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. 
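+ *
+ * A small guard sketch (illustrative only; `sc` is a placeholder for a
+ * StorageClass value):
+ *
+ * ```
+ * declare const sc: types.StorageClass
+ * const isKnown = sc.values().map(String).includes(String(sc))
+ * ```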
+ */ + values(): Array + } + interface TaggingDirective extends String{} + interface TaggingDirective { + /** + * Values returns all known values for TaggingDirective. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface TransitionDefaultMinimumObjectSize extends String{} + interface TransitionDefaultMinimumObjectSize { + /** + * Values returns all known values for TransitionDefaultMinimumObjectSize. Note + * that this can be expanded in the future, and so it is only as up to date as the + * client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + // @ts-ignore + import smithy = smithy_go + // @ts-ignore + import smithydocument = document + /** + * Configures the transfer acceleration state for an Amazon S3 bucket. For more + * information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide. + * + * [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + */ + type _subAdQMO = noSmithyDocumentSerde + interface AccelerateConfiguration extends _subAdQMO { + /** + * Specifies the transfer acceleration status of the bucket. + */ + status: BucketAccelerateStatus + } + /** + * Contains the elements that set the ACL permissions for an object per grantee. + */ + type _subvBLGU = noSmithyDocumentSerde + interface AccessControlPolicy extends _subvBLGU { + /** + * A list of grants. + */ + grants: Array + /** + * Container for the bucket owner's display name and ID. + */ + owner?: Owner + } + /** + * Specifies the configuration and any analyses for the analytics filter of an + * Amazon S3 bucket. + */ + type _subgdymU = noSmithyDocumentSerde + interface AnalyticsConfiguration extends _subgdymU { + /** + * The ID that identifies the analytics configuration. + * + * This member is required. + */ + id?: string + /** + * Contains data related to access patterns to be collected and made available to + * analyze the tradeoffs between different storage classes. + * + * This member is required. + */ + storageClassAnalysis?: StorageClassAnalysis + /** + * The filter used to describe a set of objects for analyses. A filter must have + * exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no + * filter is provided, all objects will be considered in any analysis. + */ + filter: AnalyticsFilter + } + /** + * In terms of implementation, a Bucket is a resource. + */ + type _subnVWkk = noSmithyDocumentSerde + interface Bucket extends _subnVWkk { + /** + * Date the bucket was created. This date can change when making changes to your + * bucket, such as editing its bucket policy. + */ + creationDate?: time.Time + /** + * The name of the bucket. + */ + name?: string + } + /** + * Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For + * more information, see [Object Lifecycle Management]in the Amazon S3 User Guide. + * + * [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html + */ + type _subJAxMi = noSmithyDocumentSerde + interface BucketLifecycleConfiguration extends _subJAxMi { + /** + * A lifecycle rule for individual objects in an Amazon S3 bucket. + * + * This member is required. + */ + rules: Array + } + /** + * Container for logging status information. 
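+ *
+ * A shape sketch (illustrative only; the loggingEnabled fields mirror the
+ * Go SDK's LoggingEnabled type and are assumptions here, since that type
+ * is declared elsewhere in this file):
+ *
+ * ```
+ * const status: types.BucketLoggingStatus = {
+ *     loggingEnabled: { targetBucket: "my-logs", targetPrefix: "access/" },
+ * }
+ * ```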
+ */ + type _subednLl = noSmithyDocumentSerde + interface BucketLoggingStatus extends _subednLl { + /** + * Describes where logs are stored and the prefix that Amazon S3 assigns to all + * log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + * Reference. + * + * [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html + */ + loggingEnabled?: LoggingEnabled + } + /** + * Contains all the possible checksum or digest values for an object. + */ + type _subwHZAb = noSmithyDocumentSerde + interface Checksum extends _subwHZAb { + /** + * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. 
+ * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA256?: string + } + /** + * Container for all (if there are any) keys between Prefix and the next + * occurrence of the string specified by a delimiter. CommonPrefixes lists keys + * that act like subdirectories in the directory specified by Prefix. For example, + * if the prefix is notes/ and the delimiter is a slash (/) as in + * notes/summer/july, the common prefix is notes/summer/. + */ + type _subatVOG = noSmithyDocumentSerde + interface CommonPrefix extends _subatVOG { + /** + * Container for the specified common prefix. + */ + prefix?: string + } + /** + * The container for the completed multipart upload details. + */ + type _subSccUS = noSmithyDocumentSerde + interface CompletedMultipartUpload extends _subSccUS { + /** + * Array of CompletedPart data types. + * + * If you do not supply a valid Part with your request, the service sends back an + * HTTP 400 response. + */ + parts: Array + } + /** + * Container for all response elements. + */ + type _subIajoo = noSmithyDocumentSerde + interface CopyObjectResult extends _subIajoo { + /** + * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be + * present if it was uploaded with the object. For more information, see [Checking object integrity]in the + * Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. For more information, see [Checking object integrity]in the + * Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. For more information, see [Checking object integrity]in the + * Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. For more information, see [Checking object integrity]in the + * Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA256?: string + /** + * Returns the ETag of the new object. The ETag reflects only changes to the + * contents of an object, not its metadata. + */ + eTag?: string + /** + * Creation date of the object. + */ + lastModified?: time.Time + } + /** + * Container for all response elements. + */ + type _subOurrh = noSmithyDocumentSerde + interface CopyPartResult extends _subOurrh { + /** + * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. 
For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA256?: string + /** + * Entity tag of the object. + */ + eTag?: string + /** + * Date and time at which the object was uploaded. + */ + lastModified?: time.Time + } + /** + * Describes the cross-origin access configuration for objects in an Amazon S3 + * bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. + * + * [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + */ + type _submJiRs = noSmithyDocumentSerde + interface CORSConfiguration extends _submJiRs { + /** + * A set of origins and methods (cross-origin access that you want to allow). You + * can add up to 100 rules to the configuration. + * + * This member is required. + */ + corsRules: Array + } + /** + * Specifies a cross-origin access rule for an Amazon S3 bucket. + */ + type _subXkLgD = noSmithyDocumentSerde + interface CORSRule extends _subXkLgD { + /** + * An HTTP method that you allow the origin to execute. Valid values are GET , PUT + * , HEAD , POST , and DELETE . + * + * This member is required. 
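+ *
+ * For example, a minimal rule sketch (the field names come from this
+ * interface; the values are placeholders):
+ *
+ * ```
+ * const rule: types.CORSRule = {
+ *     allowedMethods: ["GET", "PUT"],
+ *     allowedOrigins: ["https://example.com"],
+ *     allowedHeaders: [],
+ *     exposeHeaders: [],
+ *     maxAgeSeconds: 3600,
+ * }
+ * ```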
+ */ + allowedMethods: Array + /** + * One or more origins you want customers to be able to access the bucket from. + * + * This member is required. + */ + allowedOrigins: Array + /** + * Headers that are specified in the Access-Control-Request-Headers header. These + * headers are allowed in a preflight OPTIONS request. In response to any preflight + * OPTIONS request, Amazon S3 returns any requested headers that are allowed. + */ + allowedHeaders: Array + /** + * One or more headers in the response that you want customers to be able to + * access from their applications (for example, from a JavaScript XMLHttpRequest + * object). + */ + exposeHeaders: Array + /** + * Unique identifier for the rule. The value cannot be longer than 255 characters. + */ + id?: string + /** + * The time in seconds that your browser is to cache the preflight response for + * the specified resource. + */ + maxAgeSeconds?: number + } + /** + * The configuration information for the bucket. + */ + type _subUJNvK = noSmithyDocumentSerde + interface CreateBucketConfiguration extends _subUJNvK { + /** + * Specifies the information about the bucket that will be created. + * + * This functionality is only supported by directory buckets. + */ + bucket?: BucketInfo + /** + * Specifies the location where the bucket will be created. + * + * For directory buckets, the location type is Availability Zone. + * + * This functionality is only supported by directory buckets. + */ + location?: LocationInfo + /** + * Specifies the Region where the bucket will be created. You might choose a + * Region to optimize latency, minimize costs, or address regulatory requirements. + * For example, if you reside in Europe, you will probably find it advantageous to + * create buckets in the Europe (Ireland) Region. For more information, see [Accessing a bucket]in the + * Amazon S3 User Guide. + * + * If you don't specify a Region, the bucket is created in the US East (N. + * Virginia) Region (us-east-1) by default. + * + * This functionality is not supported for directory buckets. + * + * [Accessing a bucket]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + */ + locationConstraint: BucketLocationConstraint + } + /** + * Container for the objects to delete. + */ + type _subTMlPO = noSmithyDocumentSerde + interface Delete extends _subTMlPO { + /** + * The object to delete. + * + * Directory buckets - For directory buckets, an object that's composed entirely + * of whitespace characters is not supported by the DeleteObjects API operation. + * The request will receive a 400 Bad Request error and none of the objects in the + * request will be deleted. + * + * This member is required. + */ + objects: Array + /** + * Element to enable quiet mode for the request. When you add this element, you + * must set its value to true . + */ + quiet?: boolean + } + /** + * Information about the deleted object. + */ + type _submlJyi = noSmithyDocumentSerde + interface DeletedObject extends _submlJyi { + /** + * Indicates whether the specified object version that was permanently deleted was + * (true) or was not (false) a delete marker before deletion. In a simple DELETE, + * this header indicates whether (true) or not (false) the current version of the + * object is a delete marker. + * + * This functionality is not supported for directory buckets. + */ + deleteMarker?: boolean + /** + * The version ID of the delete marker created as a result of the DELETE + * operation. 
If you delete a specific object version, the value returned by this + * header is the version ID of the object version deleted. + * + * This functionality is not supported for directory buckets. + */ + deleteMarkerVersionId?: string + /** + * The name of the deleted object. + */ + key?: string + /** + * The version ID of the deleted object. + * + * This functionality is not supported for directory buckets. + */ + versionId?: string + } + /** + * Information about the delete marker. + */ + type _submPJJq = noSmithyDocumentSerde + interface DeleteMarkerEntry extends _submPJJq { + /** + * Specifies whether the object is (true) or is not (false) the latest version of + * an object. + */ + isLatest?: boolean + /** + * The object key. + */ + key?: string + /** + * Date and time when the object was last modified. + */ + lastModified?: time.Time + /** + * The account that created the delete marker.> + */ + owner?: Owner + /** + * Version ID of an object. + */ + versionId?: string + } + /** + * Container for all error elements. + */ + type _subIGHMu = noSmithyDocumentSerde + interface Error extends _subIGHMu { + /** + * The error code is a string that uniquely identifies an error condition. It is + * meant to be read and understood by programs that detect and handle errors by + * type. The following is a list of Amazon S3 error codes. For more information, + * see [Error responses]. + * + * ``` + * - Code: AccessDenied + * + * - Description: Access Denied + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: AccountProblem + * + * - Description: There is a problem with your Amazon Web Services account that + * prevents the action from completing successfully. Contact Amazon Web Services + * Support for further assistance. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: AllAccessDisabled + * + * - Description: All access to this Amazon S3 resource has been disabled. + * Contact Amazon Web Services Support for further assistance. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: AmbiguousGrantByEmailAddress + * + * - Description: The email address you provided is associated with more than + * one account. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: AuthorizationHeaderMalformed + * + * - Description: The authorization header you provided is invalid. + * + * - HTTP Status Code: 400 Bad Request + * + * - HTTP Status Code: N/A + * + * - Code: BadDigest + * + * - Description: The Content-MD5 you specified did not match what we received. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: BucketAlreadyExists + * + * - Description: The requested bucket name is not available. The bucket + * namespace is shared by all users of the system. Please select a different name + * and try again. + * + * - HTTP Status Code: 409 Conflict + * + * - SOAP Fault Code Prefix: Client + * + * - Code: BucketAlreadyOwnedByYou + * + * - Description: The bucket you tried to create already exists, and you own it. + * Amazon S3 returns this error in all Amazon Web Services Regions except in the + * North Virginia Region. For legacy compatibility, if you re-create an existing + * bucket that you already own in the North Virginia Region, Amazon S3 returns 200 + * OK and resets the bucket access control lists (ACLs). 
+ * + * - Code: 409 Conflict (in all Regions except the North Virginia Region) + * + * - SOAP Fault Code Prefix: Client + * + * - Code: BucketNotEmpty + * + * - Description: The bucket you tried to delete is not empty. + * + * - HTTP Status Code: 409 Conflict + * + * - SOAP Fault Code Prefix: Client + * + * - Code: CredentialsNotSupported + * + * - Description: This request does not support credentials. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: CrossLocationLoggingProhibited + * + * - Description: Cross-location logging not allowed. Buckets in one geographic + * location cannot log information to a bucket in another location. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: EntityTooSmall + * + * - Description: Your proposed upload is smaller than the minimum allowed + * object size. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: EntityTooLarge + * + * - Description: Your proposed upload exceeds the maximum allowed object size. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: ExpiredToken + * + * - Description: The provided token has expired. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: IllegalVersioningConfigurationException + * + * - Description: Indicates that the versioning configuration specified in the + * request is invalid. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: IncompleteBody + * + * - Description: You did not provide the number of bytes specified by the + * Content-Length HTTP header + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: IncorrectNumberOfFilesInPostRequest + * + * - Description: POST requires exactly one file upload per request. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InlineDataTooLarge + * + * - Description: Inline data exceeds the maximum allowed size. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InternalError + * + * - Description: We encountered an internal error. Please try again. + * + * - HTTP Status Code: 500 Internal Server Error + * + * - SOAP Fault Code Prefix: Server + * + * - Code: InvalidAccessKeyId + * + * - Description: The Amazon Web Services access key ID you provided does not + * exist in our records. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidAddressingHeader + * + * - Description: You must specify the Anonymous role. + * + * - HTTP Status Code: N/A + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidArgument + * + * - Description: Invalid Argument + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidBucketName + * + * - Description: The specified bucket is not valid. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidBucketState + * + * - Description: The request is not valid with the current state of the bucket. + * + * - HTTP Status Code: 409 Conflict + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidDigest + * + * - Description: The Content-MD5 you specified is not valid. 
+ * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidEncryptionAlgorithmError + * + * - Description: The encryption request you specified is not valid. The valid + * value is AES256. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidLocationConstraint + * + * - Description: The specified location constraint is not valid. For more + * information about Regions, see [How to Select a Region for Your Buckets]. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidObjectState + * + * - Description: The action is not valid for the current state of the object. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidPart + * + * - Description: One or more of the specified parts could not be found. The + * part might not have been uploaded, or the specified entity tag might not have + * matched the part's entity tag. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidPartOrder + * + * - Description: The list of parts was not in ascending order. Parts list must + * be specified in order by part number. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidPayer + * + * - Description: All access to this object has been disabled. Please contact + * Amazon Web Services Support for further assistance. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidPolicyDocument + * + * - Description: The content of the form does not meet the conditions specified + * in the policy document. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidRange + * + * - Description: The requested range cannot be satisfied. + * + * - HTTP Status Code: 416 Requested Range Not Satisfiable + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidRequest + * + * - Description: Please use AWS4-HMAC-SHA256 . + * + * - HTTP Status Code: 400 Bad Request + * + * - Code: N/A + * + * - Code: InvalidRequest + * + * - Description: SOAP requests must be made over an HTTPS connection. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidRequest + * + * - Description: Amazon S3 Transfer Acceleration is not supported for buckets + * with non-DNS compliant names. + * + * - HTTP Status Code: 400 Bad Request + * + * - Code: N/A + * + * - Code: InvalidRequest + * + * - Description: Amazon S3 Transfer Acceleration is not supported for buckets + * with periods (.) in their names. + * + * - HTTP Status Code: 400 Bad Request + * + * - Code: N/A + * + * - Code: InvalidRequest + * + * - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual + * style requests. + * + * - HTTP Status Code: 400 Bad Request + * + * - Code: N/A + * + * - Code: InvalidRequest + * + * - Description: Amazon S3 Transfer Accelerate is not configured on this bucket. + * + * - HTTP Status Code: 400 Bad Request + * + * - Code: N/A + * + * - Code: InvalidRequest + * + * - Description: Amazon S3 Transfer Accelerate is disabled on this bucket. + * + * - HTTP Status Code: 400 Bad Request + * + * - Code: N/A + * + * - Code: InvalidRequest + * + * - Description: Amazon S3 Transfer Acceleration is not supported on this + * bucket. 
Contact Amazon Web Services Support for more information. + * + * - HTTP Status Code: 400 Bad Request + * + * - Code: N/A + * + * - Code: InvalidRequest + * + * - Description: Amazon S3 Transfer Acceleration cannot be enabled on this + * bucket. Contact Amazon Web Services Support for more information. + * + * - HTTP Status Code: 400 Bad Request + * + * - Code: N/A + * + * - Code: InvalidSecurity + * + * - Description: The provided security credentials are not valid. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidSOAPRequest + * + * - Description: The SOAP request body is invalid. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidStorageClass + * + * - Description: The storage class you specified is not valid. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidTargetBucketForLogging + * + * - Description: The target bucket for logging does not exist, is not owned by + * you, or does not have the appropriate grants for the log-delivery group. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidToken + * + * - Description: The provided token is malformed or otherwise invalid. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: InvalidURI + * + * - Description: Couldn't parse the specified URI. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: KeyTooLongError + * + * - Description: Your key is too long. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MalformedACLError + * + * - Description: The XML you provided was not well-formed or did not validate + * against our published schema. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MalformedPOSTRequest + * + * - Description: The body of your POST request is not well-formed + * multipart/form-data. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MalformedXML + * + * - Description: This happens when the user sends malformed XML (XML that + * doesn't conform to the published XSD) for the configuration. The error message + * is, "The XML you provided was not well-formed or did not validate against our + * published schema." + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MaxMessageLengthExceeded + * + * - Description: Your request was too big. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MaxPostPreDataLengthExceededError + * + * - Description: Your POST request fields preceding the upload file were too + * large. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MetadataTooLarge + * + * - Description: Your metadata headers exceed the maximum allowed metadata size. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MethodNotAllowed + * + * - Description: The specified method is not allowed against this resource. + * + * - HTTP Status Code: 405 Method Not Allowed + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MissingAttachment + * + * - Description: A SOAP attachment was expected, but none were found. 
+ * + * - HTTP Status Code: N/A + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MissingContentLength + * + * - Description: You must provide the Content-Length HTTP header. + * + * - HTTP Status Code: 411 Length Required + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MissingRequestBodyError + * + * - Description: This happens when the user sends an empty XML document as a + * request. The error message is, "Request body is empty." + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MissingSecurityElement + * + * - Description: The SOAP 1.1 request is missing a security element. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: MissingSecurityHeader + * + * - Description: Your request is missing a required header. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: NoLoggingStatusForKey + * + * - Description: There is no such thing as a logging status subresource for a + * key. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: NoSuchBucket + * + * - Description: The specified bucket does not exist. + * + * - HTTP Status Code: 404 Not Found + * + * - SOAP Fault Code Prefix: Client + * + * - Code: NoSuchBucketPolicy + * + * - Description: The specified bucket does not have a bucket policy. + * + * - HTTP Status Code: 404 Not Found + * + * - SOAP Fault Code Prefix: Client + * + * - Code: NoSuchKey + * + * - Description: The specified key does not exist. + * + * - HTTP Status Code: 404 Not Found + * + * - SOAP Fault Code Prefix: Client + * + * - Code: NoSuchLifecycleConfiguration + * + * - Description: The lifecycle configuration does not exist. + * + * - HTTP Status Code: 404 Not Found + * + * - SOAP Fault Code Prefix: Client + * + * - Code: NoSuchUpload + * + * - Description: The specified multipart upload does not exist. The upload ID + * might be invalid, or the multipart upload might have been aborted or completed. + * + * - HTTP Status Code: 404 Not Found + * + * - SOAP Fault Code Prefix: Client + * + * - Code: NoSuchVersion + * + * - Description: Indicates that the version ID specified in the request does + * not match an existing version. + * + * - HTTP Status Code: 404 Not Found + * + * - SOAP Fault Code Prefix: Client + * + * - Code: NotImplemented + * + * - Description: A header you provided implies functionality that is not + * implemented. + * + * - HTTP Status Code: 501 Not Implemented + * + * - SOAP Fault Code Prefix: Server + * + * - Code: NotSignedUp + * + * - Description: Your account is not signed up for the Amazon S3 service. You + * must sign up before you can use Amazon S3. You can sign up at the following URL: + * [Amazon S3] + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: OperationAborted + * + * - Description: A conflicting conditional action is currently in progress + * against this resource. Try again. + * + * - HTTP Status Code: 409 Conflict + * + * - SOAP Fault Code Prefix: Client + * + * - Code: PermanentRedirect + * + * - Description: The bucket you are attempting to access must be addressed + * using the specified endpoint. Send all future requests to this endpoint. + * + * - HTTP Status Code: 301 Moved Permanently + * + * - SOAP Fault Code Prefix: Client + * + * - Code: PreconditionFailed + * + * - Description: At least one of the preconditions you specified did not hold. 
+ * + * - HTTP Status Code: 412 Precondition Failed + * + * - SOAP Fault Code Prefix: Client + * + * - Code: Redirect + * + * - Description: Temporary redirect. + * + * - HTTP Status Code: 307 Moved Temporarily + * + * - SOAP Fault Code Prefix: Client + * + * - Code: RestoreAlreadyInProgress + * + * - Description: Object restore is already in progress. + * + * - HTTP Status Code: 409 Conflict + * + * - SOAP Fault Code Prefix: Client + * + * - Code: RequestIsNotMultiPartContent + * + * - Description: Bucket POST must be of the enclosure-type multipart/form-data. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: RequestTimeout + * + * - Description: Your socket connection to the server was not read from or + * written to within the timeout period. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: RequestTimeTooSkewed + * + * - Description: The difference between the request time and the server's time + * is too large. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: RequestTorrentOfBucketError + * + * - Description: Requesting the torrent file of a bucket is not permitted. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: SignatureDoesNotMatch + * + * - Description: The request signature we calculated does not match the + * signature you provided. Check your Amazon Web Services secret access key and + * signing method. For more information, see [REST Authentication]and [SOAP Authentication]for details. + * + * - HTTP Status Code: 403 Forbidden + * + * - SOAP Fault Code Prefix: Client + * + * - Code: ServiceUnavailable + * + * - Description: Service is unable to handle request. + * + * - HTTP Status Code: 503 Service Unavailable + * + * - SOAP Fault Code Prefix: Server + * + * - Code: SlowDown + * + * - Description: Reduce your request rate. + * + * - HTTP Status Code: 503 Slow Down + * + * - SOAP Fault Code Prefix: Server + * + * - Code: TemporaryRedirect + * + * - Description: You are being redirected to the bucket while DNS updates. + * + * - HTTP Status Code: 307 Moved Temporarily + * + * - SOAP Fault Code Prefix: Client + * + * - Code: TokenRefreshRequired + * + * - Description: The provided token must be refreshed. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: TooManyBuckets + * + * - Description: You have attempted to create more buckets than allowed. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: UnexpectedContent + * + * - Description: This request does not support content. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: UnresolvableGrantByEmailAddress + * + * - Description: The email address you provided does not match any account on + * record. + * + * - HTTP Status Code: 400 Bad Request + * + * - SOAP Fault Code Prefix: Client + * + * - Code: UserKeyMustBeSpecified + * + * - Description: The bucket POST must contain the specified field name. If it + * is specified, check the order of the fields. 
+ *
+ * - HTTP Status Code: 400 Bad Request
+ *
+ * - SOAP Fault Code Prefix: Client
+ * ```
+ *
+ * [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+ * [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+ * [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+ * [Amazon S3]: http://aws.amazon.com/s3
+ * [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html
+ */
+ code?: string
+ /**
+ * The error key.
+ */
+ key?: string
+ /**
+ * The error message contains a generic description of the error condition in
+ * English. It is intended for a human audience. Simple programs display the
+ * message directly to the end user if they encounter an error condition they don't
+ * know how or don't care to handle. Sophisticated programs with more exhaustive
+ * error handling and proper internationalization are more likely to ignore the
+ * error message.
+ */
+ message?: string
+ /**
+ * The version ID of the error.
+ *
+ * This functionality is not supported for directory buckets.
+ */
+ versionId?: string
+ }
+ /**
+ * The error information.
+ */
+ type _subQUEOW = noSmithyDocumentSerde
+ interface ErrorDocument extends _subQUEOW {
+ /**
+ * The object key name to use when a 4XX class error occurs.
+ *
+ * Replacement must be made for object keys containing special characters (such as
+ * carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ *
+ * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ *
+ * This member is required.
+ */
+ key?: string
+ }
+ /**
+ * A container for specifying the configuration for Amazon EventBridge.
+ */
+ type _subUGNlL = noSmithyDocumentSerde
+ interface EventBridgeConfiguration extends _subUGNlL {
+ }
+ /**
+ * A collection of parts associated with a multipart upload.
+ */
+ type _subblghU = noSmithyDocumentSerde
+ interface GetObjectAttributesParts extends _subblghU {
+ /**
+ * Indicates whether the returned list of parts is truncated. A value of true
+ * indicates that the list was truncated. A list can be truncated if the number of
+ * parts exceeds the limit returned in the MaxParts element.
+ */
+ isTruncated?: boolean
+ /**
+ * The maximum number of parts allowed in the response.
+ */
+ maxParts?: number
+ /**
+ * When a list is truncated, this element specifies the last part in the list, as
+ * well as the value to use for the PartNumberMarker request parameter in a
+ * subsequent request.
+ */
+ nextPartNumberMarker?: string
+ /**
+ * The marker for the current part.
+ */
+ partNumberMarker?: string
+ /**
+ * A container for elements related to a particular part. A response can contain
+ * zero or more Parts elements.
+ *
+ * ```
+ * - General purpose buckets - For GetObjectAttributes , if an additional checksum
+ * (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1
+ * , or x-amz-checksum-sha256 ) isn't applied to the object specified in the
+ * request, the response doesn't return Part .
+ *
+ * - Directory buckets - For GetObjectAttributes , no matter whether an additional
+ * checksum is applied to the object specified in the request, the response returns
+ * Part .
+ * ```
+ */
+ parts: Array
+ /**
+ * The total number of parts.
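+ *
+ * A pagination sketch (illustrative only; `parts` is a placeholder for a
+ * GetObjectAttributesParts value returned by a GetObjectAttributes call):
+ *
+ * ```
+ * declare const parts: types.GetObjectAttributesParts
+ * if (parts.isTruncated) {
+ *     // request the next page, passing parts.nextPartNumberMarker as the
+ *     // PartNumberMarker parameter of the follow-up call (not shown)
+ * }
+ * ```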
+ */ + totalPartsCount?: number + } + /** + * Container for grant information. + */ + type _subwnudN = noSmithyDocumentSerde + interface Grant extends _subwnudN { + /** + * The person being granted permissions. + */ + grantee?: Grantee + /** + * Specifies the permission given to the grantee. + */ + permission: Permission + } + /** + * Container for the Suffix element. + */ + type _subEyvCu = noSmithyDocumentSerde + interface IndexDocument extends _subEyvCu { + /** + * A suffix that is appended to a request that is for a directory on the website + * endpoint. (For example, if the suffix is index.html and you make a request to + * samplebucket/images/ , the data that is returned will be for the object with the + * key name images/index.html .) The suffix must not be empty and must not include + * a slash character. + * + * Replacement must be made for object keys containing special characters (such as + * carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + * + * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + * + * This member is required. + */ + suffix?: string + } + /** + * Container element that identifies who initiated the multipart upload. + */ + type _sublRNgr = noSmithyDocumentSerde + interface Initiator extends _sublRNgr { + /** + * Name of the Principal. + * + * This functionality is not supported for directory buckets. + */ + displayName?: string + /** + * If the principal is an Amazon Web Services account, it provides the Canonical + * User ID. If the principal is an IAM User, it provides a user ARN value. + * + * Directory buckets - If the principal is an Amazon Web Services account, it + * provides the Amazon Web Services account ID. If the principal is an IAM User, it + * provides a user ARN value. + */ + id?: string + } + /** + * Describes the serialization format of the object. + */ + type _subfCWeH = noSmithyDocumentSerde + interface InputSerialization extends _subfCWeH { + /** + * Describes the serialization of a CSV-encoded object. + */ + csv?: CSVInput + /** + * Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + * Value: NONE. + */ + compressionType: CompressionType + /** + * Specifies JSON as object's input serialization format. + */ + json?: JSONInput + /** + * Specifies Parquet as object's input serialization format. + */ + parquet?: ParquetInput + } + /** + * Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. + * + * For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. + * + * [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + */ + type _subtWzGE = noSmithyDocumentSerde + interface IntelligentTieringConfiguration extends _subtWzGE { + /** + * The ID used to identify the S3 Intelligent-Tiering configuration. + * + * This member is required. + */ + id?: string + /** + * Specifies the status of the configuration. + * + * This member is required. + */ + status: IntelligentTieringStatus + /** + * Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + * + * This member is required. + */ + tierings: Array + /** + * Specifies a bucket filter. 
The configuration only includes objects that meet + * the filter's criteria. + */ + filter?: IntelligentTieringFilter + } + /** + * Specifies the inventory configuration for an Amazon S3 bucket. For more + * information, see [GET Bucket inventory]in the Amazon S3 API Reference. + * + * [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html + */ + type _subrcUOK = noSmithyDocumentSerde + interface InventoryConfiguration extends _subrcUOK { + /** + * Contains information about where to publish the inventory results. + * + * This member is required. + */ + destination?: InventoryDestination + /** + * The ID used to identify the inventory configuration. + * + * This member is required. + */ + id?: string + /** + * Object versions to include in the inventory list. If set to All , the list + * includes all the object versions, which adds the version-related fields + * VersionId , IsLatest , and DeleteMarker to the list. If set to Current , the + * list does not contain these version-related fields. + * + * This member is required. + */ + includedObjectVersions: InventoryIncludedObjectVersions + /** + * Specifies whether the inventory is enabled or disabled. If set to True , an + * inventory list is generated. If set to False , no inventory list is generated. + * + * This member is required. + */ + isEnabled?: boolean + /** + * Specifies the schedule for generating inventory results. + * + * This member is required. + */ + schedule?: InventorySchedule + /** + * Specifies an inventory filter. The inventory only includes objects that meet + * the filter's criteria. + */ + filter?: InventoryFilter + /** + * Contains the optional fields that are included in the inventory results. + */ + optionalFields: Array + } + /** + * A container for specifying the configuration for Lambda notifications. + */ + type _subyrYtM = noSmithyDocumentSerde + interface LambdaFunctionConfiguration extends _subyrYtM { + /** + * The Amazon S3 bucket event for which to invoke the Lambda function. For more + * information, see [Supported Event Types]in the Amazon S3 User Guide. + * + * [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + * + * This member is required. + */ + events: Array + /** + * The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes + * when the specified event type occurs. + * + * This member is required. + */ + lambdaFunctionArn?: string + /** + * Specifies object key name filtering rules. For information about key name + * filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + * + * [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html + */ + filter?: NotificationConfigurationFilter + /** + * An optional unique identifier for configurations in a notification + * configuration. If you don't provide one, Amazon S3 will assign an ID. + */ + id?: string + } + /** + * A lifecycle rule for individual objects in an Amazon S3 bucket. + * + * For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. + * + * [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html + */ + type _subjZLim = noSmithyDocumentSerde + interface LifecycleRule extends _subjZLim { + /** + * If 'Enabled', the rule is currently being applied. 
If 'Disabled', the rule is + * not currently being applied. + * + * This member is required. + */ + status: ExpirationStatus + /** + * Specifies the days since the initiation of an incomplete multipart upload that + * Amazon S3 will wait before permanently removing all parts of the upload. For + * more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. + * + * [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + */ + abortIncompleteMultipartUpload?: AbortIncompleteMultipartUpload + /** + * Specifies the expiration for the lifecycle of the object in the form of date, + * days and, whether the object has a delete marker. + */ + expiration?: LifecycleExpiration + /** + * The Filter is used to identify objects that a Lifecycle Rule applies to. A + * Filter must have exactly one of Prefix , Tag , or And specified. Filter is + * required if the LifecycleRule does not contain a Prefix element. + */ + filter: LifecycleRuleFilter + /** + * Unique identifier for the rule. The value cannot be longer than 255 characters. + */ + id?: string + /** + * Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 + * permanently deletes the noncurrent object versions. You set this lifecycle + * configuration action on a bucket that has versioning enabled (or suspended) to + * request that Amazon S3 delete noncurrent object versions at a specific period in + * the object's lifetime. + */ + noncurrentVersionExpiration?: NoncurrentVersionExpiration + /** + * Specifies the transition rule for the lifecycle rule that describes when + * noncurrent objects transition to a specific storage class. If your bucket is + * versioning-enabled (or versioning is suspended), you can set this action to + * request that Amazon S3 transition noncurrent object versions to a specific + * storage class at a set period in the object's lifetime. + */ + noncurrentVersionTransitions: Array + /** + * Prefix identifying one or more objects to which the rule applies. This is no + * longer used; use Filter instead. + * + * Replacement must be made for object keys containing special characters (such as + * carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + * + * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + * + * Deprecated: This member has been deprecated. + */ + prefix?: string + /** + * Specifies when an Amazon S3 object transitions to a specified storage class. + */ + transitions: Array + } + /** + * Describes where logs are stored and the prefix that Amazon S3 assigns to all + * log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + * Reference. + * + * [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html + */ + type _subkbPlf = noSmithyDocumentSerde + interface LoggingEnabled extends _subkbPlf { + /** + * Specifies the bucket where you want Amazon S3 to store server access logs. You + * can have your logs delivered to any bucket that you own, including the same + * bucket that is being logged. You can also configure multiple buckets to deliver + * their logs to the same target bucket. 
In this case, you should choose a + * different TargetPrefix for each source bucket so that the delivered log files + * can be distinguished by key. + * + * This member is required. + */ + targetBucket?: string + /** + * A prefix for all log object keys. If you store log files from multiple Amazon + * S3 buckets in a single bucket, you can use a prefix to distinguish which log + * files came from which bucket. + * + * This member is required. + */ + targetPrefix?: string + /** + * Container for granting information. + * + * Buckets that use the bucket owner enforced setting for Object Ownership don't + * support target grants. For more information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. + * + * [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general + */ + targetGrants: Array + /** + * Amazon S3 key format for log objects. + */ + targetObjectKeyFormat?: TargetObjectKeyFormat + } + /** + * Specifies a metrics configuration for the CloudWatch request metrics (specified + * by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an + * existing metrics configuration, note that this is a full replacement of the + * existing metrics configuration. If you don't include the elements you want to + * keep, they are erased. For more information, see [PutBucketMetricsConfiguration]. + * + * [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html + */ + type _subVyqCh = noSmithyDocumentSerde + interface MetricsConfiguration extends _subVyqCh { + /** + * The ID used to identify the metrics configuration. The ID has a 64 character + * limit and can only contain letters, numbers, periods, dashes, and underscores. + * + * This member is required. + */ + id?: string + /** + * Specifies a metrics configuration filter. The metrics configuration will only + * include objects that meet the filter's criteria. A filter must be a prefix, an + * object tag, an access point ARN, or a conjunction (MetricsAndOperator). + */ + filter: MetricsFilter + } + /** + * Container for the MultipartUpload for the Amazon S3 object. + */ + type _subJfQNT = noSmithyDocumentSerde + interface MultipartUpload extends _subJfQNT { + /** + * The algorithm that was used to create a checksum of the object. + */ + checksumAlgorithm: ChecksumAlgorithm + /** + * Date and time at which the multipart upload was initiated. + */ + initiated?: time.Time + /** + * Identifies who initiated the multipart upload. + */ + initiator?: Initiator + /** + * Key of the object for which the multipart upload was initiated. + */ + key?: string + /** + * Specifies the owner of the object that is part of the multipart upload. + * + * Directory buckets - The bucket owner is returned as the object owner for all + * the objects. + */ + owner?: Owner + /** + * The class of storage used to store the object. + * + * Directory buckets - Only the S3 Express One Zone storage class is supported by + * directory buckets to store objects. + */ + storageClass: StorageClass + /** + * Upload ID that identifies the multipart upload. + */ + uploadId?: string + } + /** + * A container for specifying the notification configuration of the bucket. If + * this element is empty, notifications are turned off for the bucket. 
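+ *
+ * For illustration only, a minimal value of this shape with a single SNS topic
+ * configuration (the event name is a standard S3 event type; the ARN is a
+ * placeholder, not a real value):
+ *
+ * ```
+ * const notifications = {
+ *   topicConfigurations: [{
+ *     events: ["s3:ObjectCreated:*"],
+ *     topicArn: "arn:aws:sns:us-east-1:123456789012:example-topic", // placeholder ARN
+ *   }],
+ *   queueConfigurations: [],
+ *   lambdaFunctionConfigurations: [],
+ * }
+ * ```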
+ */ + type _submfUYx = noSmithyDocumentSerde + interface NotificationConfiguration extends _submfUYx { + /** + * Enables delivery of events to Amazon EventBridge. + */ + eventBridgeConfiguration?: EventBridgeConfiguration + /** + * Describes the Lambda functions to invoke and the events for which to invoke + * them. + */ + lambdaFunctionConfigurations: Array + /** + * The Amazon Simple Queue Service queues to publish messages to and the events + * for which to publish messages. + */ + queueConfigurations: Array + /** + * The topic to which notifications are sent and the events for which + * notifications are generated. + */ + topicConfigurations: Array + } + /** + * An object consists of data and its descriptive metadata. + */ + type _subKzNJK = noSmithyDocumentSerde + interface Object extends _subKzNJK { + /** + * The algorithm that was used to create a checksum of the object. + */ + checksumAlgorithm: Array + /** + * The entity tag is a hash of the object. The ETag reflects changes only to the + * contents of an object, not its metadata. The ETag may or may not be an MD5 + * digest of the object data. Whether or not it is depends on how the object was + * created and how it is encrypted as described below: + * + * ``` + * - Objects created by the PUT Object, POST Object, or Copy operation, or + * through the Amazon Web Services Management Console, and are encrypted by SSE-S3 + * or plaintext, have ETags that are an MD5 digest of their object data. + * + * - Objects created by the PUT Object, POST Object, or Copy operation, or + * through the Amazon Web Services Management Console, and are encrypted by SSE-C + * or SSE-KMS, have ETags that are not an MD5 digest of their object data. + * + * - If an object is created by either the Multipart Upload or Part Copy + * operation, the ETag is not an MD5 digest, regardless of the method of + * encryption. If an object is larger than 16 MB, the Amazon Web Services + * Management Console will upload or copy that object as a Multipart Upload, and + * therefore the ETag will not be an MD5 digest. + * ``` + * + * Directory buckets - MD5 is not supported by directory buckets. + */ + eTag?: string + /** + * The name that you assign to an object. You use the object key to retrieve the + * object. + */ + key?: string + /** + * Creation date of the object. + */ + lastModified?: time.Time + /** + * The owner of the object + * + * Directory buckets - The bucket owner is returned as the object owner. + */ + owner?: Owner + /** + * Specifies the restoration status of an object. Objects in certain storage + * classes must be restored before they can be retrieved. For more information + * about these storage classes and how to work with archived objects, see [Working with archived objects]in the + * Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. Only the S3 Express + * One Zone storage class is supported by directory buckets to store objects. + * + * [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html + */ + restoreStatus?: RestoreStatus + /** + * Size in bytes of the object + */ + size?: number + /** + * The class of storage used to store the object. + * + * Directory buckets - Only the S3 Express One Zone storage class is supported by + * directory buckets to store objects. + */ + storageClass: ObjectStorageClass + } + /** + * The container element for Object Lock configuration parameters. 
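+ *
+ * A minimal sketch of such a configuration, assuming "Enabled" as the
+ * ObjectLockEnabled value (per the S3 API this is the value used when turning
+ * the lock on):
+ *
+ * ```
+ * const lockConfig = {
+ *   objectLockEnabled: "Enabled",
+ *   // rule: { ... } // optional ObjectLockRule with the default retention mode and period
+ * }
+ * ```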
+ */ + type _subslJnE = noSmithyDocumentSerde + interface ObjectLockConfiguration extends _subslJnE { + /** + * Indicates whether this bucket has an Object Lock configuration enabled. Enable + * ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket. + */ + objectLockEnabled: ObjectLockEnabled + /** + * Specifies the Object Lock rule for the specified object. Enable this rule + * when you apply ObjectLockConfiguration to a bucket. Bucket settings require + * both a mode and a period. The period can be either Days or Years but you must + * select one. You cannot specify Days and Years at the same time. + */ + rule?: ObjectLockRule + } + /** + * A legal hold configuration for an object. + */ + type _subQNweC = noSmithyDocumentSerde + interface ObjectLockLegalHold extends _subQNweC { + /** + * Indicates whether the specified object has a legal hold in place. + */ + status: ObjectLockLegalHoldStatus + } + /** + * A Retention configuration for an object. + */ + type _sublHSUJ = noSmithyDocumentSerde + interface ObjectLockRetention extends _sublHSUJ { + /** + * Indicates the Retention mode for the specified object. + */ + mode: ObjectLockRetentionMode + /** + * The date on which this Object Lock Retention will expire. + */ + retainUntilDate?: time.Time + } + /** + * The version of an object. + */ + type _subuWjvD = noSmithyDocumentSerde + interface ObjectVersion extends _subuWjvD { + /** + * The algorithm that was used to create a checksum of the object. + */ + checksumAlgorithm: Array + /** + * The entity tag is an MD5 hash of that version of the object. + */ + eTag?: string + /** + * Specifies whether the object is (true) or is not (false) the latest version of + * an object. + */ + isLatest?: boolean + /** + * The object key. + */ + key?: string + /** + * Date and time when the object was last modified. + */ + lastModified?: time.Time + /** + * Specifies the owner of the object. + */ + owner?: Owner + /** + * Specifies the restoration status of an object. Objects in certain storage + * classes must be restored before they can be retrieved. For more information + * about these storage classes and how to work with archived objects, see [Working with archived objects]in the + * Amazon S3 User Guide. + * + * [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html + */ + restoreStatus?: RestoreStatus + /** + * Size in bytes of the object. + */ + size?: number + /** + * The class of storage used to store the object. + */ + storageClass: ObjectVersionStorageClass + /** + * Version ID of an object. + */ + versionId?: string + } + /** + * Describes how results of the Select job are serialized. + */ + type _subgSPBp = noSmithyDocumentSerde + interface OutputSerialization extends _subgSPBp { + /** + * Describes the serialization of CSV-encoded Select results. + */ + csv?: CSVOutput + /** + * Specifies JSON as request's output serialization format. + */ + json?: JSONOutput + } + /** + * Container for the owner's display name and ID. + */ + type _subEdbjx = noSmithyDocumentSerde + interface Owner extends _subEdbjx { + /** + * Container for the display name of the owner. This value is only supported in + * the following Amazon Web Services Regions: + * + * ``` + * - US East (N. Virginia) + * + * - US West (N.
California) + * + * - US West (Oregon) + * + * - Asia Pacific (Singapore) + * + * - Asia Pacific (Sydney) + * + * - Asia Pacific (Tokyo) + * + * - Europe (Ireland) + * + * - South America (São Paulo) + * ``` + * + * This functionality is not supported for directory buckets. + */ + displayName?: string + /** + * Container for the ID of the owner. + */ + id?: string + } + /** + * The container element for a bucket's ownership controls. + */ + type _subpHgpP = noSmithyDocumentSerde + interface OwnershipControls extends _subpHgpP { + /** + * The container element for an ownership control rule. + * + * This member is required. + */ + rules: Array + } + /** + * Container for elements related to a part. + */ + type _subhxloO = noSmithyDocumentSerde + interface Part extends _subhxloO { + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumSHA256?: string + /** + * Entity tag returned when the part was uploaded. + */ + eTag?: string + /** + * Date and time at which the part was uploaded. + */ + lastModified?: time.Time + /** + * Part number identifying the part. This is a positive integer between 1 and + * 10,000. + */ + partNumber?: number + /** + * Size in bytes of the uploaded part data. + */ + size?: number + } + /** + * The container element for a bucket's policy status. 
+ */ + type _subodOuo = noSmithyDocumentSerde + interface PolicyStatus extends _subodOuo { + /** + * The policy status for this bucket. TRUE indicates that this bucket is public. + * FALSE indicates that the bucket is not public. + */ + isPublic?: boolean + } + /** + * The PublicAccessBlock configuration that you want to apply to this Amazon S3 + * bucket. You can enable the configuration options in any combination. For more + * information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in + * the Amazon S3 User Guide. + * + * [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status + */ + type _subabDMY = noSmithyDocumentSerde + interface PublicAccessBlockConfiguration extends _subabDMY { + /** + * Specifies whether Amazon S3 should block public access control lists (ACLs) for + * this bucket and objects in this bucket. Setting this element to TRUE causes the + * following behavior: + * + * ``` + * - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public. + * + * - PUT Object calls fail if the request includes a public ACL. + * + * - PUT Bucket calls fail if the request includes a public ACL. + * ``` + * + * Enabling this setting doesn't affect existing policies or ACLs. + */ + blockPublicAcls?: boolean + /** + * Specifies whether Amazon S3 should block public bucket policies for this + * bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT + * Bucket policy if the specified bucket policy allows public access. + * + * Enabling this setting doesn't affect existing bucket policies. + */ + blockPublicPolicy?: boolean + /** + * Specifies whether Amazon S3 should ignore public ACLs for this bucket and + * objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore + * all public ACLs on this bucket and objects in this bucket. + * + * Enabling this setting doesn't affect the persistence of any existing ACLs and + * doesn't prevent new public ACLs from being set. + */ + ignorePublicAcls?: boolean + /** + * Specifies whether Amazon S3 should restrict public bucket policies for this + * bucket. Setting this element to TRUE restricts access to this bucket to only + * Amazon Web Services service principals and authorized users within this account + * if the bucket has a public policy. + * + * Enabling this setting doesn't affect previously stored bucket policies, except + * that public and cross-account access within any public bucket policy, including + * non-public delegation to specific accounts, is blocked. + */ + restrictPublicBuckets?: boolean + } + /** + * Specifies the configuration for publishing messages to an Amazon Simple Queue + * Service (Amazon SQS) queue when Amazon S3 detects specified events. + */ + type _subtAqlQ = noSmithyDocumentSerde + interface QueueConfiguration extends _subtAqlQ { + /** + * A collection of bucket events for which to send notifications. + * + * This member is required. + */ + events: Array + /** + * The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + * publishes a message when it detects events of the specified type. + * + * This member is required. + */ + queueArn?: string + /** + * Specifies object key name filtering rules. For information about key name + * filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide.
+ * + * [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html + */ + filter?: NotificationConfigurationFilter + /** + * An optional unique identifier for configurations in a notification + * configuration. If you don't provide one, Amazon S3 will assign an ID. + */ + id?: string + } + /** + * Specifies the redirect behavior of all requests to a website endpoint of an + * Amazon S3 bucket. + */ + type _subTXThv = noSmithyDocumentSerde + interface RedirectAllRequestsTo extends _subTXThv { + /** + * Name of the host where requests are redirected. + * + * This member is required. + */ + hostName?: string + /** + * Protocol to use when redirecting requests. The default is the protocol that is + * used in the original request. + */ + protocol: Protocol + } + /** + * A container for replication rules. You can add up to 1,000 rules. The maximum + * size of a replication configuration is 2 MB. + */ + type _subiVRed = noSmithyDocumentSerde + interface ReplicationConfiguration extends _subiVRed { + /** + * The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role + * that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication]in + * the Amazon S3 User Guide. + * + * [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html + * + * This member is required. + */ + role?: string + /** + * A container for one or more replication rules. A replication configuration must + * have at least one rule and can contain a maximum of 1,000 rules. + * + * This member is required. + */ + rules: Array + } + /** + * Container for Payer. + */ + type _subsnwVf = noSmithyDocumentSerde + interface RequestPaymentConfiguration extends _subsnwVf { + /** + * Specifies who pays for the download and request fees. + * + * This member is required. + */ + payer: Payer + } + /** + * Container for specifying if periodic QueryProgress messages should be sent. + */ + type _subefJEa = noSmithyDocumentSerde + interface RequestProgress extends _subefJEa { + /** + * Specifies whether periodic QueryProgress frames should be sent. Valid values: + * TRUE, FALSE. Default value: FALSE. + */ + enabled?: boolean + } + /** + * Container for restore job parameters. + */ + type _subiBseV = noSmithyDocumentSerde + interface RestoreRequest extends _subiBseV { + /** + * Lifetime of the active copy in days. Do not use with restores that specify + * OutputLocation . + * + * The Days element is required for regular restores, and must not be provided for + * select requests. + */ + days?: number + /** + * The optional description for the job. + */ + description?: string + /** + * S3 Glacier related parameters pertaining to this job. Do not use with restores + * that specify OutputLocation . + */ + glacierJobParameters?: GlacierJobParameters + /** + * Describes the location where the restore job's output is stored. + */ + outputLocation?: OutputLocation + /** + * Amazon S3 Select is no longer available to new customers. Existing customers of + * Amazon S3 Select can continue to use the feature as usual. [Learn more] + * + * Describes the parameters for Select job types. + * + * [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + */ + selectParameters?: SelectParameters + /** + * Retrieval tier at which the restore will be processed. 
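+ *
+ * For example, assuming the standard S3 retrieval tiers ("Standard", "Bulk",
+ * "Expedited"), a simple restore request could look like:
+ *
+ * ```
+ * const restore = {
+ *   days: 7,
+ *   tier: "Standard",
+ * }
+ * ```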
+ */ + tier: Tier + /** + * Amazon S3 Select is no longer available to new customers. Existing customers of + * Amazon S3 Select can continue to use the feature as usual. [Learn more] + * + * Type of restore request. + * + * [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + */ + type: RestoreRequestType + } + /** + * Specifies the redirect behavior and when a redirect is applied. For more + * information about routing rules, see [Configuring advanced conditional redirects]in the Amazon S3 User Guide. + * + * [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects + */ + type _submoxtI = noSmithyDocumentSerde + interface RoutingRule extends _submoxtI { + /** + * Container for redirect information. You can redirect requests to another host, + * to another page, or with another protocol. In the event of an error, you can + * specify a different error code to return. + * + * This member is required. + */ + redirect?: Redirect + /** + * A container for describing a condition that must be met for the specified + * redirect to apply. For example, 1. If request is for pages in the /docs folder, + * redirect to the /documents folder. 2. If request results in HTTP error 4xx, + * redirect request to another host where you might process the error. + */ + condition?: Condition + } + /** + * Specifies the byte range of the object to get the records from. A record is + * processed when its first byte is contained by the range. This parameter is + * optional, but when specified, it must not be empty. See RFC 2616, Section + * 14.35.1 about how to specify the start and end of the range. + */ + type _subRivjC = noSmithyDocumentSerde + interface ScanRange extends _subRivjC { + /** + * Specifies the end of the byte range. This parameter is optional. Valid values: + * non-negative integers. The default value is one less than the size of the object + * being queried. If only the End parameter is supplied, it is interpreted to mean + * scan the last N bytes of the file. For example, 50 means scan the last 50 bytes. + */ + end?: number + /** + * Specifies the start of the byte range. This parameter is optional. Valid + * values: non-negative integers. The default value is 0. If only start is + * supplied, it means scan from that point to the end of the file. For example, 50 + * means scan from byte 50 until the end of the file. + */ + start?: number + } + /** + * Specifies the default server-side-encryption configuration. + */ + type _subVPqsj = noSmithyDocumentSerde + interface ServerSideEncryptionConfiguration extends _subVPqsj { + /** + * Container for information about a particular server-side encryption + * configuration rule. + * + * This member is required. + */ + rules: Array + } + /** + * The established temporary security credentials of the session. + * + * Directory buckets - These session credentials are only supported for the + * authentication and authorization of Zonal endpoint API operations on directory + * buckets. + */ + type _subCZMxI = noSmithyDocumentSerde + interface SessionCredentials extends _subCZMxI { + /** + * A unique identifier that's associated with a secret access key. The access key + * ID and the secret access key are used together to sign programmatic Amazon Web + * Services requests cryptographically. + * + * This member is required. 
+ */ + accessKeyId?: string + /** + * Temporary security credentials expire after a specified interval. After + * temporary credentials expire, any calls that you make with those credentials + * will fail. So you must generate a new set of temporary credentials. Temporary + * credentials cannot be extended or refreshed beyond the original specified + * interval. + * + * This member is required. + */ + expiration?: time.Time + /** + * A key that's used with the access key ID to cryptographically sign programmatic + * Amazon Web Services requests. Signing a request identifies the sender and + * prevents the request from being altered. + * + * This member is required. + */ + secretAccessKey?: string + /** + * A part of the temporary security credentials. The session token is used to + * validate the temporary security credentials. + * + * This member is required. + */ + sessionToken?: string + } + /** + * A container of a key value name pair. + */ + type _sublEKMb = noSmithyDocumentSerde + interface Tag extends _sublEKMb { + /** + * Name of the object key. + * + * This member is required. + */ + key?: string + /** + * Value of the tag. + * + * This member is required. + */ + value?: string + } + /** + * Container for TagSet elements. + */ + type _subDFXKG = noSmithyDocumentSerde + interface Tagging extends _subDFXKG { + /** + * A collection for a set of tags + * + * This member is required. + */ + tagSet: Array + } + /** + * A container for specifying the configuration for publication of messages to an + * Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects + * specified events. + */ + type _subBtArW = noSmithyDocumentSerde + interface TopicConfiguration extends _subBtArW { + /** + * The Amazon S3 bucket event about which to send notifications. For more + * information, see [Supported Event Types]in the Amazon S3 User Guide. + * + * [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + * + * This member is required. + */ + events: Array + /** + * The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + * publishes a message when it detects events of the specified type. + * + * This member is required. + */ + topicArn?: string + /** + * Specifies object key name filtering rules. For information about key name + * filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + * + * [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html + */ + filter?: NotificationConfigurationFilter + /** + * An optional unique identifier for configurations in a notification + * configuration. If you don't provide one, Amazon S3 will assign an ID. + */ + id?: string + } + /** + * Describes the versioning state of an Amazon S3 bucket. For more information, + * see [PUT Bucket versioning]in the Amazon S3 API Reference. + * + * [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html + */ + type _subVyFBK = noSmithyDocumentSerde + interface VersioningConfiguration extends _subVyFBK { + /** + * Specifies whether MFA delete is enabled in the bucket versioning configuration. + * This element is only returned if the bucket has been configured with MFA delete. + * If the bucket has never been so configured, this element is not returned. + */ + mfaDelete: MFADelete + /** + * The versioning state of the bucket. 
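+ *
+ * For example, to describe a bucket with versioning turned on (the S3 API uses
+ * "Enabled" and "Suspended" as the two states):
+ *
+ * ```
+ * const versioning = {
+ *   status: "Enabled",
+ * }
+ * ```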
+ */ + status: BucketVersioningStatus + } + /** + * Specifies website configuration parameters for an Amazon S3 bucket. + */ + type _submmRKx = noSmithyDocumentSerde + interface WebsiteConfiguration extends _submmRKx { + /** + * The name of the error document for the website. + */ + errorDocument?: ErrorDocument + /** + * The name of the index document for the website. + */ + indexDocument?: IndexDocument + /** + * The redirect behavior for every request to this bucket's website endpoint. + * + * If you specify this property, you can't specify any other property. + */ + redirectAllRequestsTo?: RedirectAllRequestsTo + /** + * Rules that define when a redirect is applied and the redirect behavior. + */ + routingRules: Array + } +} + /** * Package echo implements high performance, minimalist Go web framework. * @@ -18647,17 +39630,366 @@ namespace echo { } } -namespace search { +/** + * Package bearer provides middleware and utilities for authenticating API + * operation calls with a Bearer Token. + */ +namespace bearer { + // @ts-ignore + import smithyhttp = http /** - * Result defines the returned search result structure. + * Token provides a type wrapping a bearer token and expiration metadata. */ - interface Result { - page: number - perPage: number - totalItems: number - totalPages: number - items: any + interface Token { + value: string + canExpire: boolean + expires: time.Time } + interface Token { + /** + * Expired returns if the token's Expires time is before or equal to the time + * provided. If CanExpire is false, Expired will always return false. + */ + expired(now: time.Time): boolean + } + // @ts-ignore + import smithycontext = context +} + +/** + * Package aws provides the core SDK's utilities and shared types. Use this package's + * utilities to simplify setting and reading API operation parameters. + * + * # Value and Pointer Conversion Utilities + * + * This package includes a helper conversion utility for each scalar type the SDK's + * APIs use. These utilities make getting a pointer to a scalar value, and + * dereferencing a pointer, easier. + * + * Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. + * The Pointer to Value form will safely dereference the pointer and return its value. + * If the pointer was nil, the scalar's zero value will be returned. + * + * The value to pointer functions will be named after the scalar type. So to get a + * *string from a string value, use the "String" function. This makes it easy + * to get a pointer to a literal string value, because getting the address of a + * literal requires assigning the value to a variable first. + * + * ``` + * var strPtr *string + * + * // Without the SDK's conversion functions + * str := "my string" + * strPtr = &str + * + * // With the SDK's conversion functions + * strPtr = aws.String("my string") + * + * // Convert *string to string value + * str = aws.ToString(strPtr) + * ``` + * + * In addition to scalars the aws package also includes conversion utilities for + * maps and slices of commonly used types in API parameters. The map and slice + * conversion functions follow a similar naming pattern to the scalar conversion + * functions.
+ * + * ``` + * var strPtrs []*string + * var strs []string = []string{"Go", "Gophers", "Go"} + * + * // Convert []string to []*string + * strPtrs = aws.StringSlice(strs) + * + * // Convert []*string to []string + * strs = aws.ToStringSlice(strPtrs) + * ``` + * + * # SDK Default HTTP Client + * + * The SDK will use the http.DefaultClient if a HTTP client is not provided to + * the SDK's Session, or service client constructor. This means that if the + * http.DefaultClient is modified by other components of your application the + * modifications will be picked up by the SDK as well. + * + * In some cases this might be intended, but it is a better practice to create + * a custom HTTP Client to share explicitly through your application. You can + * configure the SDK to use the custom HTTP Client by setting the HTTPClient + * value of the SDK's Config type when creating a Session or service client. + */ +/** + * Package aws provides core functionality for making requests to AWS services. + */ +namespace aws { + // @ts-ignore + import smithybearer = bearer + // @ts-ignore + import sdkrand = rand + /** + * A Credentials is the AWS credentials value for individual credential fields. + */ + interface Credentials { + /** + * AWS Access key ID + */ + accessKeyID: string + /** + * AWS Secret Access Key + */ + secretAccessKey: string + /** + * AWS Session Token + */ + sessionToken: string + /** + * Source of the credentials + */ + source: string + /** + * States if the credentials can expire or not. + */ + canExpire: boolean + /** + * The time the credentials will expire at. Should be ignored if CanExpire + * is false. + */ + expires: time.Time + /** + * The ID of the account for the credentials. + */ + accountID: string + } + interface Credentials { + /** + * Expired returns if the credentials have expired. + */ + expired(): boolean + } + interface Credentials { + /** + * HasKeys returns if the credentials keys are set. + */ + hasKeys(): boolean + } + /** + * Endpoint represents the endpoint a service client should make API operation + * calls to. + * + * The SDK will automatically resolve these endpoints per API client using an + * internal endpoint resolvers. If you'd like to provide custom endpoint + * resolving behavior you can implement the EndpointResolver interface. + * + * Deprecated: This structure was used with the global [EndpointResolver] + * interface, which has been deprecated in favor of service-specific endpoint + * resolution. See the deprecation docs on that interface for more information. + */ + interface Endpoint { + /** + * The base URL endpoint the SDK API clients will use to make API calls to. + * The SDK will suffix URI path and query elements to this endpoint. + */ + url: string + /** + * Specifies if the endpoint's hostname can be modified by the SDK's API + * client. + * + * If the hostname is mutable the SDK API clients may modify any part of + * the hostname based on the requirements of the API, (e.g. adding, or + * removing content in the hostname). Such as, Amazon S3 API client + * prefixing "bucketname" to the hostname, or changing the + * hostname service name component from "s3." to "s3-accesspoint.dualstack." + * for the dualstack endpoint of an S3 Accesspoint resource. + * + * Care should be taken when providing a custom endpoint for an API. If the + * endpoint hostname is mutable, and the client cannot modify the endpoint + * correctly, the operation call will most likely fail, or have undefined + * behavior. 
+ * + * If hostname is immutable, the SDK API clients will not modify the + * hostname of the URL. This may cause the API client not to function + * correctly if the API requires the operation specific hostname values + * to be used by the client. + * + * This flag does not modify the API client's behavior if this endpoint + * will be used instead of Endpoint Discovery, or if the endpoint will be + * used to perform Endpoint Discovery. That behavior is configured via the + * API Client's Options. + */ + hostnameImmutable: boolean + /** + * The AWS partition the endpoint belongs to. + */ + partitionID: string + /** + * The service name that should be used for signing the requests to the + * endpoint. + */ + signingName: string + /** + * The region that should be used for signing the request to the endpoint. + */ + signingRegion: string + /** + * The signing method that should be used for signing the requests to the + * endpoint. + */ + signingMethod: string + /** + * The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata. + * When providing a custom endpoint, you should set the source as EndpointSourceCustom. + * If source is not provided when providing a custom endpoint, the SDK may not + * perform required host mutations correctly. Source should be used along with + * HostnameImmutable property as per the usage requirement. + */ + source: EndpointSource + } + /** + * ExecutionEnvironmentID is the AWS execution environment runtime identifier. + */ + interface ExecutionEnvironmentID extends String{} +} + +/** + * Package s3 provides the API client, operations, and parameter types for Amazon + * Simple Storage Service. + */ +namespace s3 { + // @ts-ignore + import awsmiddleware = middleware + // @ts-ignore + import awshttp = http + // @ts-ignore + import internalauth = auth + // @ts-ignore + import internalauthsmithy = smithy + // @ts-ignore + import internalConfig = configsources + // @ts-ignore + import internalmiddleware = middleware + // @ts-ignore + import acceptencodingcust = accept_encoding + // @ts-ignore + import internalChecksum = checksum + // @ts-ignore + import presignedurlcust = presigned_url + // @ts-ignore + import s3sharedconfig = config + // @ts-ignore + import s3cust = customizations + // @ts-ignore + import smithy = smithy_go + // @ts-ignore + import smithyauth = auth + // @ts-ignore + import smithydocument = document + // @ts-ignore + import smithyhttp = http + interface noSmithyDocumentSerde extends smithydocument.NoSerde{} + interface HTTPSignerV4 { + [key:string]: any; + signHTTP(ctx: context.Context, credentials: aws.Credentials, r: http.Request, payloadHash: string, service: string, region: string, signingTime: time.Time, ...optFns: ((_arg0: v4.SignerOptions) => void)[]): void + } + // @ts-ignore + import smithyxml = xml + // @ts-ignore + import smithyio = io + // @ts-ignore + import smithytime = time + // @ts-ignore + import smithywaiter = waiter + // @ts-ignore + import smithysync = sync + /** + * SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent operation. + * + * For testing and mocking the event stream this type should be initialized via + * the NewSelectObjectContentEventStream constructor function. Using the functional options + * to pass in nested mock behavior. + */ + interface SelectObjectContentEventStream { + /** + * SelectObjectContentEventStreamReader is the EventStream reader for the + * SelectObjectContentEventStream events. 
This value is automatically set by the + * SDK when the API call is made Use this member when unit testing your code with + * the SDK to mock out the EventStream Reader. + * + * Must not be nil. + */ + reader: SelectObjectContentEventStreamReader + } + interface SelectObjectContentEventStream { + /** + * Events returns a channel to read events from. + */ + events(): undefined + } + interface SelectObjectContentEventStream { + /** + * Close closes the stream. This will also cause the stream to be closed. + * Close must be called when done using the stream API. Not calling Close + * may result in resource leaks. + * + * Will close the underlying EventStream writer and reader, and no more events can be + * sent or received. + */ + close(): void + } + interface SelectObjectContentEventStream { + /** + * Err returns any error that occurred while reading or writing EventStream Events + * from the service API's response. Returns nil if there were no errors. + */ + err(): void + } + /** + * AuthSchemeResolver returns a set of possible authentication options for an + * operation. + */ + interface AuthSchemeResolver { + [key:string]: any; + resolveAuthSchemes(_arg0: context.Context, _arg1: AuthResolverParameters): Array<(smithyauth.Option | undefined)> + } + // @ts-ignore + import internalcontext = context + // @ts-ignore + import awsxml = xml + // @ts-ignore + import internalendpoints = endpoints + // @ts-ignore + import smithyendpoints = endpoints + /** + * EndpointResolverOptions is the service endpoint resolver options + */ + interface EndpointResolverOptions extends internalendpoints.Options{} + /** + * EndpointResolver interface for resolving service endpoints. + */ + interface EndpointResolver { + [key:string]: any; + resolveEndpoint(region: string, options: EndpointResolverOptions): aws.Endpoint + } + /** + * EndpointResolverV2 provides the interface for resolving service endpoints. + */ + interface EndpointResolverV2 { + [key:string]: any; + /** + * ResolveEndpoint attempts to resolve the endpoint with the provided options, + * returning the endpoint if found. Otherwise an error is returned. + */ + resolveEndpoint(ctx: context.Context, params: EndpointParameters): smithyendpoints.Endpoint + } + /** + * ExpressCredentialsProvider retrieves credentials for operations against the + * S3Express storage class. + */ + interface ExpressCredentialsProvider extends customizations.S3ExpressCredentialsProvider{} + interface HTTPClient { + [key:string]: any; + do(_arg0: http.Request): (http.Response) + } + // @ts-ignore + import v4 = signer } namespace settings { @@ -18680,87 +40012,7 @@ namespace settings { * Resolve replaces the placeholder parameters in the current email * template and returns its components as ready-to-use strings. */ - resolve(appName: string, appUrl: string, token: string): string - } -} - -namespace subscriptions { - /** - * Message defines a client's channel data. - */ - interface Message { - name: string - data: string|Array - } - /** - * Client is an interface for a generic subscription client. - */ - interface Client { - [key:string]: any; - /** - * Id Returns the unique id of the client. - */ - id(): string - /** - * Channel returns the client's communication channel. - */ - channel(): undefined - /** - * Subscriptions returns a shallow copy of the client subscriptions matching the prefixes. - * If no prefix is specified, returns all subscriptions. 
- */ - subscriptions(...prefixes: string[]): _TygojaDict - /** - * Subscribe subscribes the client to the provided subscriptions list. - * - * Each subscription can also have "options" (json serialized SubscriptionOptions) as query parameter. - * - * Example: - * - * ``` - * Subscribe( - * "subscriptionA", - * `subscriptionB?options={"query":{"a":1},"headers":{"x_token":"abc"}}`, - * ) - * ``` - */ - subscribe(...subs: string[]): void - /** - * Unsubscribe unsubscribes the client from the provided subscriptions list. - */ - unsubscribe(...subs: string[]): void - /** - * HasSubscription checks if the client is subscribed to `sub`. - */ - hasSubscription(sub: string): boolean - /** - * Set stores any value to the client's context. - */ - set(key: string, value: any): void - /** - * Unset removes a single value from the client's context. - */ - unset(key: string): void - /** - * Get retrieves the key value from the client's context. - */ - get(key: string): any - /** - * Discard marks the client as "discarded", meaning that it - * shouldn't be used anymore for sending new messages. - * - * It is safe to call Discard() multiple times. - */ - discard(): void - /** - * IsDiscarded indicates whether the client has been "discarded" - * and should no longer be used. - */ - isDiscarded(): boolean - /** - * Send sends the specified message to the client's channel (if not discarded). - */ - send(m: Message): void + resolve(appName: string, appUrl: string, token: string): [string, string, string] } } @@ -19128,8 +40380,10 @@ namespace subscriptions { * Now computeExpensiveValue will only be called when the line is enabled. * * The built-in handlers acquire a lock before calling [io.Writer.Write] - * to ensure that each record is written in one piece. User-defined - * handlers are responsible for their own locking. + * to ensure that exactly one [Record] is written at a time in its entirety. + * Although each log record has a timestamp, + * the built-in handlers do not use that time to sort the written records. + * User-defined handlers are responsible for their own locking and sorting. * * # Writing a handler * @@ -19301,19 +40555,6 @@ namespace slog { import loginternal = internal } -namespace hook { - /** - * Handler defines a hook handler function. - */ - interface Handler {(e: T): void } - /** - * wrapped local Hook embedded struct to limit the public API surface. - */ - type _subBldse = Hook - interface mainHook extends _subBldse { - } -} - /** * Package core is the backbone of PocketBase. * @@ -19334,6 +40575,279 @@ namespace core { } } +namespace subscriptions { +} + +/** + * Package middleware provides transport agnostic middleware for decorating SDK + * handlers. + * + * The Smithy middleware stack provides ordered behavior to be invoked on an + * underlying handler. The stack is separated into steps that are invoked in a + * static order. A step is a collection of middleware that are injected into a + * ordered list defined by the user. The user may add, insert, swap, and remove a + * step's middleware. When the stack is invoked the step middleware become static, + * and their order cannot be modified. + * + * A stack and its step middleware are **not** safe to modify concurrently. + * + * A stack will use the ordered list of middleware to decorate a underlying + * handler. A handler could be something like an HTTP Client that round trips an + * API operation over HTTP. + * + * Smithy Middleware Stack + * + * A Stack is a collection of middleware that wrap a handler. 
The stack can be + * broken down into discrete steps. Each step may contain zero or more middleware + * specific to that step. + * + * A Stack Step is a predefined set of middleware that are invoked in a static + * order by the Stack. These steps represent fixed points in the middleware stack + * for organizing specific behavior, such as serialize and build. A Stack Step is + * composed of zero or more middleware that are specific to that step. A step may + * define its own set of input/output parameters, which the generic input/output + * parameters are cast from. A step calls its middleware recursively, before + * calling the next step in the stack, returning the result or error of the step + * middleware decorating the underlying handler. + * + * * Initialize: Prepares the input, and sets any default parameters as needed + * (e.g. idempotency token, and presigned URLs). + * + * * Serialize: Serializes the prepared input into a data structure that can be + * consumed by the target transport's message (e.g. REST-JSON serialization). + * + * * Build: Adds additional metadata to the serialized transport message (e.g. + * HTTP's Content-Length header, or body checksum). Decorations and + * modifications to the message should be copied to all message attempts. + * + * * Finalize: Performs final preparations needed before sending the message. The + * message should already be complete by this stage, and is only altered to + * meet the expectations of the recipient (e.g. Retry and AWS SigV4 request + * signing). + * + * * Deserialize: Reacts to the handler's response returned by the recipient of + * the request message. Deserializes the response into a structured type or + * error above stacks can react to. + * + * Adding Middleware to a Stack Step + * + * Middleware can be added to the front or back of a step, or relative, by name, + * to an existing middleware in that step. If a middleware does not have a name, + * a unique name will be generated for it when it is added to the step. + * + * ``` + * // Create middleware stack + * stack := middleware.NewStack() + * + * // Add middleware to stack steps + * stack.Initialize.Add(paramValidationMiddleware, middleware.After) + * stack.Serialize.Add(marshalOperationFoo, middleware.After) + * stack.Deserialize.Add(unmarshalOperationFoo, middleware.After) + * + * // Invoke middleware on handler. + * resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler) + * ``` + */ +namespace middleware { + /** + * RelativePosition specifies the relative position of a middleware + * in an ordered group. + */ + interface RelativePosition extends Number{} + /** + * BuildMiddleware provides the interface for middleware specific to the + * build step. Delegates to the next BuildHandler for further + * processing. + */ + interface BuildMiddleware { + [key:string]: any; + /** + * Unique ID for the middleware in the BuildStep. The step does not allow + * duplicate IDs. + */ + id(): string + /** + * Invokes the middleware behavior which must delegate to the next handler + * for the middleware chain to continue. The method must return a result or + * error to its caller. + */ + handleBuild(ctx: context.Context, _arg10: BuildInput, next: BuildHandler): [BuildOutput, Metadata] + } + /** + * DeserializeMiddleware provides the interface for middleware specific to the + * deserialize step. Delegates to the next DeserializeHandler for further + * processing.
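+ *
+ * A sketch of a conforming middleware that simply forwards to the next handler;
+ * the name and the comment are illustrative only, and the handler method on
+ * next is assumed to mirror this interface:
+ *
+ * ```
+ * const passthrough = {
+ *   id: () => "PassthroughDeserialize", // hypothetical ID
+ *   handleDeserialize: (ctx, input, next) => {
+ *     const [output, metadata] = next.handleDeserialize(ctx, input)
+ *     return [output, metadata] // could inspect or decorate the output here
+ *   },
+ * }
+ * ```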
+ */
+ interface DeserializeMiddleware {
+ [key:string]: any;
+ /**
+ * ID returns a unique ID for the middleware in the DeserializeStep. The step does not
+ * allow duplicate IDs.
+ */
+ id(): string
+ /**
+ * HandleDeserialize invokes the middleware behavior which must delegate to the next handler
+ * for the middleware chain to continue. The method must return a result or
+ * error to its caller.
+ */
+ handleDeserialize(ctx: context.Context, _arg10: DeserializeInput, next: DeserializeHandler): [DeserializeOutput, Metadata]
+ }
+ /**
+ * FinalizeMiddleware provides the interface for middleware specific to the
+ * finalize step. Delegates to the next FinalizeHandler for further
+ * processing.
+ */
+ interface FinalizeMiddleware {
+ [key:string]: any;
+ /**
+ * ID returns a unique ID for the middleware in the FinalizeStep. The step does not
+ * allow duplicate IDs.
+ */
+ id(): string
+ /**
+ * HandleFinalize invokes the middleware behavior which must delegate to the next handler
+ * for the middleware chain to continue. The method must return a result or
+ * error to its caller.
+ */
+ handleFinalize(ctx: context.Context, _arg10: FinalizeInput, next: FinalizeHandler): [FinalizeOutput, Metadata]
+ }
+ /**
+ * InitializeMiddleware provides the interface for middleware specific to the
+ * initialize step. Delegates to the next InitializeHandler for further
+ * processing.
+ */
+ interface InitializeMiddleware {
+ [key:string]: any;
+ /**
+ * ID returns a unique ID for the middleware in the InitializeStep. The step does not
+ * allow duplicate IDs.
+ */
+ id(): string
+ /**
+ * HandleInitialize invokes the middleware behavior which must delegate to the next handler
+ * for the middleware chain to continue. The method must return a result or
+ * error to its caller.
+ */
+ handleInitialize(ctx: context.Context, _arg10: InitializeInput, next: InitializeHandler): [InitializeOutput, Metadata]
+ }
+ /**
+ * SerializeMiddleware provides the interface for middleware specific to the
+ * serialize step. Delegates to the next SerializeHandler for further
+ * processing.
+ */
+ interface SerializeMiddleware {
+ [key:string]: any;
+ /**
+ * ID returns a unique ID for the middleware in the SerializeStep. The step does not
+ * allow duplicate IDs.
+ */
+ id(): string
+ /**
+ * HandleSerialize invokes the middleware behavior which must delegate to the next handler
+ * for the middleware chain to continue. The method must return a result or
+ * error to its caller.
+ */
+ handleSerialize(ctx: context.Context, _arg10: SerializeInput, next: SerializeHandler): [SerializeOutput, Metadata]
+ }
+}
+
+/**
+ * Package auth defines protocol-agnostic authentication types for smithy
+ * clients.
+ */
+namespace auth {
+ /**
+ * Identity contains information that identifies the user making the
+ * request.
+ */
+ interface Identity {
+ [key:string]: any;
+ expiration(): time.Time
+ }
+ /**
+ * IdentityResolverOptions defines the interface through which an entity can be
+ * queried to retrieve an IdentityResolver for a given auth scheme.
+ */
+ interface IdentityResolverOptions {
+ [key:string]: any;
+ getIdentityResolver(schemeID: string): IdentityResolver
+ }
+ /**
+ * Option represents a possible authentication method for an operation.
+ */
+ interface Option {
+ schemeID: string
+ identityProperties: smithy.Properties
+ signerProperties: smithy.Properties
+ }
+}
+
+/**
+ * Package metrics defines the metrics APIs used by Smithy clients.
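+ *
+ * A hedged usage sketch (assuming a Meter `meter` obtained from the SDK's
+ * meter provider; the add/record method names are assumed, mirroring
+ * smithy-go's instrument interfaces):
+ *
+ * ```
+ * const requests = meter.int64Counter("client.http.requests")
+ * const latency = meter.float64Histogram("client.http.latency")
+ * // record one request and its latency in milliseconds
+ * requests.add(ctx, 1)
+ * latency.record(ctx, 12.5)
+ * ```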
+ */ +namespace metrics { + /** + * MeterOption applies configuration to a Meter. + */ + interface MeterOption {(o: MeterOptions): void } + /** + * Meter is the entry point for creation of measurement instruments. + */ + interface Meter { + [key:string]: any; + /** + * integer/synchronous + */ + int64Counter(name: string, ...opts: InstrumentOption[]): Int64Counter + int64UpDownCounter(name: string, ...opts: InstrumentOption[]): Int64UpDownCounter + int64Gauge(name: string, ...opts: InstrumentOption[]): Int64Gauge + int64Histogram(name: string, ...opts: InstrumentOption[]): Int64Histogram + /** + * integer/asynchronous + */ + int64AsyncCounter(name: string, callback: Int64Callback, ...opts: InstrumentOption[]): AsyncInstrument + int64AsyncUpDownCounter(name: string, callback: Int64Callback, ...opts: InstrumentOption[]): AsyncInstrument + int64AsyncGauge(name: string, callback: Int64Callback, ...opts: InstrumentOption[]): AsyncInstrument + /** + * floating-point/synchronous + */ + float64Counter(name: string, ...opts: InstrumentOption[]): Float64Counter + float64UpDownCounter(name: string, ...opts: InstrumentOption[]): Float64UpDownCounter + float64Gauge(name: string, ...opts: InstrumentOption[]): Float64Gauge + float64Histogram(name: string, ...opts: InstrumentOption[]): Float64Histogram + /** + * floating-point/asynchronous + */ + float64AsyncCounter(name: string, callback: Float64Callback, ...opts: InstrumentOption[]): AsyncInstrument + float64AsyncUpDownCounter(name: string, callback: Float64Callback, ...opts: InstrumentOption[]): AsyncInstrument + float64AsyncGauge(name: string, callback: Float64Callback, ...opts: InstrumentOption[]): AsyncInstrument + } +} + +/** + * Package tracing defines tracing APIs to be used by Smithy clients. + */ +namespace tracing { + /** + * TracerOption applies configuration to a tracer. + */ + interface TracerOption {(o: TracerOptions): void } + /** + * Tracer is the entry point for creating observed client Spans. + * + * Spans created by tracers propagate by existing on the Context. Consumers of + * the API can use [GetSpan] to pull the active Span from a Context. + * + * Creation of child Spans is implicit through Context persistence. If + * CreateSpan is called with a Context that holds a Span, the result will be a + * child of that Span. + */ + interface Tracer { + [key:string]: any; + startSpan(ctx: context.Context, name: string, ...opts: SpanOption[]): [context.Context, Span] + } +} + /** * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer * object, creating another object (Reader or Writer) that also implements @@ -19600,52 +41114,150 @@ namespace bufio { } /** - * Package mail implements parsing of mail messages. + * Package http provides HTTP client and server implementations. + * + * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: * - * For the most part, this package follows the syntax as specified by RFC 5322 and - * extended by RFC 6532. - * Notable divergences: * ``` - * - Obsolete address formats are not parsed, including addresses with - * embedded route information. - * - The full range of spacing (the CFWS syntax element) is not supported, - * such as breaking addresses across lines. - * - No unicode normalization is performed. - * - The special characters ()[]:;@\, are allowed to appear unquoted in names. - * - A leading From line is permitted, as in mbox format (RFC 4155). + * resp, err := http.Get("http://example.com/") + * ... 
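+ * // Head fetches only the response headers (illustrative):
+ * resp, err := http.Head("http://example.com/")
+ * ...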
+ * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) + * ... + * resp, err := http.PostForm("http://example.com/form", + * url.Values{"key": {"Value"}, "id": {"123"}}) * ``` + * + * The caller must close the response body when finished with it: + * + * ``` + * resp, err := http.Get("http://example.com/") + * if err != nil { + * // handle error + * } + * defer resp.Body.Close() + * body, err := io.ReadAll(resp.Body) + * // ... + * ``` + * + * # Clients and Transports + * + * For control over HTTP client headers, redirect policy, and other + * settings, create a [Client]: + * + * ``` + * client := &http.Client{ + * CheckRedirect: redirectPolicyFunc, + * } + * + * resp, err := client.Get("http://example.com") + * // ... + * + * req, err := http.NewRequest("GET", "http://example.com", nil) + * // ... + * req.Header.Add("If-None-Match", `W/"wyzzy"`) + * resp, err := client.Do(req) + * // ... + * ``` + * + * For control over proxies, TLS configuration, keep-alives, + * compression, and other settings, create a [Transport]: + * + * ``` + * tr := &http.Transport{ + * MaxIdleConns: 10, + * IdleConnTimeout: 30 * time.Second, + * DisableCompression: true, + * } + * client := &http.Client{Transport: tr} + * resp, err := client.Get("https://example.com") + * ``` + * + * Clients and Transports are safe for concurrent use by multiple + * goroutines and for efficiency should only be created once and re-used. + * + * # Servers + * + * ListenAndServe starts an HTTP server with a given address and handler. + * The handler is usually nil, which means to use [DefaultServeMux]. + * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: + * + * ``` + * http.Handle("/foo", fooHandler) + * + * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { + * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) + * }) + * + * log.Fatal(http.ListenAndServe(":8080", nil)) + * ``` + * + * More control over the server's behavior is available by creating a + * custom Server: + * + * ``` + * s := &http.Server{ + * Addr: ":8080", + * Handler: myHandler, + * ReadTimeout: 10 * time.Second, + * WriteTimeout: 10 * time.Second, + * MaxHeaderBytes: 1 << 20, + * } + * log.Fatal(s.ListenAndServe()) + * ``` + * + * # HTTP/2 + * + * Starting with Go 1.6, the http package has transparent support for the + * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 + * can do so by setting [Transport.TLSNextProto] (for clients) or + * [Server.TLSNextProto] (for servers) to a non-nil, empty + * map. Alternatively, the following GODEBUG settings are + * currently supported: + * + * ``` + * GODEBUG=http2client=0 # disable HTTP/2 client support + * GODEBUG=http2server=0 # disable HTTP/2 server support + * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs + * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps + * ``` + * + * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug + * + * The http package's [Transport] and [Server] both automatically enable + * HTTP/2 support for simple configurations. To enable HTTP/2 for more + * complex configurations, to use lower-level HTTP/2 features, or to use + * a newer version of Go's http2 package, import "golang.org/x/net/http2" + * directly and use its ConfigureTransport and/or ConfigureServer + * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 + * package takes precedence over the net/http package's built-in HTTP/2 + * support. 
*/ -namespace mail { - /** - * Address represents a single mail address. - * An address such as "Barry Gibbs " is represented - * as Address{Name: "Barry Gibbs", Address: "bg@example.com"}. - */ - interface Address { - name: string // Proper name; may be empty. - address: string // user@domain - } - interface Address { - /** - * String formats the address as a valid RFC 5322 address. - * If the address's name contains non-ASCII characters - * the name will be rendered according to RFC 2047. - */ - string(): string - } +namespace http { + // @ts-ignore + import mathrand = rand + // @ts-ignore + import urlpkg = url } /** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. + * Package http provides the HTTP transport client and request/response types + * needed to round trip API operation calls with an service. */ -namespace types { -} - -namespace search { -} - -namespace subscriptions { +namespace http { + // @ts-ignore + import smithy = smithy_go + /** + * Signer defines the interface through which HTTP requests are supplemented + * with an Identity. + */ + interface Signer { + [key:string]: any; + signRequest(_arg0: context.Context, _arg1: Request, _arg2: auth.Identity, _arg3: smithy.Properties): void + } + // @ts-ignore + import iointernal = io + // @ts-ignore + import smithytime = time } /** @@ -20012,8 +41624,10 @@ namespace subscriptions { * Now computeExpensiveValue will only be called when the line is enabled. * * The built-in handlers acquire a lock before calling [io.Writer.Write] - * to ensure that each record is written in one piece. User-defined - * handlers are responsible for their own locking. + * to ensure that exactly one [Record] is written at a time in its entirety. + * Although each log record has a timestamp, + * the built-in handlers do not use that time to sort the written records. + * User-defined handlers are responsible for their own locking and sorting. * * # Writing a handler * @@ -20191,6 +41805,2388 @@ namespace slog { } } +/** + * Package aws provides the core SDK's utilities and shared types. Use this package's + * utilities to simplify setting and reading API operations parameters. + * + * # Value and Pointer Conversion Utilities + * + * This package includes a helper conversion utility for each scalar type the SDK's + * API use. These utilities make getting a pointer of the scalar, and dereferencing + * a pointer easier. + * + * Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. + * The Pointer to value will safely dereference the pointer and return its value. + * If the pointer was nil, the scalar's zero value will be returned. + * + * The value to pointer functions will be named after the scalar type. So get a + * *string from a string value use the "String" function. This makes it easy to + * to get pointer of a literal string value, because getting the address of a + * literal requires assigning the value to a variable first. + * + * ``` + * var strPtr *string + * + * // Without the SDK's conversion functions + * str := "my string" + * strPtr = &str + * + * // With the SDK's conversion functions + * strPtr = aws.String("my string") + * + * // Convert *string to string value + * str = aws.ToString(strPtr) + * ``` + * + * In addition to scalars the aws package also includes conversion utilities for + * map and slice for commonly types used in API parameters. The map and slice + * conversion functions use similar naming pattern as the scalar conversion + * functions. 
+ * + * ``` + * var strPtrs []*string + * var strs []string = []string{"Go", "Gophers", "Go"} + * + * // Convert []string to []*string + * strPtrs = aws.StringSlice(strs) + * + * // Convert []*string to []string + * strs = aws.ToStringSlice(strPtrs) + * ``` + * + * # SDK Default HTTP Client + * + * The SDK will use the http.DefaultClient if a HTTP client is not provided to + * the SDK's Session, or service client constructor. This means that if the + * http.DefaultClient is modified by other components of your application the + * modifications will be picked up by the SDK as well. + * + * In some cases this might be intended, but it is a better practice to create + * a custom HTTP Client to share explicitly through your application. You can + * configure the SDK to use the custom HTTP Client by setting the HTTPClient + * value of the SDK's Config type when creating a Session or service client. + */ +/** + * Package aws provides core functionality for making requests to AWS services. + */ +namespace aws { + // @ts-ignore + import smithybearer = bearer + // @ts-ignore + import sdkrand = rand + /** + * EndpointSource is the endpoint source type. + * + * Deprecated: The global [Endpoint] structure is deprecated. + */ + interface EndpointSource extends Number{} +} + +namespace endpoints { + // @ts-ignore + import endpoints = endpoints + /** + * Options is the endpoint resolver configuration options + */ + interface Options { + /** + * Logger is a logging implementation that log events should be sent to. + */ + logger: logging.Logger + /** + * LogDeprecated indicates that deprecated endpoints should be logged to the + * provided logger. + */ + logDeprecated: boolean + /** + * ResolvedRegion is used to override the region to be resolved, rather then the + * using the value passed to the ResolveEndpoint method. This value is used by the + * SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + * name. You must not set this value directly in your application. + */ + resolvedRegion: string + /** + * DisableHTTPS informs the resolver to return an endpoint that does not use the + * HTTPS scheme. + */ + disableHTTPS: boolean + /** + * UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + */ + useDualStackEndpoint: aws.DualStackEndpointState + /** + * UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + */ + useFIPSEndpoint: aws.FIPSEndpointState + } + interface Options { + getResolvedRegion(): string + } + interface Options { + getDisableHTTPS(): boolean + } + interface Options { + getUseDualStackEndpoint(): aws.DualStackEndpointState + } + interface Options { + getUseFIPSEndpoint(): aws.FIPSEndpointState + } +} + +/** + * Package v4 implements the AWS signature version 4 algorithm (commonly known + * as SigV4). + * + * For more information about SigV4, see [Signing AWS API requests] in the IAM + * user guide. + * + * While this implementation CAN work in an external context, it is developed + * primarily for SDK use and you may encounter fringe behaviors around header + * canonicalization. + * + * # Pre-escaping a request URI + * + * AWS v4 signature validation requires that the canonical string's URI path + * component must be the escaped form of the HTTP request's path. + * + * The Go HTTP client will perform escaping automatically on the HTTP request. + * This may cause signature validation errors because the request differs from + * the URI path or query from which the signature was generated. 
+ *
+ * Because of this, we recommend that you explicitly escape the request when
+ * using this signer outside of the SDK to prevent possible signature mismatch.
+ * This can be done by setting URL.Opaque on the request. The signer will
+ * prefer that value, falling back to the return of URL.EscapedPath if unset.
+ *
+ * When setting URL.Opaque you must do so in the form of:
+ *
+ * ```
+ * "//<hostname>/<path>"
+ *
+ * // e.g.
+ * "//example.com/some/path"
+ * ```
+ *
+ * The leading "//" and hostname are required or the escaping will not work
+ * correctly.
+ *
+ * The TestStandaloneSign unit test provides a complete example of using the
+ * signer outside of the SDK and pre-escaping the URI path.
+ *
+ * [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
+ */
+namespace signer {
+ // @ts-ignore
+ import awsmiddleware = middleware
+ // @ts-ignore
+ import v4Internal = internal
+ // @ts-ignore
+ import internalauth = auth
+ // @ts-ignore
+ import smithyhttp = http
+ // @ts-ignore
+ import smithyHTTP = http
+ /**
+ * SignerOptions is the SigV4 Signer options.
+ */
+ interface SignerOptions {
+ /**
+ * Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ * request header to the request's query string. This is most commonly used
+ * with pre-signed requests, preventing headers from being added to the
+ * request's query string.
+ */
+ disableHeaderHoisting: boolean
+ /**
+ * Disables the automatic escaping of the URI path of the request for the
+ * signature's canonical string's path. For services that do not need additional
+ * escaping, use this to disable the signer escaping the path.
+ *
+ * S3 is an example of a service that does not need additional escaping.
+ *
+ * http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ */
+ disableURIPathEscaping: boolean
+ /**
+ * The logger to send log messages to.
+ */
+ logger: logging.Logger
+ /**
+ * Enable logging of signed requests.
+ * This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
+ * presigned URL.
+ */
+ logSigning: boolean
+ /**
+ * Disables setting the session token on the request as part of signing
+ * through X-Amz-Security-Token. This is needed for variations of v4 that
+ * present the token elsewhere.
+ */
+ disableSessionToken: boolean
+ }
+}
+
+/**
+ * Package document provides interface definitions and error types for document types.
+ *
+ * A document is a protocol-agnostic type which supports a JSON-like data model. You can use this type to send
+ * UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8
+ * strings to these values.
+ *
+ * API Clients expose document constructors in their respective client document packages which must be used to
+ * Marshal and Unmarshal Go types to and from their respective protocol representations.
+ *
+ * See the Marshaler and Unmarshaler type documentation for more details on how Go types can be converted to and from
+ * document types.
+ */
+namespace document {
+ /**
+ * NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled
+ * into a protocol document.
+ */
+ interface NoSerde {
+ }
+}
+
+/**
+ * Package types implements some commonly used db serializable types
+ * like datetime, json, etc.
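+ *
+ * For example, a hedged sketch (assuming the DateTime constructor that this
+ * package exposes; the printed format is illustrative):
+ *
+ * ```
+ * const d = new DateTime() // the current time, in UTC
+ * console.log(d.string())  // e.g. "2024-10-18 11:23:38.000Z"
+ * ```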
+ */ +namespace types { +} + +namespace types { + interface CompressionType extends String{} + interface CompressionType { + /** + * Values returns all known values for CompressionType. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface Event extends String{} + interface Event { + /** + * Values returns all known values for Event. Note that this can be expanded in + * the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ExpirationStatus extends String{} + interface ExpirationStatus { + /** + * Values returns all known values for ExpirationStatus. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface IntelligentTieringStatus extends String{} + interface IntelligentTieringStatus { + /** + * Values returns all known values for IntelligentTieringStatus. Note that this + * can be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface InventoryIncludedObjectVersions extends String{} + interface InventoryIncludedObjectVersions { + /** + * Values returns all known values for InventoryIncludedObjectVersions. Note that + * this can be expanded in the future, and so it is only as up to date as the + * client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface InventoryOptionalField extends String{} + interface InventoryOptionalField { + /** + * Values returns all known values for InventoryOptionalField. Note that this can + * be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface MFADelete extends String{} + interface MFADelete { + /** + * Values returns all known values for MFADelete. Note that this can be expanded + * in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ObjectLockEnabled extends String{} + interface ObjectLockEnabled { + /** + * Values returns all known values for ObjectLockEnabled. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ObjectLockRetentionMode extends String{} + interface ObjectLockRetentionMode { + /** + * Values returns all known values for ObjectLockRetentionMode. Note that this can + * be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ObjectStorageClass extends String{} + interface ObjectStorageClass { + /** + * Values returns all known values for ObjectStorageClass. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. 
+ */ + values(): Array + } + interface ObjectVersionStorageClass extends String{} + interface ObjectVersionStorageClass { + /** + * Values returns all known values for ObjectVersionStorageClass. Note that this + * can be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface Permission extends String{} + interface Permission { + /** + * Values returns all known values for Permission. Note that this can be expanded + * in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface Protocol extends String{} + interface Protocol { + /** + * Values returns all known values for Protocol. Note that this can be expanded in + * the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface RestoreRequestType extends String{} + interface RestoreRequestType { + /** + * Values returns all known values for RestoreRequestType. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface Tier extends String{} + interface Tier { + /** + * Values returns all known values for Tier. Note that this can be expanded in the + * future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + // @ts-ignore + import smithy = smithy_go + // @ts-ignore + import smithydocument = document + /** + * Specifies the days since the initiation of an incomplete multipart upload that + * Amazon S3 will wait before permanently removing all parts of the upload. For + * more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. + * + * [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + */ + type _subTtsCE = noSmithyDocumentSerde + interface AbortIncompleteMultipartUpload extends _subTtsCE { + /** + * Specifies the number of days after which Amazon S3 aborts an incomplete + * multipart upload. + */ + daysAfterInitiation?: number + } + /** + * The filter used to describe a set of objects for analyses. A filter must have + * exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no + * filter is provided, all objects will be considered in any analysis. + * + * The following types satisfy this interface: + * + * ``` + * AnalyticsFilterMemberAnd + * AnalyticsFilterMemberPrefix + * AnalyticsFilterMemberTag + * ``` + */ + interface AnalyticsFilter { + [key:string]: any; + } + /** + * Specifies the information about the bucket that will be created. For more + * information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. + * + * This functionality is only supported by directory buckets. 
+ * + * [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html + */ + type _suboDMoj = noSmithyDocumentSerde + interface BucketInfo extends _suboDMoj { + /** + * The number of Availability Zone that's used for redundancy for the bucket. + */ + dataRedundancy: DataRedundancy + /** + * The type of bucket. + */ + type: BucketType + } + /** + * Details of the parts that were uploaded. + */ + type _subThMsH = noSmithyDocumentSerde + interface CompletedPart extends _subThMsH { + /** + * The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA256?: string + /** + * Entity tag returned when the part was uploaded. + */ + eTag?: string + /** + * Part number that identifies the part. This is a positive integer between 1 and + * 10,000. 
+ * + * ``` + * - General purpose buckets - In CompleteMultipartUpload , when a additional + * checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , + * x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the + * PartNumber must start at 1 and the part numbers must be consecutive. + * Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an + * InvalidPartOrder error code. + * + * - Directory buckets - In CompleteMultipartUpload , the PartNumber must start + * at 1 and the part numbers must be consecutive. + * ``` + */ + partNumber?: number + } + /** + * A container for describing a condition that must be met for the specified + * redirect to apply. For example, 1. If request is for pages in the /docs folder, + * redirect to the /documents folder. 2. If request results in HTTP error 4xx, + * redirect request to another host where you might process the error. + */ + type _subslzCU = noSmithyDocumentSerde + interface Condition extends _subslzCU { + /** + * The HTTP error code when the redirect is applied. In the event of an error, if + * the error code equals this value, then the specified redirect is applied. + * Required when parent element Condition is specified and sibling KeyPrefixEquals + * is not specified. If both are specified, then both must be true for the redirect + * to be applied. + */ + httpErrorCodeReturnedEquals?: string + /** + * The object key name prefix when the redirect is applied. For example, to + * redirect requests for ExamplePage.html , the key prefix will be ExamplePage.html + * . To redirect request for all pages with the prefix docs/ , the key prefix will + * be /docs , which identifies all objects in the docs/ folder. Required when the + * parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + * is not specified. If both conditions are specified, both must be true for the + * redirect to be applied. + * + * Replacement must be made for object keys containing special characters (such as + * carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + * + * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + */ + keyPrefixEquals?: string + } + /** + * Describes how an uncompressed comma-separated values (CSV)-formatted input + * object is formatted. + */ + type _subpoDVL = noSmithyDocumentSerde + interface CSVInput extends _subpoDVL { + /** + * Specifies that CSV field values may contain quoted record delimiters and such + * records should be allowed. Default value is FALSE. Setting this value to TRUE + * may lower performance. + */ + allowQuotedRecordDelimiter?: boolean + /** + * A single character used to indicate that a row should be ignored when the + * character is present at the start of that row. You can specify any character to + * indicate a comment line. The default character is # . + * + * Default: # + */ + comments?: string + /** + * A single character used to separate individual fields in a record. You can + * specify an arbitrary delimiter. + */ + fieldDelimiter?: string + /** + * Describes the first line of input. Valid values are: + * + * ``` + * - NONE : First line is not a header. + * + * - IGNORE : First line is a header, but you can't use the header values to + * indicate the column in an expression. You can use column position (such as _1, + * _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). 
+ * + * - Use : First line is a header, and you can use the header value to identify a + * column in an expression ( SELECT "name" FROM OBJECT ). + * ``` + */ + fileHeaderInfo: FileHeaderInfo + /** + * A single character used for escaping when the field delimiter is part of the + * value. For example, if the value is a, b , Amazon S3 wraps this field value in + * quotation marks, as follows: " a , b " . + * + * Type: String + * + * Default: " + * + * Ancestors: CSV + */ + quoteCharacter?: string + /** + * A single character used for escaping the quotation mark character inside an + * already escaped value. For example, the value """ a , b """ is parsed as " a , + * b " . + */ + quoteEscapeCharacter?: string + /** + * A single character used to separate individual records in the input. Instead of + * the default value, you can specify an arbitrary delimiter. + */ + recordDelimiter?: string + } + /** + * Describes how uncompressed comma-separated values (CSV)-formatted results are + * formatted. + */ + type _subADOJI = noSmithyDocumentSerde + interface CSVOutput extends _subADOJI { + /** + * The value used to separate individual fields in a record. You can specify an + * arbitrary delimiter. + */ + fieldDelimiter?: string + /** + * A single character used for escaping when the field delimiter is part of the + * value. For example, if the value is a, b , Amazon S3 wraps this field value in + * quotation marks, as follows: " a , b " . + */ + quoteCharacter?: string + /** + * The single character used for escaping the quote character inside an already + * escaped value. + */ + quoteEscapeCharacter?: string + /** + * Indicates whether to use quotation marks around output fields. + * + * ``` + * - ALWAYS : Always use quotation marks for output fields. + * + * - ASNEEDED : Use quotation marks for output fields when needed. + * ``` + */ + quoteFields: QuoteFields + /** + * A single character used to separate individual records in the output. Instead + * of the default value, you can specify an arbitrary delimiter. + */ + recordDelimiter?: string + } + /** + * Container for S3 Glacier job parameters. + */ + type _subdFnGf = noSmithyDocumentSerde + interface GlacierJobParameters extends _subdFnGf { + /** + * Retrieval tier at which the restore will be processed. + * + * This member is required. + */ + tier: Tier + } + /** + * Container for the person being granted permissions. + */ + type _subWGHQu = noSmithyDocumentSerde + interface Grantee extends _subWGHQu { + /** + * Type of grantee + * + * This member is required. + */ + type: Type + /** + * Screen name of the grantee. + */ + displayName?: string + /** + * Email address of the grantee. + * + * Using email addresses to specify a grantee is only supported in the following + * Amazon Web Services Regions: + * + * ``` + * - US East (N. Virginia) + * + * - US West (N. California) + * + * - US West (Oregon) + * + * - Asia Pacific (Singapore) + * + * - Asia Pacific (Sydney) + * + * - Asia Pacific (Tokyo) + * + * - Europe (Ireland) + * + * - South America (São Paulo) + * ``` + * + * For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the + * Amazon Web Services General Reference. + * + * [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + */ + emailAddress?: string + /** + * The canonical user ID of the grantee. + */ + id?: string + /** + * URI of the grantee group. 
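+ *
+ * For example, a hypothetical grantee granting access to a canned S3 group
+ * (the group URI below is the AllUsers group from the S3 ACL docs):
+ *
+ * ```
+ * const grantee: Grantee = {
+ *   type: "Group",
+ *   uri: "http://acs.amazonaws.com/groups/global/AllUsers",
+ * }
+ * ```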
+ */ + uri?: string + } + /** + * The Filter is used to identify objects that the S3 Intelligent-Tiering + * configuration applies to. + */ + type _subjMqFn = noSmithyDocumentSerde + interface IntelligentTieringFilter extends _subjMqFn { + /** + * A conjunction (logical AND) of predicates, which is used in evaluating a + * metrics filter. The operator must have at least two predicates, and an object + * must match all of the predicates in order for the filter to apply. + */ + and?: IntelligentTieringAndOperator + /** + * An object key name prefix that identifies the subset of objects to which the + * rule applies. + * + * Replacement must be made for object keys containing special characters (such as + * carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + * + * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + */ + prefix?: string + /** + * A container of a key value name pair. + */ + tag?: Tag + } + /** + * Specifies the inventory configuration for an Amazon S3 bucket. + */ + type _subNhQFG = noSmithyDocumentSerde + interface InventoryDestination extends _subNhQFG { + /** + * Contains the bucket name, file format, bucket owner (optional), and prefix + * (optional) where inventory results are published. + * + * This member is required. + */ + s3BucketDestination?: InventoryS3BucketDestination + } + /** + * Specifies an inventory filter. The inventory only includes objects that meet + * the filter's criteria. + */ + type _subaXFzR = noSmithyDocumentSerde + interface InventoryFilter extends _subaXFzR { + /** + * The prefix that an object must have to be included in the inventory results. + * + * This member is required. + */ + prefix?: string + } + /** + * Specifies the schedule for generating inventory results. + */ + type _subApprQ = noSmithyDocumentSerde + interface InventorySchedule extends _subApprQ { + /** + * Specifies how frequently inventory results are produced. + * + * This member is required. + */ + frequency: InventoryFrequency + } + /** + * Specifies JSON as object's input serialization format. + */ + type _subNlzgt = noSmithyDocumentSerde + interface JSONInput extends _subNlzgt { + /** + * The type of JSON. Valid values: Document, Lines. + */ + type: JSONType + } + /** + * Specifies JSON as request's output serialization format. + */ + type _subQMtYm = noSmithyDocumentSerde + interface JSONOutput extends _subQMtYm { + /** + * The value used to separate individual records in the output. If no value is + * specified, Amazon S3 uses a newline character ('\n'). + */ + recordDelimiter?: string + } + /** + * Container for the expiration for the lifecycle of the object. + * + * For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. + * + * [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html + */ + type _subvVmSu = noSmithyDocumentSerde + interface LifecycleExpiration extends _subvVmSu { + /** + * Indicates at what date the object is to be moved or deleted. The date value + * must conform to the ISO 8601 format. The time is always midnight UTC. + */ + date?: time.Time + /** + * Indicates the lifetime, in days, of the objects that are subject to the rule. + * The value must be a non-zero positive integer. + */ + days?: number + /** + * Indicates whether Amazon S3 will remove a delete marker with no noncurrent + * versions. 
If set to true, the delete marker will be expired; if set to false the + * policy takes no action. This cannot be specified with Days or Date in a + * Lifecycle Expiration Policy. + */ + expiredObjectDeleteMarker?: boolean + } + /** + * The Filter is used to identify objects that a Lifecycle Rule applies to. A + * Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan , + * ObjectSizeLessThan , or And specified. If the Filter element is left empty, the + * Lifecycle Rule applies to all objects in the bucket. + * + * The following types satisfy this interface: + * + * ``` + * LifecycleRuleFilterMemberAnd + * LifecycleRuleFilterMemberObjectSizeGreaterThan + * LifecycleRuleFilterMemberObjectSizeLessThan + * LifecycleRuleFilterMemberPrefix + * LifecycleRuleFilterMemberTag + * ``` + */ + interface LifecycleRuleFilter { + [key:string]: any; + } + /** + * Specifies the location where the bucket will be created. + * + * For directory buckets, the location type is Availability Zone. For more + * information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. + * + * This functionality is only supported by directory buckets. + * + * [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html + */ + type _subGUAJG = noSmithyDocumentSerde + interface LocationInfo extends _subGUAJG { + /** + * The name of the location where the bucket will be created. + * + * For directory buckets, the name of the location is the AZ ID of the + * Availability Zone where the bucket will be created. An example AZ ID value is + * usw2-az1 . + */ + name?: string + /** + * The type of location where the bucket will be created. + */ + type: LocationType + } + /** + * Specifies a metrics configuration filter. The metrics configuration only + * includes objects that meet the filter's criteria. A filter must be a prefix, an + * object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more + * information, see [PutBucketMetricsConfiguration]. + * + * The following types satisfy this interface: + * + * ``` + * MetricsFilterMemberAccessPointArn + * MetricsFilterMemberAnd + * MetricsFilterMemberPrefix + * MetricsFilterMemberTag + * ``` + * + * [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html + */ + interface MetricsFilter { + [key:string]: any; + } + /** + * Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 + * permanently deletes the noncurrent object versions. You set this lifecycle + * configuration action on a bucket that has versioning enabled (or suspended) to + * request that Amazon S3 delete noncurrent object versions at a specific period in + * the object's lifetime. + */ + type _subPKzco = noSmithyDocumentSerde + interface NoncurrentVersionExpiration extends _subPKzco { + /** + * Specifies how many noncurrent versions Amazon S3 will retain. You can specify + * up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any + * additional noncurrent versions beyond the specified number to retain. For more + * information about noncurrent versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + * + * [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html + */ + newerNoncurrentVersions?: number + /** + * Specifies the number of days an object is noncurrent before Amazon S3 can + * perform the associated action. 
The value must be a non-zero positive integer. + * For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent]in the Amazon S3 + * User Guide. + * + * [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations + */ + noncurrentDays?: number + } + /** + * Container for the transition rule that describes when noncurrent objects + * transition to the STANDARD_IA , ONEZONE_IA , INTELLIGENT_TIERING , GLACIER_IR , + * GLACIER , or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled + * (or versioning is suspended), you can set this action to request that Amazon S3 + * transition noncurrent object versions to the STANDARD_IA , ONEZONE_IA , + * INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE storage class at a + * specific period in the object's lifetime. + */ + type _subDHdHp = noSmithyDocumentSerde + interface NoncurrentVersionTransition extends _subDHdHp { + /** + * Specifies how many noncurrent versions Amazon S3 will retain in the same + * storage class before transitioning objects. You can specify up to 100 noncurrent + * versions to retain. Amazon S3 will transition any additional noncurrent versions + * beyond the specified number to retain. For more information about noncurrent + * versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + * + * [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html + */ + newerNoncurrentVersions?: number + /** + * Specifies the number of days an object is noncurrent before Amazon S3 can + * perform the associated action. For information about the noncurrent days + * calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]in the Amazon S3 User Guide. + * + * [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations + */ + noncurrentDays?: number + /** + * The class of storage used to store the object. + */ + storageClass: TransitionStorageClass + } + /** + * Specifies object key name filtering rules. For information about key name + * filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + * + * [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html + */ + type _sublsXwL = noSmithyDocumentSerde + interface NotificationConfigurationFilter extends _sublsXwL { + /** + * A container for object key name prefix and suffix filtering rules. + */ + key?: S3KeyFilter + } + /** + * Object Identifier is unique value to identify objects. + */ + type _subUoIpQ = noSmithyDocumentSerde + interface ObjectIdentifier extends _subUoIpQ { + /** + * Key name of the object. + * + * Replacement must be made for object keys containing special characters (such as + * carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + * + * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + * + * This member is required. + */ + key?: string + /** + * Version ID for the specific version of the object to delete. + * + * This functionality is not supported for directory buckets. 
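+ *
+ * For example, a hypothetical identifier targeting one specific version
+ * (the version ID string is illustrative only):
+ *
+ * ```
+ * const id: ObjectIdentifier = {
+ *   key: "photos/cat.jpg",
+ *   versionId: "3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY",
+ * }
+ * ```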
+ */ + versionId?: string + } + /** + * The container element for an Object Lock rule. + */ + type _subBMdqo = noSmithyDocumentSerde + interface ObjectLockRule extends _subBMdqo { + /** + * The default Object Lock retention mode and period that you want to apply to new + * objects placed in the specified bucket. Bucket settings require both a mode and + * a period. The period can be either Days or Years but you must select one. You + * cannot specify Days and Years at the same time. + */ + defaultRetention?: DefaultRetention + } + /** + * A container for elements related to an individual part. + */ + type _subfIADH = noSmithyDocumentSerde + interface ObjectPart extends _subfIADH { + /** + * This header can be used as a data integrity check to verify that the data + * received is the same data that was originally sent. This header specifies the + * base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see [Checking object integrity] + * in the Amazon S3 User Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + */ + checksumCRC32?: string + /** + * The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumCRC32C?: string + /** + * The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + * present if it was uploaded with the object. When you use the API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA1?: string + /** + * The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + * present if it was uploaded with the object. When you use an API operation on an + * object that was uploaded using multipart uploads, this value may not be a direct + * checksum value of the full object. Instead, it's a calculation based on the + * checksum values of each individual part. For more information about how + * checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + * Guide. + * + * [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + */ + checksumSHA256?: string + /** + * The part number identifying the part. This value is a positive integer between + * 1 and 10,000. + */ + partNumber?: number + /** + * The size of the uploaded part in bytes. + */ + size?: number + } + /** + * Describes the location where the restore job's output is stored. 
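+ *
+ * A hedged sketch (assuming S3Location exposes bucketName and prefix fields,
+ * mirroring the AWS SDK shape):
+ *
+ * ```
+ * const out: OutputLocation = {
+ *   s3: { bucketName: "my-restore-bucket", prefix: "restored/" },
+ * }
+ * ```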
+ */ + type _subPZwEh = noSmithyDocumentSerde + interface OutputLocation extends _subPZwEh { + /** + * Describes an S3 location that will receive the results of the restore request. + */ + s3?: S3Location + } + /** + * The container element for an ownership control rule. + */ + type _subfpwaA = noSmithyDocumentSerde + interface OwnershipControlsRule extends _subfpwaA { + /** + * The container element for object ownership for a bucket's ownership controls. + * + * BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the + * bucket owner if the objects are uploaded with the bucket-owner-full-control + * canned ACL. + * + * ObjectWriter - The uploading account will own the object if the object is + * uploaded with the bucket-owner-full-control canned ACL. + * + * BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + * affect permissions. The bucket owner automatically owns and has full control + * over every object in the bucket. The bucket only accepts PUT requests that don't + * specify an ACL or specify bucket owner full control ACLs (such as the predefined + * bucket-owner-full-control canned ACL or a custom ACL in XML format that grants + * the same permissions). + * + * By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + * disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + * you must control access for each object individually. For more information about + * S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. Directory buckets + * use the bucket owner enforced setting for S3 Object Ownership. + * + * [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + * + * This member is required. + */ + objectOwnership: ObjectOwnership + } + /** + * Container for Parquet. + */ + type _subpytOv = noSmithyDocumentSerde + interface ParquetInput extends _subpytOv { + } + /** + * Specifies how requests are redirected. In the event of an error, you can + * specify a different error code to return. + */ + type _subJWrUl = noSmithyDocumentSerde + interface Redirect extends _subJWrUl { + /** + * The host name to use in the redirect request. + */ + hostName?: string + /** + * The HTTP redirect code to use on the response. Not required if one of the + * siblings is present. + */ + httpRedirectCode?: string + /** + * Protocol to use when redirecting requests. The default is the protocol that is + * used in the original request. + */ + protocol: Protocol + /** + * The object key prefix to use in the redirect request. For example, to redirect + * requests for all pages with prefix docs/ (objects in the docs/ folder) to + * documents/ , you can set a condition block with KeyPrefixEquals set to docs/ + * and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one + * of the siblings is present. Can be present only if ReplaceKeyWith is not + * provided. + * + * Replacement must be made for object keys containing special characters (such as + * carriage returns) when using XML requests. For more information, see [XML related object key constraints]. 
+ * + * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + */ + replaceKeyPrefixWith?: string + /** + * The specific object key to use in the redirect request. For example, redirect + * request to error.html . Not required if one of the siblings is present. Can be + * present only if ReplaceKeyPrefixWith is not provided. + * + * Replacement must be made for object keys containing special characters (such as + * carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + * + * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + */ + replaceKeyWith?: string + } + /** + * Specifies which Amazon S3 objects to replicate and where to store the replicas. + */ + type _subXbQuq = noSmithyDocumentSerde + interface ReplicationRule extends _subXbQuq { + /** + * A container for information about the replication destination and its + * configurations including enabling the S3 Replication Time Control (S3 RTC). + * + * This member is required. + */ + destination?: Destination + /** + * Specifies whether the rule is enabled. + * + * This member is required. + */ + status: ReplicationRuleStatus + /** + * Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + * in your replication configuration, you must also include a + * DeleteMarkerReplication element. If your Filter includes a Tag element, the + * DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does + * not support replicating delete markers for tag-based rules. For an example + * configuration, see [Basic Rule Configuration]. + * + * For more information about delete marker replication, see [Basic Rule Configuration]. + * + * If you are using an earlier version of the replication configuration, Amazon S3 + * handles replication of delete markers differently. For more information, see [Backward Compatibility]. + * + * [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html + * [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations + */ + deleteMarkerReplication?: DeleteMarkerReplication + /** + * Optional configuration to replicate existing source bucket objects. + * + * This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in + * the Amazon S3 User Guide. + * + * [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html + */ + existingObjectReplication?: ExistingObjectReplication + /** + * A filter that identifies the subset of objects to which the replication rule + * applies. A Filter must specify exactly one Prefix , Tag , or an And child + * element. + */ + filter: ReplicationRuleFilter + /** + * A unique identifier for the rule. The maximum value is 255 characters. + */ + id?: string + /** + * An object key name prefix that identifies the object or objects to which the + * rule applies. The maximum prefix length is 1,024 characters. To include all + * objects in a bucket, specify an empty string. + * + * Replacement must be made for object keys containing special characters (such as + * carriage returns) when using XML requests. 
For more information, see [XML related object key constraints]. + * + * [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + * + * Deprecated: This member has been deprecated. + */ + prefix?: string + /** + * The priority indicates which rule has precedence whenever two or more + * replication rules conflict. Amazon S3 will attempt to replicate objects + * according to all replication rules. However, if there are two or more rules with + * the same destination bucket, then objects will be replicated according to the + * rule with the highest priority. The higher the number, the higher the priority. + * + * For more information, see [Replication] in the Amazon S3 User Guide. + * + * [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html + */ + priority?: number + /** + * A container that describes additional filters for identifying the source + * objects that you want to replicate. You can choose to enable or disable the + * replication of these objects. Currently, Amazon S3 supports only the filter that + * you can specify for objects created with server-side encryption using a customer + * managed key stored in Amazon Web Services Key Management Service (SSE-KMS). + */ + sourceSelectionCriteria?: SourceSelectionCriteria + } + /** + * Specifies the restoration status of an object. Objects in certain storage + * classes must be restored before they can be retrieved. For more information + * about these storage classes and how to work with archived objects, see [Working with archived objects]in the + * Amazon S3 User Guide. + * + * This functionality is not supported for directory buckets. Only the S3 Express + * One Zone storage class is supported by directory buckets to store objects. + * + * [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html + */ + type _subFCSIN = noSmithyDocumentSerde + interface RestoreStatus extends _subFCSIN { + /** + * Specifies whether the object is currently being restored. If the object + * restoration is in progress, the header returns the value TRUE . For example: + * + * ``` + * x-amz-optional-object-attributes: IsRestoreInProgress="true" + * ``` + * + * If the object restoration has completed, the header returns the value FALSE . + * For example: + * + * ``` + * x-amz-optional-object-attributes: IsRestoreInProgress="false", + * RestoreExpiryDate="2012-12-21T00:00:00.000Z" + * ``` + * + * If the object hasn't been restored, there is no header response. + */ + isRestoreInProgress?: boolean + /** + * Indicates when the restored copy will expire. This value is populated only if + * the object has already been restored. For example: + * + * ``` + * x-amz-optional-object-attributes: IsRestoreInProgress="false", + * RestoreExpiryDate="2012-12-21T00:00:00.000Z" + * ``` + */ + restoreExpiryDate?: time.Time + } + /** + * Amazon S3 Select is no longer available to new customers. Existing customers of + * Amazon S3 Select can continue to use the feature as usual. [Learn more] + * + * Describes the parameters for Select job types. + * + * Learn [How to optimize querying your data in Amazon S3] using [Amazon Athena], [S3 Object Lambda], or client-side filtering. 
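+  *
+  * As an illustrative sketch only (the nested serialization field names are
+  * assumptions; the actual shapes are declared elsewhere in this file), a
+  * SelectParameters value could look like:
+  *
+  * ```
+  * {
+  *   expression: "SELECT s.name FROM S3Object s",
+  *   expressionType: "SQL",
+  *   inputSerialization: { csv: {} },
+  *   outputSerialization: { csv: {} },
+  * }
+  * ```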
+ * + * [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + * [How to optimize querying your data in Amazon S3]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + * [Amazon Athena]: https://docs.aws.amazon.com/athena/latest/ug/what-is.html + * [S3 Object Lambda]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html + */ + type _subrGrMG = noSmithyDocumentSerde + interface SelectParameters extends _subrGrMG { + /** + * Amazon S3 Select is no longer available to new customers. Existing customers of + * Amazon S3 Select can continue to use the feature as usual. [Learn more] + * + * The expression that is used to query the object. + * + * [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + * + * This member is required. + */ + expression?: string + /** + * The type of the provided expression (for example, SQL). + * + * This member is required. + */ + expressionType: ExpressionType + /** + * Describes the serialization format of the object. + * + * This member is required. + */ + inputSerialization?: InputSerialization + /** + * Describes how the results of the Select job are serialized. + * + * This member is required. + */ + outputSerialization?: OutputSerialization + } + /** + * Specifies the default server-side encryption configuration. + * + * ``` + * - General purpose buckets - If you're specifying a customer managed KMS key, + * we recommend using a fully qualified KMS key ARN. If you use a KMS key alias + * instead, then KMS resolves the key within the requester’s account. This behavior + * can result in data that's encrypted with a KMS key that belongs to the + * requester, and not the bucket owner. + * + * - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory + * bucket, only use the key ID or key ARN. The key alias format of the KMS key + * isn't supported. + * ``` + * + * [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + */ + type _subdvukF = noSmithyDocumentSerde + interface ServerSideEncryptionRule extends _subdvukF { + /** + * Specifies the default server-side encryption to apply to new objects in the + * bucket. If a PUT Object request doesn't specify any server-side encryption, this + * default encryption will be applied. + */ + applyServerSideEncryptionByDefault?: ServerSideEncryptionByDefault + /** + * Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + * encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + * are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 + * to use an S3 Bucket Key. + * + * ``` + * - General purpose buckets - By default, S3 Bucket Key is not enabled. For + * more information, see [Amazon S3 Bucket Keys]in the Amazon S3 User Guide. + * + * - Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + * operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't + * supported, when you copy SSE-KMS encrypted objects from general purpose buckets + * to directory buckets, from directory buckets to general purpose buckets, or + * between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + * call to KMS every time a copy request is made for a KMS-encrypted object. 
+ * ``` + * + * [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + * [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + * [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops + */ + bucketKeyEnabled?: boolean + } + /** + * Specifies data related to access patterns to be collected and made available to + * analyze the tradeoffs between different storage classes for an Amazon S3 bucket. + */ + type _subsJjAW = noSmithyDocumentSerde + interface StorageClassAnalysis extends _subsJjAW { + /** + * Specifies how data related to the storage class analysis for an Amazon S3 + * bucket should be exported. + */ + dataExport?: StorageClassAnalysisDataExport + } + /** + * Container for granting information. + * + * Buckets that use the bucket owner enforced setting for Object Ownership don't + * support target grants. For more information, see [Permissions server access log delivery]in the Amazon S3 User Guide. + * + * [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general + */ + type _subZctQi = noSmithyDocumentSerde + interface TargetGrant extends _subZctQi { + /** + * Container for the person being granted permissions. + */ + grantee?: Grantee + /** + * Logging permissions assigned to the grantee for the bucket. + */ + permission: BucketLogsPermission + } + /** + * Amazon S3 key format for log objects. Only one format, PartitionedPrefix or + * SimplePrefix, is allowed. + */ + type _subGqccM = noSmithyDocumentSerde + interface TargetObjectKeyFormat extends _subGqccM { + /** + * Partitioned S3 key for log objects. + */ + partitionedPrefix?: PartitionedPrefix + /** + * To use the simple format for S3 keys for log objects. To specify SimplePrefix + * format, set SimplePrefix to {}. + */ + simplePrefix?: SimplePrefix + } + /** + * The S3 Intelligent-Tiering storage class is designed to optimize storage costs + * by automatically moving data to the most cost-effective storage access tier, + * without additional operational overhead. + */ + type _subpntPW = noSmithyDocumentSerde + interface Tiering extends _subpntPW { + /** + * S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3 + * Intelligent-Tiering storage class. + * + * [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + * + * This member is required. + */ + accessTier: IntelligentTieringAccessTier + /** + * The number of consecutive days of no access after which an object will be + * eligible to be transitioned to the corresponding tier. The minimum number of + * days specified for Archive Access tier must be at least 90 days and Deep Archive + * Access tier must be at least 180 days. The maximum can be up to 2 years (730 + * days). + * + * This member is required. + */ + days?: number + } + /** + * Specifies when an object transitions to a specified storage class. 
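+  *
+  * For example (an illustrative sketch; the "GLACIER" storage class value is an
+  * assumption), a transition 30 days after creation could be expressed as:
+  *
+  * ```
+  * { days: 30, storageClass: "GLACIER" }
+  * ```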
+  *
+  * For more information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle]in the Amazon S3
+  * User Guide.
+  *
+  * [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html
+  */
+ type _subTFGzr = noSmithyDocumentSerde
+ interface Transition extends _subTFGzr {
+  /**
+   * Indicates when objects are transitioned to the specified storage class. The
+   * date value must be in ISO 8601 format. The time is always midnight UTC.
+   */
+  date?: time.Time
+  /**
+   * Indicates the number of days after creation when objects are transitioned to
+   * the specified storage class. The value must be a positive integer.
+   */
+  days?: number
+  /**
+   * The storage class to which you want the object to transition.
+   */
+  storageClass: TransitionStorageClass
+ }
+ interface noSmithyDocumentSerde extends smithydocument.NoSerde{}
+}
+
+/**
+ * Package customizations provides customizations for the Amazon S3 API client.
+ *
+ * This package provides support for the following S3 customizations:
+ *
+ * ```
+ * ProcessARN Middleware: processes an ARN if provided as input and updates the endpoint as per the ARN type
+ *
+ * UpdateEndpoint Middleware: resolves a custom endpoint as per s3 config options
+ *
+ * RemoveBucket Middleware: removes a serialized bucket name from the request url path
+ *
+ * processResponseWith200Error Middleware: deserializes a response error with a 200 status code
+ * ```
+ *
+ * # Virtual Host style url addressing
+ *
+ * Since serializers serialize by default as path style url, we use customization
+ * to modify the endpoint url when the `UsePathStyle` option on S3Client is unset or
+ * false. This flag will be ignored if the `UseAccelerate` option is set to true.
+ *
+ * If UseAccelerate is not enabled, and the bucket name is not a valid hostname
+ * label, the SDK will fall back to forcing the request to be made as if
+ * UsePathStyle was enabled. This behavior is also used if UseDualStackEndpoint is enabled.
+ *
+ * https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description
+ *
+ * # Transfer acceleration
+ *
+ * By default S3 Transfer acceleration support is disabled. By enabling the `UseAccelerate`
+ * option on S3Client, one can enable s3 transfer acceleration support. Transfer
+ * acceleration only works with Virtual Host style addressing, and thus the `UsePathStyle`
+ * option, if set, is ignored. Transfer acceleration is not supported for the S3 operations
+ * DeleteBucket, ListBuckets, and CreateBucket.
+ *
+ * # Dualstack support
+ *
+ * By default dualstack support for the s3 client is disabled. By enabling the `UseDualstack`
+ * option on the s3 client, you can enable dualstack endpoint support.
+ *
+ * # Endpoint customizations
+ *
+ * Customizations to look up and process ARNs need to happen before request serialization.
+ * The UpdateEndpoint middleware, which mutates the resolved endpoint based on Options
+ * such as UseDualstack and UseAccelerate, is executed after
+ * request serialization.
+ * Remove bucket middleware is executed after a request is serialized, and removes
+ * the serialized bucket name from the request path.
+ *
+ * ```
+ * Middleware layering:
+ *
+ * Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step
+ *
+ * Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware
+ * ```
+ *
+ * Customization options:
+ *
+ * ```
+ * UseARNRegion (Disabled by Default)
+ *
+ * UsePathStyle (Disabled by Default)
+ *
+ * UseAccelerate (Disabled by Default)
+ *
+ * UseDualstack (Disabled by Default)
+ * ```
+ *
+ * # Handle Error response with 200 status code
+ *
+ * The S3 operations CopyObject, CompleteMultipartUpload, and UploadPartCopy can return an
+ * error response with a 2xx status code. The processResponseWith200Error middleware
+ * customization enables the SDK to check for an error within the response body prior to
+ * deserialization.
+ *
+ * The check for a 2xx response containing an error needs to be performed earlier
+ * than response deserialization. Since the behavior of the Deserialize step is in
+ * reverse order to the other stack steps, it is easier to consider that "after" means
+ * "before".
+ *
+ * ```
+ * Middleware layering:
+ *
+ * HTTP Response -> handle 200 error customization -> deserialize
+ * ```
+ */
+namespace customizations {
+ // @ts-ignore
+ import internalauthsmithy = smithy
+ /**
+  * S3ExpressCredentialsProvider retrieves credentials for the S3Express storage
+  * class.
+  */
+ interface S3ExpressCredentialsProvider {
+  [key:string]: any;
+  retrieve(ctx: context.Context, bucket: string): aws.Credentials
+ }
+ // @ts-ignore
+ import ictx = context
+ // @ts-ignore
+ import v4 = signer
+ // @ts-ignore
+ import smithyhttp = http
+ // @ts-ignore
+ import smithyxml = xml
+ // @ts-ignore
+ import awsmiddleware = middleware
+ // @ts-ignore
+ import s3arn = arn
+ // @ts-ignore
+ import internalendpoints = endpoints
+}
+
+/**
+ * Package mail implements parsing of mail messages.
+ *
+ * For the most part, this package follows the syntax as specified by RFC 5322 and
+ * extended by RFC 6532.
+ * Notable divergences:
+ * ```
+ * - Obsolete address formats are not parsed, including addresses with
+ * embedded route information.
+ * - The full range of spacing (the CFWS syntax element) is not supported,
+ * such as breaking addresses across lines.
+ * - No unicode normalization is performed.
+ * - A leading From line is permitted, as in mbox format (RFC 4155).
+ * ```
+ */
+namespace mail {
+ /**
+  * Address represents a single mail address.
+  * An address such as "Barry Gibbs <bg@example.com>" is represented
+  * as Address{Name: "Barry Gibbs", Address: "bg@example.com"}.
+  */
+ interface Address {
+  name: string // Proper name; may be empty.
+  address: string // user@domain
+ }
+ interface Address {
+  /**
+   * String formats the address as a valid RFC 5322 address.
+   * If the address's name contains non-ASCII characters
+   * the name will be rendered according to RFC 2047.
+   */
+  string(): string
+ }
+}
+
+namespace endpoints {
+ /**
+  * Endpoint is the endpoint object returned by Endpoint resolution V2
+  */
+ interface Endpoint {
+  /**
+   * The complete URL minimally specifying the scheme and host.
+   * May optionally specify the port and base path component.
+   */
+  uri: url.URL
+  /**
+   * An optional set of headers to be sent using transport layer headers.
+   */
+  headers: http.Header
+  /**
+   * A grab-bag property map of endpoint attributes.
+   * The values present here are subject to change, or to being added or removed,
+   * at any time.
+   */
+  properties: smithy.Properties
+ }
+}
+
+/**
+ * Package s3 provides the API client, operations, and parameter types for Amazon
+ * Simple Storage Service.
+ */
+namespace s3 {
+ // @ts-ignore
+ import awsmiddleware = middleware
+ // @ts-ignore
+ import awshttp = http
+ // @ts-ignore
+ import internalauth = auth
+ // @ts-ignore
+ import internalauthsmithy = smithy
+ // @ts-ignore
+ import internalConfig = configsources
+ // @ts-ignore
+ import internalmiddleware = middleware
+ // @ts-ignore
+ import acceptencodingcust = accept_encoding
+ // @ts-ignore
+ import internalChecksum = checksum
+ // @ts-ignore
+ import presignedurlcust = presigned_url
+ // @ts-ignore
+ import s3sharedconfig = config
+ // @ts-ignore
+ import s3cust = customizations
+ // @ts-ignore
+ import smithy = smithy_go
+ // @ts-ignore
+ import smithyauth = auth
+ // @ts-ignore
+ import smithydocument = document
+ // @ts-ignore
+ import smithyhttp = http
+ // @ts-ignore
+ import smithyxml = xml
+ // @ts-ignore
+ import smithyio = io
+ // @ts-ignore
+ import smithytime = time
+ // @ts-ignore
+ import smithywaiter = waiter
+ // @ts-ignore
+ import smithysync = sync
+ /**
+  * AuthResolverParameters contains the set of inputs necessary for auth scheme
+  * resolution.
+  */
+ interface AuthResolverParameters {
+  /**
+   * The name of the operation being invoked.
+   */
+  operation: string
+  /**
+   * The region in which the operation is being invoked.
+   */
+  region: string
+ }
+ // @ts-ignore
+ import internalcontext = context
+ // @ts-ignore
+ import awsxml = xml
+ // @ts-ignore
+ import internalendpoints = endpoints
+ // @ts-ignore
+ import smithyendpoints = endpoints
+ /**
+  * EndpointParameters provides the parameters that influence how endpoints are
+  * resolved.
+  */
+ interface EndpointParameters {
+  /**
+   * The S3 bucket used to send the request. This is an optional parameter that will
+   * be set automatically for operations that are scoped to an S3 bucket.
+   *
+   * Parameter
+   * is required.
+   */
+  bucket?: string
+  /**
+   * The AWS region used to dispatch the request.
+   *
+   * Parameter is
+   * required.
+   *
+   * AWS::Region
+   */
+  region?: string
+  /**
+   * When true, send this request to the FIPS-compliant regional endpoint. If the
+   * configured endpoint does not have a FIPS compliant endpoint, dispatching the
+   * request will return an error.
+   *
+   * Defaults to false if no value is
+   * provided.
+   *
+   * AWS::UseFIPS
+   */
+  useFIPS?: boolean
+  /**
+   * When true, use the dual-stack endpoint. If the configured endpoint does not
+   * support dual-stack, dispatching the request MAY return an error.
+   *
+   * Defaults to
+   * false if no value is provided.
+   *
+   * AWS::UseDualStack
+   */
+  useDualStack?: boolean
+  /**
+   * Override the endpoint used to send this request
+   *
+   * Parameter is
+   * required.
+   *
+   * SDK::Endpoint
+   */
+  endpoint?: string
+  /**
+   * When true, force a path-style endpoint to be used where the bucket name is part
+   * of the path.
+   *
+   * Defaults to false if no value is
+   * provided.
+   *
+   * AWS::S3::ForcePathStyle
+   */
+  forcePathStyle?: boolean
+  /**
+   * When true, use S3 Accelerate. NOTE: Not all regions support S3
+   * accelerate.
+   *
+   * Defaults to false if no value is provided.
+   *
+   * AWS::S3::Accelerate
+   */
+  accelerate?: boolean
+  /**
+   * Whether the global endpoint should be used, rather than the regional endpoint
+   * for us-east-1.
+   *
+   * Defaults to false if no value is
+   * provided.
+   *
+   * AWS::S3::UseGlobalEndpoint
+   */
+  useGlobalEndpoint?: boolean
+  /**
+   * Internal parameter to use object lambda endpoint for an operation (e.g.,
+   * WriteGetObjectResponse)
+   *
+   * Parameter is required.
+   */
+  useObjectLambdaEndpoint?: boolean
+  /**
+   * The S3 Key used to send the request. This is an optional parameter that will be
+   * set automatically for operations that are scoped to an S3 Key.
+   *
+   * Parameter is
+   * required.
+   */
+  key?: string
+  /**
+   * The S3 Prefix used to send the request. This is an optional parameter that will
+   * be set automatically for operations that are scoped to an S3 Prefix.
+   *
+   * Parameter
+   * is required.
+   */
+  prefix?: string
+  /**
+   * The Copy Source used for the Copy Object request. This is an optional parameter
+   * that will be set automatically for operations that are scoped to Copy
+   * Source.
+   *
+   * Parameter is required.
+   */
+  copySource?: string
+  /**
+   * Internal parameter to disable Access Point Buckets
+   *
+   * Parameter is required.
+   */
+  disableAccessPoints?: boolean
+  /**
+   * Whether multi-region access points (MRAP) should be disabled.
+   *
+   * Defaults to false
+   * if no value is provided.
+   *
+   * AWS::S3::DisableMultiRegionAccessPoints
+   */
+  disableMultiRegionAccessPoints?: boolean
+  /**
+   * When an Access Point ARN is provided and this flag is enabled, the SDK MUST use
+   * the ARN's region when constructing the endpoint instead of the client's
+   * configured region.
+   *
+   * Parameter is required.
+   *
+   * AWS::S3::UseArnRegion
+   */
+  useArnRegion?: boolean
+  /**
+   * Internal parameter to indicate whether S3Express operation should use control
+   * plane (e.g., CreateBucket)
+   *
+   * Parameter is required.
+   */
+  useS3ExpressControlEndpoint?: boolean
+  /**
+   * Parameter to indicate whether S3Express session auth should be
+   * disabled
+   *
+   * Parameter is required.
+   */
+  disableS3ExpressSessionAuth?: boolean
+ }
+ interface EndpointParameters {
+  /**
+   * ValidateRequired validates required parameters are set.
+   */
+  validateRequired(): void
+ }
+ interface EndpointParameters {
+  /**
+   * WithDefaults returns a shallow copy of EndpointParameters with default values
+   * applied to members where applicable.
+   */
+  withDefaults(): EndpointParameters
+ }
+ /**
+  * SelectObjectContentEventStreamReader provides the interface for reading events
+  * from a stream.
+  *
+  * The reader's Close method must allow multiple concurrent calls.
+  */
+ interface SelectObjectContentEventStreamReader {
+  [key:string]: any;
+  events(): undefined
+  close(): void
+  err(): void
+ }
+ // @ts-ignore
+ import v4 = signer
+}
+
+namespace search {
+}
+
+/**
+ * Package middleware provides transport agnostic middleware for decorating SDK
+ * handlers.
+ *
+ * The Smithy middleware stack provides ordered behavior to be invoked on an
+ * underlying handler. The stack is separated into steps that are invoked in a
+ * static order. A step is a collection of middleware that are injected into an
+ * ordered list defined by the user. The user may add, insert, swap, and remove a
+ * step's middleware. When the stack is invoked, the step middleware become static,
+ * and their order cannot be modified.
+ *
+ * A stack and its step middleware are **not** safe to modify concurrently.
+ *
+ * A stack will use the ordered list of middleware to decorate an underlying
+ * handler. A handler could be something like an HTTP Client that round trips an
+ * API operation over HTTP.
+ *
+ * Smithy Middleware Stack
+ *
+ * A Stack is a collection of middleware that wrap a handler.
+ * The stack can be broken down into discrete steps. Each step may contain zero or more
+ * middleware specific to that stack's step.
+ *
+ * A Stack Step is a predefined set of middleware that are invoked in a static
+ * order by the Stack. These steps represent fixed points in the middleware stack
+ * for organizing specific behavior, such as serialize and build. A Stack Step is
+ * composed of zero or more middleware that are specific to that step. A step may
+ * define its own set of input/output parameters that the generic input/output
+ * parameters are cast from. A step calls its middleware recursively, before
+ * calling the next step in the stack returning the result or error of the step
+ * middleware decorating the underlying handler.
+ *
+ * * Initialize: Prepares the input, and sets any default parameters as needed,
+ * (e.g. idempotency token, and presigned URLs).
+ *
+ * * Serialize: Serializes the prepared input into a data structure that can be
+ * consumed by the target transport's message, (e.g. REST-JSON serialization).
+ *
+ * * Build: Adds additional metadata to the serialized transport message, (e.g.
+ * HTTP's Content-Length header, or body checksum). Decorations and
+ * modifications to the message should be copied to all message attempts.
+ *
+ * * Finalize: Performs final preparations needed before sending the message. The
+ * message should already be complete by this stage, and is only altered to
+ * meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+ * signing).
+ *
+ * * Deserialize: Reacts to the handler's response returned by the recipient of
+ * the request message. Deserializes the response into a structured type or
+ * error that middleware higher up in the stack can react to.
+ *
+ * Adding Middleware to a Stack Step
+ *
+ * Middleware can be added to a step front or back, or relative, by name, to an
+ * existing middleware in that stack. If a middleware does not have a name, a
+ * unique name will be generated for the middleware and added to the step.
+ *
+ * ```
+ * // Create middleware stack
+ * stack := middleware.NewStack()
+ *
+ * // Add middleware to stack steps
+ * stack.Initialize.Add(paramValidationMiddleware, middleware.After)
+ * stack.Serialize.Add(marshalOperationFoo, middleware.After)
+ * stack.Deserialize.Add(unmarshalOperationFoo, middleware.After)
+ *
+ * // Invoke middleware on handler.
+ * resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler)
+ * ```
+ */
+namespace middleware {
+ /**
+  * BuildInput provides the input parameters for the BuildMiddleware to consume.
+  * BuildMiddleware may modify the Request value before forwarding the input
+  * along to the next BuildHandler.
+  */
+ interface BuildInput {
+  request: {
+  }
+ }
+ /**
+  * BuildOutput provides the result returned by the next BuildHandler.
+  */
+ interface BuildOutput {
+  result: {
+  }
+ }
+ /**
+  * BuildHandler provides the interface for the next handler the
+  * BuildMiddleware will call in the middleware chain.
+  */
+ interface BuildHandler {
+  [key:string]: any;
+  handleBuild(ctx: context.Context, _arg10: BuildInput): [BuildOutput, Metadata]
+ }
+ /**
+  * DeserializeInput provides the input parameters for the DeserializeMiddleware to
+  * consume. DeserializeMiddleware should not modify the Request, and instead
+  * forward it along to the next DeserializeHandler.
+  */
+ interface DeserializeInput {
+  request: {
+  }
+ }
+ /**
+  * DeserializeOutput provides the result returned by the next
+  * DeserializeHandler.
+  * The DeserializeMiddleware should deserialize the
+  * RawResponse into a Result that can be consumed by middleware higher up in
+  * the stack.
+  */
+ interface DeserializeOutput {
+  rawResponse: {
+  }
+  result: {
+  }
+ }
+ /**
+  * DeserializeHandler provides the interface for the next handler the
+  * DeserializeMiddleware will call in the middleware chain.
+  */
+ interface DeserializeHandler {
+  [key:string]: any;
+  handleDeserialize(ctx: context.Context, _arg10: DeserializeInput): [DeserializeOutput, Metadata]
+ }
+ /**
+  * FinalizeInput provides the input parameters for the FinalizeMiddleware to
+  * consume. FinalizeMiddleware may modify the Request value before forwarding
+  * the FinalizeInput along to the next FinalizeHandler.
+  */
+ interface FinalizeInput {
+  request: {
+  }
+ }
+ /**
+  * FinalizeOutput provides the result returned by the next FinalizeHandler.
+  */
+ interface FinalizeOutput {
+  result: {
+  }
+ }
+ /**
+  * FinalizeHandler provides the interface for the next handler the
+  * FinalizeMiddleware will call in the middleware chain.
+  */
+ interface FinalizeHandler {
+  [key:string]: any;
+  handleFinalize(ctx: context.Context, _arg10: FinalizeInput): [FinalizeOutput, Metadata]
+ }
+ /**
+  * InitializeInput wraps the input parameters for the InitializeMiddlewares to
+  * consume. InitializeMiddleware may modify the parameter value before
+  * forwarding it along to the next InitializeHandler.
+  */
+ interface InitializeInput {
+  parameters: {
+  }
+ }
+ /**
+  * InitializeOutput provides the result returned by the next InitializeHandler.
+  */
+ interface InitializeOutput {
+  result: {
+  }
+ }
+ /**
+  * InitializeHandler provides the interface for the next handler the
+  * InitializeMiddleware will call in the middleware chain.
+  */
+ interface InitializeHandler {
+  [key:string]: any;
+  handleInitialize(ctx: context.Context, _arg10: InitializeInput): [InitializeOutput, Metadata]
+ }
+ /**
+  * SerializeInput provides the input parameters for the SerializeMiddleware to
+  * consume. SerializeMiddleware may modify the Request value before forwarding
+  * SerializeInput along to the next SerializeHandler. The Parameters member
+  * should not be modified by SerializeMiddleware; InitializeMiddleware should
+  * be responsible for modifying the provided Parameter value.
+  */
+ interface SerializeInput {
+  parameters: {
+  }
+  request: {
+  }
+ }
+ /**
+  * SerializeOutput provides the result returned by the next SerializeHandler.
+  */
+ interface SerializeOutput {
+  result: {
+  }
+ }
+ /**
+  * SerializeHandler provides the interface for the next handler the
+  * SerializeMiddleware will call in the middleware chain.
+  */
+ interface SerializeHandler {
+  [key:string]: any;
+  handleSerialize(ctx: context.Context, _arg10: SerializeInput): [SerializeOutput, Metadata]
+ }
+}
+
+/**
+ * Package smithy provides the core components for a Smithy SDK.
+ */
+namespace smithy_go {
+ /**
+  * Properties provides storing and reading metadata values. Keys may be any
+  * comparable value type. Get and Set will panic if a key is not comparable.
+  *
+  * The zero value for a Properties instance is ready for reads/writes without
+  * any additional initialization.
+  */
+ interface Properties {
+ }
+ interface Properties {
+  /**
+   * Get attempts to retrieve the value the key points to. Returns nil if the
+   * key was not found.
+   *
+   * Panics if key type is not comparable.
+   */
+  get(key: any): any
+ }
+ interface Properties {
+  /**
+   * Set stores the value pointed to by the key.
+   * If a value already exists at
+   * that key, it will be replaced with the new value.
+   *
+   * Panics if the key type is not comparable.
+   */
+  set(key: any, value: any): void
+ }
+ interface Properties {
+  /**
+   * Has returns whether the key exists in the metadata.
+   *
+   * Panics if the key type is not comparable.
+   */
+  has(key: any): boolean
+ }
+ interface Properties {
+  /**
+   * SetAll accepts all of the given Properties into the receiver, overwriting
+   * any existing keys in the case of conflicts.
+   */
+  setAll(other: Properties): void
+ }
+ interface Properties {
+  /**
+   * Values returns a shallow clone of the property set's values.
+   */
+  values(): _TygojaDict
+ }
+}
+
+/**
+ * Package metrics defines the metrics APIs used by Smithy clients.
+ */
+namespace metrics {
+ /**
+  * MeterOptions represents configuration for a Meter.
+  */
+ interface MeterOptions {
+  properties: smithy.Properties
+ }
+ /**
+  * InstrumentOption applies configuration to an instrument.
+  */
+ interface InstrumentOption {(o: InstrumentOptions): void }
+ /**
+  * Int64Counter measures a monotonically increasing int64 value.
+  */
+ interface Int64Counter {
+  [key:string]: any;
+  add(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+  * Int64UpDownCounter measures a fluctuating int64 value.
+  */
+ interface Int64UpDownCounter {
+  [key:string]: any;
+  add(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+  * Int64Gauge samples a discrete int64 value.
+  */
+ interface Int64Gauge {
+  [key:string]: any;
+  sample(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+  * Int64Histogram records multiple data points for an int64 value.
+  */
+ interface Int64Histogram {
+  [key:string]: any;
+  record(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+  * Float64Counter measures a monotonically increasing float64 value.
+  */
+ interface Float64Counter {
+  [key:string]: any;
+  add(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+  * Float64UpDownCounter measures a fluctuating float64 value.
+  */
+ interface Float64UpDownCounter {
+  [key:string]: any;
+  add(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+  * Float64Gauge samples a discrete float64 value.
+  */
+ interface Float64Gauge {
+  [key:string]: any;
+  sample(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+  * Float64Histogram records multiple data points for a float64 value.
+  */
+ interface Float64Histogram {
+  [key:string]: any;
+  record(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+  * AsyncInstrument is the universal handle returned for creation of all async
+  * instruments.
+  *
+  * Callers use the Stop() API to unregister the callback passed at instrument
+  * creation.
+  */
+ interface AsyncInstrument {
+  [key:string]: any;
+  stop(): void
+ }
+ /**
+  * Int64Callback describes a function invoked when an async int64 instrument is
+  * read.
+  */
+ interface Int64Callback {(_arg0: context.Context, _arg1: Int64Observer): void }
+ /**
+  * Float64Callback describes a function invoked when an async float64
+  * instrument is read.
+  */
+ interface Float64Callback {(_arg0: context.Context, _arg1: Float64Observer): void }
+}
+
+/**
+ * Package tracing defines tracing APIs to be used by Smithy clients.
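+ *
+ * A brief, purely illustrative usage sketch (the `span` variable is a
+ * hypothetical value implementing the Span interface declared below):
+ *
+ * ```
+ * span.addEvent("cache.miss")
+ * span.setProperty("attempt", 2)
+ * span.end()
+ * ```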
+ */
+namespace tracing {
+ /**
+  * TracerOptions represent configuration for tracers.
+  */
+ interface TracerOptions {
+  properties: smithy.Properties
+ }
+ /**
+  * SpanOption applies configuration to a span.
+  */
+ interface SpanOption {(o: SpanOptions): void }
+ /**
+  * Span records a conceptually individual unit of work that takes place in a
+  * Smithy client operation.
+  */
+ interface Span {
+  [key:string]: any;
+  name(): string
+  context(): SpanContext
+  addEvent(name: string, ...opts: EventOption[]): void
+  setStatus(status: SpanStatus): void
+  setProperty(k: any, v: any): void
+  end(): void
+ }
+}
+
+/**
+ * Package http provides the HTTP transport client and request/response types
+ * needed to round trip API operation calls with a service.
+ */
+namespace http {
+ // @ts-ignore
+ import smithy = smithy_go
+ // @ts-ignore
+ import iointernal = io
+ /**
+  * Request provides the HTTP specific request structure for HTTP specific
+  * middleware steps to use to serialize input, and send an operation's request.
+  */
+ type _subOtvso = http.Request
+ interface Request extends _subOtvso {
+ }
+ interface Request {
+  /**
+   * IsHTTPS returns if the request is HTTPS. Returns false if no endpoint URL is set.
+   */
+  isHTTPS(): boolean
+ }
+ interface Request {
+  /**
+   * Clone returns a deep copy of the Request for the new context. A reference to
+   * the Stream is copied, but the underlying stream is not copied.
+   */
+  clone(): (Request)
+ }
+ interface Request {
+  /**
+   * StreamLength returns the number of bytes of the serialized stream attached
+   * to the request and ok set. If the length cannot be determined, an error will
+   * be returned.
+   */
+  streamLength(): [number, boolean]
+ }
+ interface Request {
+  /**
+   * RewindStream will rewind the io.Reader to the relative start position if it
+   * is an io.Seeker.
+   */
+  rewindStream(): void
+ }
+ interface Request {
+  /**
+   * GetStream returns the request stream io.Reader if a stream is set. If no
+   * stream is present nil will be returned.
+   */
+  getStream(): io.Reader
+ }
+ interface Request {
+  /**
+   * IsStreamSeekable returns whether the stream is seekable.
+   */
+  isStreamSeekable(): boolean
+ }
+ interface Request {
+  /**
+   * SetStream returns a clone of the request with the stream set to the provided
+   * reader. May return an error if the provided reader is seekable but returns
+   * an error.
+   */
+  setStream(reader: io.Reader): (Request)
+ }
+ interface Request {
+  /**
+   * Build returns a built standard HTTP request value from the Smithy request.
+   * The request's stream is wrapped in a safe container that allows it to be
+   * reused for subsequent attempts.
+   */
+  build(ctx: context.Context): (http.Request)
+ }
+ // @ts-ignore
+ import smithytime = time
+}
+
 /**
  * Package slog provides structured logging,
  * in which log records include a message,
@@ -20555,8 +44551,10 @@ namespace slog {
  * Now computeExpensiveValue will only be called when the line is enabled.
  *
  * The built-in handlers acquire a lock before calling [io.Writer.Write]
- * to ensure that each record is written in one piece. User-defined
- * handlers are responsible for their own locking.
+ * to ensure that exactly one [Record] is written at a time in its entirety.
+ * Although each log record has a timestamp,
+ * the built-in handlers do not use that time to sort the written records.
+ * User-defined handlers are responsible for their own locking and sorting.
 *
 * # Writing a handler
 *
@@ -20583,3 +44581,1151 @@ namespace slog {
   logValue(): Value
  }
 }
+
+/**
+ * Package aws provides the core SDK's utilities and shared types. Use this package's
+ * utilities to simplify setting and reading API operation parameters.
+ *
+ * # Value and Pointer Conversion Utilities
+ *
+ * This package includes a helper conversion utility for each scalar type the SDK's
+ * APIs use. These utilities make getting a pointer of the scalar, and dereferencing
+ * a pointer easier.
+ *
+ * Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+ * The Pointer to value will safely dereference the pointer and return its value.
+ * If the pointer was nil, the scalar's zero value will be returned.
+ *
+ * The value to pointer functions will be named after the scalar type. So to get a
+ * *string from a string value, use the "String" function. This makes it easy
+ * to get a pointer to a literal string value, because getting the address of a
+ * literal requires assigning the value to a variable first.
+ *
+ * ```
+ * var strPtr *string
+ *
+ * // Without the SDK's conversion functions
+ * str := "my string"
+ * strPtr = &str
+ *
+ * // With the SDK's conversion functions
+ * strPtr = aws.String("my string")
+ *
+ * // Convert *string to string value
+ * str = aws.ToString(strPtr)
+ * ```
+ *
+ * In addition to scalars, the aws package also includes conversion utilities for
+ * maps and slices of commonly used types in API parameters. The map and slice
+ * conversion functions use a similar naming pattern as the scalar conversion
+ * functions.
+ *
+ * ```
+ * var strPtrs []*string
+ * var strs []string = []string{"Go", "Gophers", "Go"}
+ *
+ * // Convert []string to []*string
+ * strPtrs = aws.StringSlice(strs)
+ *
+ * // Convert []*string to []string
+ * strs = aws.ToStringSlice(strPtrs)
+ * ```
+ *
+ * # SDK Default HTTP Client
+ *
+ * The SDK will use the http.DefaultClient if an HTTP client is not provided to
+ * the SDK's Session, or service client constructor. This means that if the
+ * http.DefaultClient is modified by other components of your application the
+ * modifications will be picked up by the SDK as well.
+ *
+ * In some cases this might be intended, but it is a better practice to create
+ * a custom HTTP Client to share explicitly through your application. You can
+ * configure the SDK to use the custom HTTP Client by setting the HTTPClient
+ * value of the SDK's Config type when creating a Session or service client.
+ */
+/**
+ * Package aws provides core functionality for making requests to AWS services.
+ */
+namespace aws {
+ // @ts-ignore
+ import smithybearer = bearer
+ // @ts-ignore
+ import sdkrand = rand
+ /**
+  * DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
+  */
+ interface DualStackEndpointState extends Number{}
+ /**
+  * FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+  */
+ interface FIPSEndpointState extends Number{}
+}
+
+namespace types {
+ interface BucketLogsPermission extends String{}
+ interface BucketLogsPermission {
+  /**
+   * Values returns all known values for BucketLogsPermission. Note that this can be
+   * expanded in the future, and so it is only as up to date as the client.
+   *
+   * The ordering of this slice is not guaranteed to be stable across updates.
+   */
+  values(): Array
+ }
+ interface BucketType extends String{}
+ interface BucketType {
+  /**
+   * Values returns all known values for BucketType. 
Note that this can be expanded + * in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface DataRedundancy extends String{} + interface DataRedundancy { + /** + * Values returns all known values for DataRedundancy. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface FileHeaderInfo extends String{} + interface FileHeaderInfo { + /** + * Values returns all known values for FileHeaderInfo. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface IntelligentTieringAccessTier extends String{} + interface IntelligentTieringAccessTier { + /** + * Values returns all known values for IntelligentTieringAccessTier. Note that + * this can be expanded in the future, and so it is only as up to date as the + * client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface InventoryFrequency extends String{} + interface InventoryFrequency { + /** + * Values returns all known values for InventoryFrequency. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface JSONType extends String{} + interface JSONType { + /** + * Values returns all known values for JSONType. Note that this can be expanded in + * the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface QuoteFields extends String{} + interface QuoteFields { + /** + * Values returns all known values for QuoteFields. Note that this can be expanded + * in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ReplicationRuleStatus extends String{} + interface ReplicationRuleStatus { + /** + * Values returns all known values for ReplicationRuleStatus. Note that this can + * be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface TransitionStorageClass extends String{} + interface TransitionStorageClass { + /** + * Values returns all known values for TransitionStorageClass. Note that this can + * be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface Type extends String{} + interface Type { + /** + * Values returns all known values for Type. Note that this can be expanded in the + * future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. 
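+   *
+   * A purely illustrative usage sketch (the `t` value is hypothetical):
+   *
+   * ```
+   * for (const v of t.values()) {
+   *   console.log(v)
+   * }
+   * ```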
+   */
+  values(): Array
+ }
+ // @ts-ignore
+ import smithy = smithy_go
+ // @ts-ignore
+ import smithydocument = document
+ /**
+  * The container element for optionally specifying the default Object Lock
+  * retention settings for new objects placed in the specified bucket.
+  *
+  * ```
+  * - The DefaultRetention settings require both a mode and a period.
+  *
+  * - The DefaultRetention period can be either Days or Years but you must select
+  * one. You cannot specify Days and Years at the same time.
+  * ```
+  */
+ type _subrbktc = noSmithyDocumentSerde
+ interface DefaultRetention extends _subrbktc {
+  /**
+   * The number of days that you want to specify for the default retention period.
+   * Must be used with Mode .
+   */
+  days?: number
+  /**
+   * The default Object Lock retention mode you want to apply to new objects placed
+   * in the specified bucket. Must be used with either Days or Years .
+   */
+  mode: ObjectLockRetentionMode
+  /**
+   * The number of years that you want to specify for the default retention period.
+   * Must be used with Mode .
+   */
+  years?: number
+ }
+ /**
+  * Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+  * in your replication configuration, you must also include a
+  * DeleteMarkerReplication element. If your Filter includes a Tag element, the
+  * DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does
+  * not support replicating delete markers for tag-based rules. For an example
+  * configuration, see [Basic Rule Configuration].
+  *
+  * For more information about delete marker replication, see [Basic Rule Configuration].
+  *
+  * If you are using an earlier version of the replication configuration, Amazon S3
+  * handles replication of delete markers differently. For more information, see [Backward Compatibility].
+  *
+  * [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html
+  * [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations
+  */
+ type _subFrndK = noSmithyDocumentSerde
+ interface DeleteMarkerReplication extends _subFrndK {
+  /**
+   * Indicates whether to replicate delete markers.
+   */
+  status: DeleteMarkerReplicationStatus
+ }
+ /**
+  * Specifies information about where to publish analysis or configuration results
+  * for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+  */
+ type _subOaKdq = noSmithyDocumentSerde
+ interface Destination extends _subOaKdq {
+  /**
+   * The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+   * the results.
+   *
+   * This member is required.
+   */
+  bucket?: string
+  /**
+   * Specify this only in a cross-account scenario (where source and destination
+   * bucket owners are not the same), and you want to change replica ownership to the
+   * Amazon Web Services account that owns the destination bucket. If this is not
+   * specified in the replication configuration, the replicas are owned by the same
+   * Amazon Web Services account that owns the source object.
+   */
+  accessControlTranslation?: AccessControlTranslation
+  /**
+   * Destination bucket owner account ID. In a cross-account scenario, if you direct
+   * Amazon S3 to change replica ownership to the Amazon Web Services account that
+   * owns the destination bucket by specifying the AccessControlTranslation
+   * property, this is the account ID of the destination bucket owner. 
For more + * information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide. + * + * [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html + */ + account?: string + /** + * A container that provides information about encryption. If + * SourceSelectionCriteria is specified, you must specify this element. + */ + encryptionConfiguration?: EncryptionConfiguration + /** + * A container specifying replication metrics-related settings enabling + * replication metrics and events. + */ + metrics?: Metrics + /** + * A container specifying S3 Replication Time Control (S3 RTC), including whether + * S3 RTC is enabled and the time when all objects and operations on objects must + * be replicated. Must be specified together with a Metrics block. + */ + replicationTime?: ReplicationTime + /** + * The storage class to use when replicating objects, such as S3 Standard or + * reduced redundancy. By default, Amazon S3 uses the storage class of the source + * object to create the object replica. + * + * For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3 + * API Reference. + * + * [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html + */ + storageClass: StorageClass + } + /** + * Optional configuration to replicate existing source bucket objects. + * + * This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in + * the Amazon S3 User Guide. + * + * [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html + */ + type _subVHygh = noSmithyDocumentSerde + interface ExistingObjectReplication extends _subVHygh { + /** + * Specifies whether Amazon S3 replicates existing source bucket objects. + * + * This member is required. + */ + status: ExistingObjectReplicationStatus + } + /** + * A container for specifying S3 Intelligent-Tiering filters. The filters + * determine the subset of objects to which the rule applies. + */ + type _subocteQ = noSmithyDocumentSerde + interface IntelligentTieringAndOperator extends _subocteQ { + /** + * An object key name prefix that identifies the subset of objects to which the + * configuration applies. + */ + prefix?: string + /** + * All of these tags must exist in the object's tag set in order for the + * configuration to apply. + */ + tags: Array + } + /** + * Contains the bucket name, file format, bucket owner (optional), and prefix + * (optional) where inventory results are published. + */ + type _subPmIjs = noSmithyDocumentSerde + interface InventoryS3BucketDestination extends _subPmIjs { + /** + * The Amazon Resource Name (ARN) of the bucket where inventory results will be + * published. + * + * This member is required. + */ + bucket?: string + /** + * Specifies the output format of the inventory results. + * + * This member is required. + */ + format: InventoryFormat + /** + * The account ID that owns the destination S3 bucket. If no account ID is + * provided, the owner is not validated before exporting data. + * + * Although this value is optional, we strongly recommend that you set it to help + * prevent problems if the destination bucket ownership changes. 
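+   *
+   * For example (an illustrative sketch; the ARN, format value, and account ID
+   * are assumptions):
+   *
+   * ```
+   * { bucket: "arn:aws:s3:::example-inventory-destination", format: "CSV", accountId: "111122223333" }
+   * ```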
+ */ + accountId?: string + /** + * Contains the type of server-side encryption used to encrypt the inventory + * results. + */ + encryption?: InventoryEncryption + /** + * The prefix that is prepended to all inventory results. + */ + prefix?: string + } + /** + * Amazon S3 keys for log objects are partitioned in the following format: + * + * ``` + * [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] + * ``` + * + * PartitionedPrefix defaults to EventTime delivery when server access logs are + * delivered. + */ + type _subyQbYy = noSmithyDocumentSerde + interface PartitionedPrefix extends _subyQbYy { + /** + * Specifies the partition date source for the partitioned prefix. + * PartitionDateSource can be EventTime or DeliveryTime . + * + * For DeliveryTime , the time in the log file names corresponds to the delivery + * time for the log files. + * + * For EventTime , The logs delivered are for a specific day only. The year, month, + * and day correspond to the day on which the event occurred, and the hour, minutes + * and seconds are set to 00 in the key. + */ + partitionDateSource: PartitionDateSource + } + /** + * A filter that identifies the subset of objects to which the replication rule + * applies. A Filter must specify exactly one Prefix , Tag , or an And child + * element. + * + * The following types satisfy this interface: + * + * ``` + * ReplicationRuleFilterMemberAnd + * ReplicationRuleFilterMemberPrefix + * ReplicationRuleFilterMemberTag + * ``` + */ + interface ReplicationRuleFilter { + [key:string]: any; + } + /** + * A container for object key name prefix and suffix filtering rules. + */ + type _subADdoW = noSmithyDocumentSerde + interface S3KeyFilter extends _subADdoW { + /** + * A list of containers for the key-value pair that defines the criteria for the + * filter rule. + */ + filterRules: Array + } + /** + * Describes an Amazon S3 location that will receive the results of the restore + * request. + */ + type _subKacLP = noSmithyDocumentSerde + interface S3Location extends _subKacLP { + /** + * The name of the bucket where the restore results will be placed. + * + * This member is required. + */ + bucketName?: string + /** + * The prefix that is prepended to the restore results for this request. + * + * This member is required. + */ + prefix?: string + /** + * A list of grants that control access to the staged results. + */ + accessControlList: Array + /** + * The canned ACL to apply to the restore results. + */ + cannedACL: ObjectCannedACL + /** + * Contains the type of server-side encryption used. + */ + encryption?: Encryption + /** + * The class of storage used to store the restore results. + */ + storageClass: StorageClass + /** + * The tag-set that is applied to the restore results. + */ + tagging?: Tagging + /** + * A list of metadata to store with the restore results in S3. + */ + userMetadata: Array + } + /** + * Describes the default server-side encryption to apply to new objects in the + * bucket. If a PUT Object request doesn't specify any server-side encryption, this + * default encryption will be applied. For more information, see [PutBucketEncryption]. + * + * ``` + * - General purpose buckets - If you don't specify a customer managed key at + * configuration, Amazon S3 automatically creates an Amazon Web Services KMS key ( + * aws/s3 ) in your Amazon Web Services account the first time that you add an + * object encrypted with SSE-KMS to a bucket. 
By default, Amazon S3 uses this KMS + * key for SSE-KMS. + * + * - Directory buckets - Your SSE-KMS configuration can only support 1 [customer managed key]per + * directory bucket for the lifetime of the bucket. [Amazon Web Services managed key]( aws/s3 ) isn't supported. + * + * - Directory buckets - For directory buckets, there are only two supported + * options for server-side encryption: SSE-S3 and SSE-KMS. + * ``` + * + * [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html + * [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + */ + type _subTvCNQ = noSmithyDocumentSerde + interface ServerSideEncryptionByDefault extends _subTvCNQ { + /** + * Server-side encryption algorithm to use for the default encryption. + * + * For directory buckets, there are only two supported values for server-side + * encryption: AES256 and aws:kms . + * + * This member is required. + */ + sseAlgorithm: ServerSideEncryption + /** + * Amazon Web Services Key Management Service (KMS) customer managed key ID to use + * for the default encryption. + * + * ``` + * - General purpose buckets - This parameter is allowed if and only if + * SSEAlgorithm is set to aws:kms or aws:kms:dsse . + * + * - Directory buckets - This parameter is allowed if and only if SSEAlgorithm is + * set to aws:kms . + * ``` + * + * You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the + * KMS key. + * + * ``` + * - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + * + * - Key ARN: + * arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + * + * - Key Alias: alias/alias-name + * ``` + * + * If you are using encryption with cross-account or Amazon Web Services service + * operations, you must use a fully qualified KMS key ARN. For more information, + * see [Using encryption for cross-account operations]. + * + * ``` + * - General purpose buckets - If you're specifying a customer managed KMS key, + * we recommend using a fully qualified KMS key ARN. If you use a KMS key alias + * instead, then KMS resolves the key within the requester’s account. This behavior + * can result in data that's encrypted with a KMS key that belongs to the + * requester, and not the bucket owner. Also, if you use a key ID, you can run into + * a LogDestination undeliverable error when creating a VPC flow log. + * + * - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory + * bucket, only use the key ID or key ARN. The key alias format of the KMS key + * isn't supported. + * ``` + * + * Amazon S3 only supports symmetric encryption KMS keys. For more information, + * see [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + * + * [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy + * [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + * [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + */ + kmsMasterKeyID?: string + } + /** + * To use simple format for S3 keys for log objects, set SimplePrefix to an empty + * object. 
+ *
+ * ```
+ * [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+ * ```
+ */
+ type _subXYSdU = noSmithyDocumentSerde
+ interface SimplePrefix extends _subXYSdU {
+ }
+ /**
+ * A container that describes additional filters for identifying the source
+ * objects that you want to replicate. You can choose to enable or disable the
+ * replication of these objects. Currently, Amazon S3 supports only the filter that
+ * you can specify for objects created with server-side encryption using a customer
+ * managed key stored in Amazon Web Services Key Management Service (SSE-KMS).
+ */
+ type _subhEJDM = noSmithyDocumentSerde
+ interface SourceSelectionCriteria extends _subhEJDM {
+ /**
+ * A filter that you can specify for selections for modifications on replicas.
+ * Amazon S3 doesn't replicate replica modifications by default. In the latest
+ * version of replication configuration (when Filter is specified), you can
+ * specify this element and set the status to Enabled to replicate modifications
+ * on replicas.
+ *
+ * If you don't specify the Filter element, Amazon S3 assumes that the replication
+ * configuration is the earlier version, V1. In the earlier version, this element
+ * is not allowed.
+ */
+ replicaModifications?: ReplicaModifications
+ /**
+ * A container for filter information for the selection of Amazon S3 objects
+ * encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria
+ * in the replication configuration, this element is required.
+ */
+ sseKmsEncryptedObjects?: SseKmsEncryptedObjects
+ }
+ /**
+ * Container for data related to the storage class analysis for an Amazon S3
+ * bucket for export.
+ */
+ type _subaCjhj = noSmithyDocumentSerde
+ interface StorageClassAnalysisDataExport extends _subaCjhj {
+ /**
+ * The place to store the data for an analysis.
+ *
+ * This member is required.
+ */
+ destination?: AnalyticsExportDestination
+ /**
+ * The version of the output schema to use when exporting data. Must be V_1 .
+ *
+ * This member is required.
+ */
+ outputSchemaVersion: StorageClassAnalysisSchemaVersion
+ }
+}
+
+/**
+ * Package smithy provides the core components for a Smithy SDK.
+ */
+namespace smithy_go {
+}
+
+/**
+ * Package metrics defines the metrics APIs used by Smithy clients.
+ */
+namespace metrics {
+ /**
+ * InstrumentOptions represents configuration for an instrument.
+ */
+ interface InstrumentOptions {
+ unitLabel: string
+ description: string
+ }
+ /**
+ * Int64Observer is the interface passed to async int64 instruments.
+ *
+ * Callers use the Observe() API of this interface to report metrics to the
+ * underlying collector.
+ */
+ interface Int64Observer {
+ [key:string]: any;
+ observe(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+ * Float64Observer is the interface passed to async float64 instruments.
+ *
+ * Callers use the Observe() API of this interface to report metrics to the
+ * underlying collector.
+ */
+ interface Float64Observer {
+ [key:string]: any;
+ observe(_arg0: context.Context, _arg1: number, ..._arg2: RecordMetricOption[]): void
+ }
+ /**
+ * RecordMetricOption applies configuration to a recorded metric.
+ */
+ interface RecordMetricOption {(o: RecordMetricOptions): void }
+}
+
+/**
+ * Package tracing defines tracing APIs to be used by Smithy clients.
+ */
+namespace tracing {
+ /**
+ * SpanStatus records the "success" state of an observed span.
+ */ + interface SpanStatus extends Number{} + /** + * SpanOptions represent configuration for span events. + */ + interface SpanOptions { + kind: SpanKind + properties: smithy.Properties + } + /** + * EventOption applies configuration to a span event. + */ + interface EventOption {(o: EventOptions): void } + /** + * SpanContext uniquely identifies a Span. + */ + interface SpanContext { + traceID: string + spanID: string + isRemote: boolean + } + interface SpanContext { + /** + * IsValid is true when a span has nonzero trace and span IDs. + */ + isValid(): boolean + } +} + +namespace types { + interface DeleteMarkerReplicationStatus extends String{} + interface DeleteMarkerReplicationStatus { + /** + * Values returns all known values for DeleteMarkerReplicationStatus. Note that + * this can be expanded in the future, and so it is only as up to date as the + * client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface ExistingObjectReplicationStatus extends String{} + interface ExistingObjectReplicationStatus { + /** + * Values returns all known values for ExistingObjectReplicationStatus. Note that + * this can be expanded in the future, and so it is only as up to date as the + * client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface InventoryFormat extends String{} + interface InventoryFormat { + /** + * Values returns all known values for InventoryFormat. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface PartitionDateSource extends String{} + interface PartitionDateSource { + /** + * Values returns all known values for PartitionDateSource. Note that this can be + * expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + interface StorageClassAnalysisSchemaVersion extends String{} + interface StorageClassAnalysisSchemaVersion { + /** + * Values returns all known values for StorageClassAnalysisSchemaVersion. Note + * that this can be expanded in the future, and so it is only as up to date as the + * client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + // @ts-ignore + import smithy = smithy_go + // @ts-ignore + import smithydocument = document + /** + * A container for information about access control for replicas. + */ + type _subRSwvp = noSmithyDocumentSerde + interface AccessControlTranslation extends _subRSwvp { + /** + * Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the + * Amazon S3 API Reference. + * + * [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html + * + * This member is required. + */ + owner: OwnerOverride + } + /** + * Where to publish the analytics results. + */ + type _subEfedq = noSmithyDocumentSerde + interface AnalyticsExportDestination extends _subEfedq { + /** + * A destination signifying output to an S3 bucket. + * + * This member is required. + */ + s3BucketDestination?: AnalyticsS3BucketDestination + } + /** + * Contains the type of server-side encryption used. 
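+ *
+ * Editor's sketch (illustrative, not part of the generated docs): a literal
+ * satisfying this shape, using the algorithm names documented on the members
+ * below; the key ARN is the placeholder value from the KMS examples above.
+ *
+ * ```
+ * const enc: Encryption = {
+ *   encryptionType: "aws:kms", // or "AES256"
+ *   kmsKeyId: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ * }
+ * ```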
+ */
+ type _subMKWRi = noSmithyDocumentSerde
+ interface Encryption extends _subMKWRi {
+ /**
+ * The server-side encryption algorithm used when storing job results in Amazon S3
+ * (for example, AES256, aws:kms ).
+ *
+ * This member is required.
+ */
+ encryptionType: ServerSideEncryption
+ /**
+ * If the encryption type is aws:kms , this optional value can be used to specify
+ * the encryption context for the restore results.
+ */
+ kmsContext?: string
+ /**
+ * If the encryption type is aws:kms , this optional value specifies the ID of the
+ * symmetric encryption customer managed key to use for encryption of job results.
+ * Amazon S3 only supports symmetric encryption KMS keys. For more information, see
+ * [Asymmetric keys in KMS] in the Amazon Web Services Key Management Service Developer Guide.
+ *
+ * [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ */
+ kmsKeyId?: string
+ }
+ /**
+ * Specifies encryption-related information for an Amazon S3 bucket that is a
+ * destination for replicated objects.
+ *
+ * If you're specifying a customer managed KMS key, we recommend using a fully
+ * qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the
+ * key within the requester’s account. This behavior can result in data that's
+ * encrypted with a KMS key that belongs to the requester, and not the bucket
+ * owner.
+ */
+ type _subYdcWm = noSmithyDocumentSerde
+ interface EncryptionConfiguration extends _subYdcWm {
+ /**
+ * Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric encryption KMS keys. For more information, see
+ * [Asymmetric keys in Amazon Web Services KMS] in the Amazon Web Services Key Management Service Developer Guide.
+ *
+ * [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ */
+ replicaKmsKeyID?: string
+ }
+ /**
+ * Specifies the Amazon S3 object key name to filter on. An object key name is the
+ * name assigned to an object in your Amazon S3 bucket. You specify whether to
+ * filter on the suffix or prefix of the object key name. A prefix is a specific
+ * string of characters at the beginning of an object key name, which you can use
+ * to organize objects. For example, you can start the key names of related objects
+ * with a prefix, such as 2023- or engineering/ . Then, you can use FilterRule to
+ * find objects in a bucket with key names that have the same prefix. A suffix is
+ * similar to a prefix, but it is at the end of the object key name instead of at
+ * the beginning.
+ */
+ type _subhdpnQ = noSmithyDocumentSerde
+ interface FilterRule extends _subhdpnQ {
+ /**
+ * The object key name prefix or suffix identifying one or more objects to which
+ * the filtering rule applies. The maximum length is 1,024 characters. Overlapping
+ * prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications] in the
+ * Amazon S3 User Guide.
+ *
+ * [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ */
+ name: FilterRuleName
+ /**
+ * The value that the filter searches for in object key names.
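+ *
+ * Editor's sketch (illustrative, not part of the generated docs): a complete
+ * FilterRule, assuming the lowercase rule names prefix / suffix and one of the
+ * example prefixes mentioned above.
+ *
+ * ```
+ * const rule: FilterRule = { name: "prefix", value: "engineering/" }
+ * ```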
+ */
+ value?: string
+ }
+ /**
+ * Contains the type of server-side encryption used to encrypt the inventory
+ * results.
+ */
+ type _subCOgLz = noSmithyDocumentSerde
+ interface InventoryEncryption extends _subCOgLz {
+ /**
+ * Specifies the use of SSE-KMS to encrypt delivered inventory reports.
+ */
+ ssekms?: SSEKMS
+ /**
+ * Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+ */
+ sses3?: SSES3
+ }
+ /**
+ * A metadata key-value pair to store with an object.
+ */
+ type _subEsbzQ = noSmithyDocumentSerde
+ interface MetadataEntry extends _subEsbzQ {
+ /**
+ * Name of the object.
+ */
+ name?: string
+ /**
+ * Value of the object.
+ */
+ value?: string
+ }
+ /**
+ * A container specifying replication metrics-related settings enabling
+ * replication metrics and events.
+ */
+ type _subvDzQN = noSmithyDocumentSerde
+ interface Metrics extends _subvDzQN {
+ /**
+ * Specifies whether the replication metrics are enabled.
+ *
+ * This member is required.
+ */
+ status: MetricsStatus
+ /**
+ * A container specifying the time threshold for emitting the
+ * s3:Replication:OperationMissedThreshold event.
+ */
+ eventThreshold?: ReplicationTimeValue
+ }
+ /**
+ * A filter that you can specify for selection for modifications on replicas.
+ * Amazon S3 doesn't replicate replica modifications by default. In the latest
+ * version of replication configuration (when Filter is specified), you can
+ * specify this element and set the status to Enabled to replicate modifications
+ * on replicas.
+ *
+ * If you don't specify the Filter element, Amazon S3 assumes that the replication
+ * configuration is the earlier version, V1. In the earlier version, this element
+ * is not allowed.
+ */
+ type _subtrUmI = noSmithyDocumentSerde
+ interface ReplicaModifications extends _subtrUmI {
+ /**
+ * Specifies whether Amazon S3 replicates modifications on replicas.
+ *
+ * This member is required.
+ */
+ status: ReplicaModificationsStatus
+ }
+ /**
+ * A container specifying S3 Replication Time Control (S3 RTC) related
+ * information, including whether S3 RTC is enabled and the time when all objects
+ * and operations on objects must be replicated. Must be specified together with a
+ * Metrics block.
+ */
+ type _subozwUW = noSmithyDocumentSerde
+ interface ReplicationTime extends _subozwUW {
+ /**
+ * Specifies whether the replication time is enabled.
+ *
+ * This member is required.
+ */
+ status: ReplicationTimeStatus
+ /**
+ * A container specifying the time by which replication should be complete for
+ * all objects and operations on objects.
+ *
+ * This member is required.
+ */
+ time?: ReplicationTimeValue
+ }
+ /**
+ * A container for filter information for the selection of S3 objects encrypted
+ * with Amazon Web Services KMS.
+ */
+ type _subEQxoY = noSmithyDocumentSerde
+ interface SseKmsEncryptedObjects extends _subEQxoY {
+ /**
+ * Specifies whether Amazon S3 replicates objects created with server-side
+ * encryption using an Amazon Web Services KMS key stored in Amazon Web Services
+ * Key Management Service.
+ *
+ * This member is required.
+ */
+ status: SseKmsEncryptedObjectsStatus
+ }
+}
+
+/**
+ * Package tracing defines tracing APIs to be used by Smithy clients.
+ */
+namespace tracing {
+ /**
+ * SpanKind indicates the nature of the work being performed.
+ */
+ interface SpanKind extends Number{}
+ /**
+ * EventOptions represent configuration for span events.
+ */
+ interface EventOptions {
+ properties: smithy.Properties
+ }
+}
+
+namespace types {
+ interface FilterRuleName extends String{}
+ interface FilterRuleName {
+ /**
+ * Values returns all known values for FilterRuleName. Note that this can be
+ * expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface MetricsStatus extends String{}
+ interface MetricsStatus {
+ /**
+ * Values returns all known values for MetricsStatus. Note that this can be
+ * expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface OwnerOverride extends String{}
+ interface OwnerOverride {
+ /**
+ * Values returns all known values for OwnerOverride. Note that this can be
+ * expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface ReplicaModificationsStatus extends String{}
+ interface ReplicaModificationsStatus {
+ /**
+ * Values returns all known values for ReplicaModificationsStatus. Note that this
+ * can be expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface ReplicationTimeStatus extends String{}
+ interface ReplicationTimeStatus {
+ /**
+ * Values returns all known values for ReplicationTimeStatus. Note that this can
+ * be expanded in the future, and so it is only as up to date as the client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ interface SseKmsEncryptedObjectsStatus extends String{}
+ interface SseKmsEncryptedObjectsStatus {
+ /**
+ * Values returns all known values for SseKmsEncryptedObjectsStatus. Note that
+ * this can be expanded in the future, and so it is only as up to date as the
+ * client.
+ *
+ * The ordering of this slice is not guaranteed to be stable across updates.
+ */
+ values(): Array
+ }
+ // @ts-ignore
+ import smithy = smithy_go
+ // @ts-ignore
+ import smithydocument = document
+ /**
+ * Contains information about where to publish the analytics results.
+ */
+ type _subFeyfr = noSmithyDocumentSerde
+ interface AnalyticsS3BucketDestination extends _subFeyfr {
+ /**
+ * The Amazon Resource Name (ARN) of the bucket to which data is exported.
+ *
+ * This member is required.
+ */
+ bucket?: string
+ /**
+ * Specifies the file format used when exporting data to Amazon S3.
+ *
+ * This member is required.
+ */
+ format: AnalyticsS3ExportFileFormat
+ /**
+ * The account ID that owns the destination S3 bucket. If no account ID is
+ * provided, the owner is not validated before exporting data.
+ *
+ * Although this value is optional, we strongly recommend that you set it to help
+ * prevent problems if the destination bucket ownership changes.
+ */
+ bucketAccountId?: string
+ /**
+ * The prefix to use when exporting data. The prefix is prepended to all results.
+ */
+ prefix?: string
+ }
+ /**
+ * A container specifying the time value for S3 Replication Time Control (S3 RTC)
+ * and replication metrics EventThreshold .
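+ *
+ * Editor's sketch (illustrative, not part of the generated docs), using the
+ * only documented valid value of 15 minutes:
+ *
+ * ```
+ * const threshold: ReplicationTimeValue = { minutes: 15 }
+ * ```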
+ */ + type _subSvvgY = noSmithyDocumentSerde + interface ReplicationTimeValue extends _subSvvgY { + /** + * Contains an integer specifying time in minutes. + * + * Valid value: 15 + */ + minutes?: number + } + /** + * Specifies the use of SSE-KMS to encrypt delivered inventory reports. + */ + type _subtGWME = noSmithyDocumentSerde + interface SSEKMS extends _subtGWME { + /** + * Specifies the ID of the Key Management Service (KMS) symmetric encryption + * customer managed key to use for encrypting inventory reports. + * + * This member is required. + */ + keyId?: string + } + /** + * Specifies the use of SSE-S3 to encrypt delivered inventory reports. + */ + type _subfwKrU = noSmithyDocumentSerde + interface SSES3 extends _subfwKrU { + } +} + +/** + * Package metrics defines the metrics APIs used by Smithy clients. + */ +namespace metrics { + /** + * RecordMetricOptions represents configuration for a recorded metric. + */ + interface RecordMetricOptions { + properties: smithy.Properties + } +} + +namespace types { + interface AnalyticsS3ExportFileFormat extends String{} + interface AnalyticsS3ExportFileFormat { + /** + * Values returns all known values for AnalyticsS3ExportFileFormat. Note that this + * can be expanded in the future, and so it is only as up to date as the client. + * + * The ordering of this slice is not guaranteed to be stable across updates. + */ + values(): Array + } + // @ts-ignore + import smithy = smithy_go + // @ts-ignore + import smithydocument = document +}
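+
+/**
+ * Editor's sketch (illustrative, not part of the generated declarations): the
+ * replication metrics types above are meant to be used together. S3 RTC (
+ * ReplicationTime ) must be specified together with a Metrics block, and both
+ * use ReplicationTimeValue for their 15-minute thresholds. The "Enabled" status
+ * strings mirror the "set the status to Enabled" wording in the replication
+ * docs above; all other values are hypothetical.
+ *
+ * ```
+ * const threshold: types.ReplicationTimeValue = { minutes: 15 }
+ *
+ * const metrics: types.Metrics = {
+ *   status: "Enabled", // MetricsStatus
+ *   eventThreshold: threshold,
+ * }
+ *
+ * const rtc: types.ReplicationTime = {
+ *   status: "Enabled", // ReplicationTimeStatus
+ *   time: threshold,
+ * }
+ * ```
+ */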