diff --git a/go.mod b/go.mod
index 4568b36ab357f..ef66f2aa42dfd 100644
--- a/go.mod
+++ b/go.mod
@@ -104,7 +104,7 @@ require (
golang.org/x/sys v0.33.0
golang.org/x/time v0.11.0
google.golang.org/api v0.229.0
- google.golang.org/grpc v1.71.1
+ google.golang.org/grpc v1.72.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/klog/v2 v2.130.1
@@ -116,7 +116,7 @@ require (
github.com/DmitriyVTitov/size v1.5.0
github.com/IBM/go-sdk-core/v5 v5.20.0
github.com/IBM/ibm-cos-sdk-go v1.12.2
- github.com/apache/arrow-go/v18 v18.2.0
+ github.com/apache/arrow-go/v18 v18.3.0
github.com/axiomhq/hyperloglog v0.2.5
github.com/bits-and-blooms/bloom/v3 v3.7.0
github.com/buger/jsonparser v1.1.1
@@ -192,6 +192,7 @@ require (
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/go-ini/ini v1.67.0 // indirect
+ github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-redsync/redsync/v4 v4.13.0 // indirect
github.com/goccy/go-json v0.10.5 // indirect
@@ -230,6 +231,7 @@ require (
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sahilm/fuzzy v0.1.1 // indirect
github.com/sercand/kuberesolver/v6 v6.0.0 // indirect
+ github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
github.com/tinylib/msgp v1.3.0 // indirect
github.com/tklauser/go-sysconf v0.3.13 // indirect
github.com/tklauser/numcpus v0.7.0 // indirect
@@ -237,6 +239,7 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
+ github.com/zeebo/errs v1.4.0 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/component v0.118.0 // indirect
diff --git a/go.sum b/go.sum
index 52dad11b3ee54..933c61f9313a5 100644
--- a/go.sum
+++ b/go.sum
@@ -183,8 +183,8 @@ github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/arrow-go/v18 v18.2.0 h1:QhWqpgZMKfWOniGPhbUxrHohWnooGURqL2R2Gg4SO1Q=
-github.com/apache/arrow-go/v18 v18.2.0/go.mod h1:Ic/01WSwGJWRrdAZcxjBZ5hbApNJ28K96jGYaxzzGUc=
+github.com/apache/arrow-go/v18 v18.3.0 h1:Xq4A6dZj9Nu33sqZibzn012LNnewkTUlfKVUFD/RX/I=
+github.com/apache/arrow-go/v18 v18.3.0/go.mod h1:eEM1DnUTHhgGAjf/ChvOAQbUQ+EPohtDrArffvUjPg8=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE=
@@ -459,6 +459,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
+github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
@@ -1197,6 +1199,8 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@@ -1289,6 +1293,8 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zeebo/assert v1.3.1 h1:vukIABvugfNMZMQO1ABsyQDJDTVQbn+LWSMy1ol1h6A=
github.com/zeebo/assert v1.3.1/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.einride.tech/aip v0.68.1 h1:16/AfSxcQISGN5z9C5lM+0mLYXihrHbQ1onvYTr93aQ=
@@ -1699,8 +1705,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0=
-gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@@ -1788,8 +1794,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI=
-google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
+google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
+google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/Makefile b/vendor/github.com/apache/arrow-go/v18/arrow/Makefile
index 9c4a23262d0bd..c7e327092f16d 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/Makefile
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/Makefile
@@ -30,7 +30,7 @@ assembly:
@$(MAKE) -C math assembly
generate: bin/tmpl
- bin/tmpl -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen_test.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl
+ bin/tmpl -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numericbuilder.gen_test.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl
bin/tmpl -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl
@$(MAKE) -C math generate
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array.go b/vendor/github.com/apache/arrow-go/v18/arrow/array.go
index df186f2de7178..d42ca6d05bc3a 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array.go
@@ -127,3 +127,15 @@ type Array interface {
// When the reference count goes to zero, the memory is freed.
Release()
}
+
+// ValueType is a generic constraint for valid Arrow primitive types
+type ValueType interface {
+ bool | FixedWidthType | string | []byte
+}
+
+// TypedArray is an interface representing an Array of a particular type
+// allowing for easy propagation of generics
+type TypedArray[T ValueType] interface {
+ Array
+ Value(int) T
+}
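The ValueType constraint and TypedArray interface added above make it possible to write helpers that are generic over the concrete array type (the concrete arrays later in this diff assert conformance, e.g. *Boolean for bool and *Binary for []byte). A minimal usage sketch, not part of this diff; the helper name is hypothetical and it assumes the arrow package is imported from github.com/apache/arrow-go/v18/arrow:

// collectValues gathers the non-null values of any typed Arrow array.
// It relies only on the Array methods (Len, IsNull) plus Value(int) T
// from the TypedArray[T] interface defined above.
func collectValues[T arrow.ValueType](arr arrow.TypedArray[T]) []T {
	out := make([]T, 0, arr.Len())
	for i := 0; i < arr.Len(); i++ {
		if arr.IsNull(i) {
			continue // skip null slots
		}
		out = append(out, arr.Value(i))
	}
	return out
}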
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/array.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/array.go
index 6e281a43e9b94..947b44f2a3a0f 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/array.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/array.go
@@ -35,7 +35,7 @@ const (
)
type array struct {
- refCount int64
+ refCount atomic.Int64
data *Data
nullBitmapBytes []byte
}
@@ -43,16 +43,16 @@ type array struct {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (a *array) Retain() {
- atomic.AddInt64(&a.refCount, 1)
+ a.refCount.Add(1)
}
// Release decreases the reference count by 1.
// Release may be called simultaneously from multiple goroutines.
// When the reference count goes to zero, the memory is freed.
func (a *array) Release() {
- debug.Assert(atomic.LoadInt64(&a.refCount) > 0, "too many releases")
+ debug.Assert(a.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&a.refCount, -1) == 0 {
+ if a.refCount.Add(-1) == 0 {
a.data.Release()
a.data, a.nullBitmapBytes = nil, nil
}
@@ -109,9 +109,7 @@ func (a *array) Offset() int {
type arrayConstructorFn func(arrow.ArrayData) arrow.Array
-var (
- makeArrayFn [64]arrayConstructorFn
-)
+var makeArrayFn [64]arrayConstructorFn
func invalidDataType(data arrow.ArrayData) arrow.Array {
panic("invalid data type: " + data.DataType().ID().String())
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/binary.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/binary.go
index 1af7631b8fe7c..5fef60ec55c0e 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/binary.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/binary.go
@@ -45,7 +45,7 @@ type Binary struct {
// NewBinaryData constructs a new Binary array from data.
func NewBinaryData(data arrow.ArrayData) *Binary {
a := &Binary{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -189,7 +189,7 @@ type LargeBinary struct {
func NewLargeBinaryData(data arrow.ArrayData) *LargeBinary {
a := &LargeBinary{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -208,6 +208,7 @@ func (a *LargeBinary) ValueStr(i int) string {
}
return base64.StdEncoding.EncodeToString(a.Value(i))
}
+
func (a *LargeBinary) ValueString(i int) string {
b := a.Value(i)
return *(*string)(unsafe.Pointer(&b))
@@ -333,7 +334,7 @@ type BinaryView struct {
func NewBinaryViewData(data arrow.ArrayData) *BinaryView {
a := &BinaryView{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -450,4 +451,8 @@ var (
_ BinaryLike = (*Binary)(nil)
_ BinaryLike = (*LargeBinary)(nil)
+
+ _ arrow.TypedArray[[]byte] = (*Binary)(nil)
+ _ arrow.TypedArray[[]byte] = (*LargeBinary)(nil)
+ _ arrow.TypedArray[[]byte] = (*BinaryView)(nil)
)
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/binarybuilder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/binarybuilder.go
index 794ac68899c31..8b162c77148c5 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/binarybuilder.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/binarybuilder.go
@@ -22,7 +22,6 @@ import (
"fmt"
"math"
"reflect"
- "sync/atomic"
"unsafe"
"github.com/apache/arrow-go/v18/arrow"
@@ -72,8 +71,8 @@ func NewBinaryBuilder(mem memory.Allocator, dtype arrow.BinaryDataType) *BinaryB
offsetByteWidth = arrow.Int64SizeBytes
}
- b := &BinaryBuilder{
- builder: builder{refCount: 1, mem: mem},
+ bb := &BinaryBuilder{
+ builder: builder{mem: mem},
dtype: dtype,
offsets: offsets,
values: newByteBufferBuilder(mem),
@@ -82,7 +81,8 @@ func NewBinaryBuilder(mem memory.Allocator, dtype arrow.BinaryDataType) *BinaryB
offsetByteWidth: offsetByteWidth,
getOffsetVal: getOffsetVal,
}
- return b
+ bb.builder.refCount.Add(1)
+ return bb
}
func (b *BinaryBuilder) Type() arrow.DataType { return b.dtype }
@@ -91,9 +91,9 @@ func (b *BinaryBuilder) Type() arrow.DataType { return b.dtype }
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (b *BinaryBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -387,18 +387,19 @@ type BinaryViewBuilder struct {
}
func NewBinaryViewBuilder(mem memory.Allocator) *BinaryViewBuilder {
- return &BinaryViewBuilder{
+ bvb := &BinaryViewBuilder{
dtype: arrow.BinaryTypes.BinaryView,
builder: builder{
- refCount: 1,
- mem: mem,
+ mem: mem,
},
blockBuilder: multiBufferBuilder{
- refCount: 1,
blockSize: dfltBlockSize,
mem: mem,
},
}
+ bvb.builder.refCount.Add(1)
+ bvb.blockBuilder.refCount.Add(1)
+ return bvb
}
func (b *BinaryViewBuilder) SetBlockSize(sz uint) {
@@ -408,9 +409,9 @@ func (b *BinaryViewBuilder) SetBlockSize(sz uint) {
func (b *BinaryViewBuilder) Type() arrow.DataType { return b.dtype }
func (b *BinaryViewBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) != 0 {
+ if b.refCount.Add(-1) != 0 {
return
}
@@ -673,7 +674,8 @@ func (b *BinaryViewBuilder) newData() (data *Data) {
dataBuffers := b.blockBuilder.Finish()
data = NewData(b.dtype, b.length, append([]*memory.Buffer{
- b.nullBitmap, b.data}, dataBuffers...), nil, b.nulls, 0)
+ b.nullBitmap, b.data,
+ }, dataBuffers...), nil, b.nulls, 0)
b.reset()
if b.data != nil {
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/boolean.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/boolean.go
index fb2dba7389a0d..1b28a9f4cd394 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/boolean.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/boolean.go
@@ -44,7 +44,7 @@ func NewBoolean(length int, data *memory.Buffer, nullBitmap *memory.Buffer, null
func NewBooleanData(data arrow.ArrayData) *Boolean {
a := &Boolean{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -122,5 +122,6 @@ func arrayEqualBoolean(left, right *Boolean) bool {
}
var (
- _ arrow.Array = (*Boolean)(nil)
+ _ arrow.Array = (*Boolean)(nil)
+ _ arrow.TypedArray[bool] = (*Boolean)(nil)
)
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/booleanbuilder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/booleanbuilder.go
index 951fe3a90ad49..a277ffd26cd17 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/booleanbuilder.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/booleanbuilder.go
@@ -21,7 +21,6 @@ import (
"fmt"
"reflect"
"strconv"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
@@ -38,7 +37,9 @@ type BooleanBuilder struct {
}
func NewBooleanBuilder(mem memory.Allocator) *BooleanBuilder {
- return &BooleanBuilder{builder: builder{refCount: 1, mem: mem}}
+ bb := &BooleanBuilder{builder: builder{mem: mem}}
+ bb.builder.refCount.Add(1)
+ return bb
}
func (b *BooleanBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.Boolean }
@@ -47,9 +48,9 @@ func (b *BooleanBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.Bo
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (b *BooleanBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -258,6 +259,4 @@ func (b *BooleanBuilder) Value(i int) bool {
return bitutil.BitIsSet(b.rawData, i)
}
-var (
- _ Builder = (*BooleanBuilder)(nil)
-)
+var _ Builder = (*BooleanBuilder)(nil)
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder.go
index 085d43eff1c99..bc784d6ae1911 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder.go
@@ -43,7 +43,7 @@ type bufBuilder interface {
// A bufferBuilder provides common functionality for populating memory with a sequence of type-specific values.
// Specialized implementations provide type-safe APIs for appending and accessing the memory.
type bufferBuilder struct {
- refCount int64
+ refCount atomic.Int64
mem memory.Allocator
buffer *memory.Buffer
length int
@@ -55,16 +55,16 @@ type bufferBuilder struct {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (b *bufferBuilder) Retain() {
- atomic.AddInt64(&b.refCount, 1)
+ b.refCount.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (b *bufferBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.buffer != nil {
b.buffer.Release()
b.buffer, b.bytes = nil, nil
@@ -155,7 +155,7 @@ func (b *bufferBuilder) unsafeAppend(data []byte) {
}
type multiBufferBuilder struct {
- refCount int64
+ refCount atomic.Int64
blockSize int
mem memory.Allocator
@@ -166,16 +166,16 @@ type multiBufferBuilder struct {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (b *multiBufferBuilder) Retain() {
- atomic.AddInt64(&b.refCount, 1)
+ b.refCount.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (b *multiBufferBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
b.Reset()
}
}
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_byte.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_byte.go
index 78bb938edfef3..61431b716deeb 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_byte.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_byte.go
@@ -23,7 +23,9 @@ type byteBufferBuilder struct {
}
func newByteBufferBuilder(mem memory.Allocator) *byteBufferBuilder {
- return &byteBufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+ bbb := &byteBufferBuilder{bufferBuilder: bufferBuilder{mem: mem}}
+ bbb.bufferBuilder.refCount.Add(1)
+ return bbb
}
func (b *byteBufferBuilder) Values() []byte { return b.Bytes() }
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go
index 3812c5e724550..e887fbf126d4b 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go
@@ -29,7 +29,9 @@ type int64BufferBuilder struct {
}
func newInt64BufferBuilder(mem memory.Allocator) *int64BufferBuilder {
- return &int64BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+ b := &int64BufferBuilder{bufferBuilder: bufferBuilder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
@@ -62,7 +64,9 @@ type int32BufferBuilder struct {
}
func newInt32BufferBuilder(mem memory.Allocator) *int32BufferBuilder {
- return &int32BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+ b := &int32BufferBuilder{bufferBuilder: bufferBuilder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
@@ -95,7 +99,9 @@ type int8BufferBuilder struct {
}
func newInt8BufferBuilder(mem memory.Allocator) *int8BufferBuilder {
- return &int8BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+ b := &int8BufferBuilder{bufferBuilder: bufferBuilder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go.tmpl
index c3c39de15f2f9..3582057555ada 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go.tmpl
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go.tmpl
@@ -30,7 +30,9 @@ type {{$TypeNamePrefix}}BufferBuilder struct {
}
func new{{.Name}}BufferBuilder(mem memory.Allocator) *{{$TypeNamePrefix}}BufferBuilder {
- return &{{$TypeNamePrefix}}BufferBuilder{bufferBuilder:bufferBuilder{refCount: 1, mem:mem}}
+ b := &{{$TypeNamePrefix}}BufferBuilder{bufferBuilder:bufferBuilder{mem:mem}}
+ b.refCount.Add(1)
+ return b
}
// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/builder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/builder.go
index a2a40d48aa18b..0b3a4e9af9fb9 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/builder.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/builder.go
@@ -102,7 +102,7 @@ type Builder interface {
// builder provides common functionality for managing the validity bitmap (nulls) when building arrays.
type builder struct {
- refCount int64
+ refCount atomic.Int64
mem memory.Allocator
nullBitmap *memory.Buffer
nulls int
@@ -113,7 +113,7 @@ type builder struct {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (b *builder) Retain() {
- atomic.AddInt64(&b.refCount, 1)
+ b.refCount.Add(1)
}
// Len returns the number of elements in the array builder.
@@ -176,13 +176,13 @@ func (b *builder) resize(newBits int, init func(int)) {
}
func (b *builder) reserve(elements int, resize func(int)) {
- if b.nullBitmap == nil {
- b.nullBitmap = memory.NewResizableBuffer(b.mem)
- }
if b.length+elements > b.capacity {
newCap := bitutil.NextPowerOf2(b.length + elements)
resize(newCap)
}
+ if b.nullBitmap == nil {
+ b.nullBitmap = memory.NewResizableBuffer(b.mem)
+ }
}
// unsafeAppendBoolsToBitmap appends the contents of valid to the validity bitmap.
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/compare.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/compare.go
index e412febfc0404..fda15f506d884 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/compare.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/compare.go
@@ -240,37 +240,37 @@ func Equal(left, right arrow.Array) bool {
return arrayEqualStringView(l, r)
case *Int8:
r := right.(*Int8)
- return arrayEqualInt8(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Int16:
r := right.(*Int16)
- return arrayEqualInt16(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Int32:
r := right.(*Int32)
- return arrayEqualInt32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Int64:
r := right.(*Int64)
- return arrayEqualInt64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Uint8:
r := right.(*Uint8)
- return arrayEqualUint8(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Uint16:
r := right.(*Uint16)
- return arrayEqualUint16(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Uint32:
r := right.(*Uint32)
- return arrayEqualUint32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Uint64:
r := right.(*Uint64)
- return arrayEqualUint64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Float16:
r := right.(*Float16)
- return arrayEqualFloat16(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Float32:
r := right.(*Float32)
- return arrayEqualFloat32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Float64:
r := right.(*Float64)
- return arrayEqualFloat64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Decimal32:
r := right.(*Decimal32)
return arrayEqualDecimal(l, r)
@@ -285,16 +285,16 @@ func Equal(left, right arrow.Array) bool {
return arrayEqualDecimal(l, r)
case *Date32:
r := right.(*Date32)
- return arrayEqualDate32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Date64:
r := right.(*Date64)
- return arrayEqualDate64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Time32:
r := right.(*Time32)
- return arrayEqualTime32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Time64:
r := right.(*Time64)
- return arrayEqualTime64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Timestamp:
r := right.(*Timestamp)
return arrayEqualTimestamp(l, r)
@@ -327,7 +327,7 @@ func Equal(left, right arrow.Array) bool {
return arrayEqualMonthDayNanoInterval(l, r)
case *Duration:
r := right.(*Duration)
- return arrayEqualDuration(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Map:
r := right.(*Map)
return arrayEqualMap(l, r)
@@ -502,28 +502,28 @@ func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool {
return arrayApproxEqualStringView(l, r)
case *Int8:
r := right.(*Int8)
- return arrayEqualInt8(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Int16:
r := right.(*Int16)
- return arrayEqualInt16(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Int32:
r := right.(*Int32)
- return arrayEqualInt32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Int64:
r := right.(*Int64)
- return arrayEqualInt64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Uint8:
r := right.(*Uint8)
- return arrayEqualUint8(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Uint16:
r := right.(*Uint16)
- return arrayEqualUint16(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Uint32:
r := right.(*Uint32)
- return arrayEqualUint32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Uint64:
r := right.(*Uint64)
- return arrayEqualUint64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Float16:
r := right.(*Float16)
return arrayApproxEqualFloat16(l, r, opt)
@@ -547,16 +547,16 @@ func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool {
return arrayEqualDecimal(l, r)
case *Date32:
r := right.(*Date32)
- return arrayEqualDate32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Date64:
r := right.(*Date64)
- return arrayEqualDate64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Time32:
r := right.(*Time32)
- return arrayEqualTime32(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Time64:
r := right.(*Time64)
- return arrayEqualTime64(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Timestamp:
r := right.(*Timestamp)
return arrayEqualTimestamp(l, r)
@@ -589,7 +589,7 @@ func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool {
return arrayEqualMonthDayNanoInterval(l, r)
case *Duration:
r := right.(*Duration)
- return arrayEqualDuration(l, r)
+ return arrayEqualFixedWidth(l, r)
case *Map:
r := right.(*Map)
if opt.unorderedMapKeys {
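Throughout compare.go the per-type helpers (arrayEqualInt8, arrayEqualDate32, arrayEqualDuration, ...) are replaced by calls to a single arrayEqualFixedWidth. A rough sketch of the shape such a generic helper can take, written against the new TypedArray interface; this is an illustration under assumptions, not the upstream implementation (Equal has already verified lengths and null bitmaps before dispatching here):

// equalFixedWidth is an illustrative stand-in for arrayEqualFixedWidth:
// it compares only the valid slots of two same-length arrays.
func equalFixedWidth[T arrow.FixedWidthType](left, right arrow.TypedArray[T]) bool {
	for i := 0; i < left.Len(); i++ {
		if left.IsNull(i) {
			continue // nulls already matched via the null bitmaps
		}
		if left.Value(i) != right.Value(i) {
			return false
		}
	}
	return true
}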
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/concat.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/concat.go
index bb50354b440d0..8f6aefbe2a8fc 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/concat.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/concat.go
@@ -517,7 +517,9 @@ func concatListView(data []arrow.ArrayData, offsetType arrow.FixedWidthDataType,
// concat is the implementation for actually performing the concatenation of the arrow.ArrayData
// objects that we can call internally for nested types.
func concat(data []arrow.ArrayData, mem memory.Allocator) (arr arrow.ArrayData, err error) {
- out := &Data{refCount: 1, dtype: data[0].DataType(), nulls: 0}
+ out := &Data{dtype: data[0].DataType(), nulls: 0}
+ out.refCount.Add(1)
+
defer func() {
if pErr := recover(); pErr != nil {
err = utils.FormatRecoveredError("arrow/concat", pErr)
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/data.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/data.go
index be75c7c709d4c..62284b3915fc7 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/data.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/data.go
@@ -29,7 +29,7 @@ import (
// Data represents the memory and metadata of an Arrow array.
type Data struct {
- refCount int64
+ refCount atomic.Int64
dtype arrow.DataType
nulls int
offset int
@@ -56,8 +56,7 @@ func NewData(dtype arrow.DataType, length int, buffers []*memory.Buffer, childDa
}
}
- return &Data{
- refCount: 1,
+ d := &Data{
dtype: dtype,
nulls: nulls,
length: length,
@@ -65,6 +64,8 @@ func NewData(dtype arrow.DataType, length int, buffers []*memory.Buffer, childDa
buffers: buffers,
childData: childData,
}
+ d.refCount.Add(1)
+ return d
}
// NewDataWithDictionary creates a new data object, but also sets the provided dictionary into the data if it's not nil
@@ -129,16 +130,16 @@ func (d *Data) Reset(dtype arrow.DataType, length int, buffers []*memory.Buffer,
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (d *Data) Retain() {
- atomic.AddInt64(&d.refCount, 1)
+ d.refCount.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (d *Data) Release() {
- debug.Assert(atomic.LoadInt64(&d.refCount) > 0, "too many releases")
+ debug.Assert(d.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&d.refCount, -1) == 0 {
+ if d.refCount.Add(-1) == 0 {
for _, b := range d.buffers {
if b != nil {
b.Release()
@@ -246,7 +247,6 @@ func NewSliceData(data arrow.ArrayData, i, j int64) arrow.ArrayData {
}
o := &Data{
- refCount: 1,
dtype: data.DataType(),
nulls: UnknownNullCount,
length: int(j - i),
@@ -255,6 +255,7 @@ func NewSliceData(data arrow.ArrayData, i, j int64) arrow.ArrayData {
childData: data.Children(),
dictionary: data.(*Data).dictionary,
}
+ o.refCount.Add(1)
if data.NullN() == 0 {
o.nulls = 0
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/decimal.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/decimal.go
index 1a9d61c1c8222..dff0feaf048da 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/decimal.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/decimal.go
@@ -21,7 +21,6 @@ import (
"fmt"
"reflect"
"strings"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
@@ -45,7 +44,7 @@ func newDecimalData[T interface {
decimal.Num[T]
}](data arrow.ArrayData) *baseDecimal[T] {
a := &baseDecimal[T]{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -148,11 +147,13 @@ func NewDecimal256Data(data arrow.ArrayData) *Decimal256 {
return newDecimalData[decimal.Decimal256](data)
}
-type Decimal32Builder = baseDecimalBuilder[decimal.Decimal32]
-type Decimal64Builder = baseDecimalBuilder[decimal.Decimal64]
-type Decimal128Builder struct {
- *baseDecimalBuilder[decimal.Decimal128]
-}
+type (
+ Decimal32Builder = baseDecimalBuilder[decimal.Decimal32]
+ Decimal64Builder = baseDecimalBuilder[decimal.Decimal64]
+ Decimal128Builder struct {
+ *baseDecimalBuilder[decimal.Decimal128]
+ }
+)
func (b *Decimal128Builder) NewDecimal128Array() *Decimal128 {
return b.NewDecimalArray()
@@ -182,18 +183,20 @@ func newDecimalBuilder[T interface {
decimal.DecimalTypes
decimal.Num[T]
}, DT arrow.DecimalType](mem memory.Allocator, dtype DT) *baseDecimalBuilder[T] {
- return &baseDecimalBuilder[T]{
- builder: builder{refCount: 1, mem: mem},
+ bdb := &baseDecimalBuilder[T]{
+ builder: builder{mem: mem},
dtype: dtype,
}
+ bdb.builder.refCount.Add(1)
+ return bdb
}
func (b *baseDecimalBuilder[T]) Type() arrow.DataType { return b.dtype }
func (b *baseDecimalBuilder[T]) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -429,4 +432,9 @@ var (
_ Builder = (*Decimal64Builder)(nil)
_ Builder = (*Decimal128Builder)(nil)
_ Builder = (*Decimal256Builder)(nil)
+
+ _ arrow.TypedArray[decimal.Decimal32] = (*Decimal32)(nil)
+ _ arrow.TypedArray[decimal.Decimal64] = (*Decimal64)(nil)
+ _ arrow.TypedArray[decimal.Decimal128] = (*Decimal128)(nil)
+ _ arrow.TypedArray[decimal.Decimal256] = (*Decimal256)(nil)
)
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/dictionary.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/dictionary.go
index 0c23934a48807..4ddb5d4c71d96 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/dictionary.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/dictionary.go
@@ -22,14 +22,11 @@ import (
"fmt"
"math"
"math/bits"
- "sync/atomic"
"unsafe"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
"github.com/apache/arrow-go/v18/arrow/decimal"
- "github.com/apache/arrow-go/v18/arrow/decimal128"
- "github.com/apache/arrow-go/v18/arrow/decimal256"
"github.com/apache/arrow-go/v18/arrow/float16"
"github.com/apache/arrow-go/v18/arrow/internal/debug"
"github.com/apache/arrow-go/v18/arrow/memory"
@@ -66,7 +63,7 @@ type Dictionary struct {
// and dictionary using the given type.
func NewDictionaryArray(typ arrow.DataType, indices, dict arrow.Array) *Dictionary {
a := &Dictionary{}
- a.array.refCount = 1
+ a.array.refCount.Add(1)
dictdata := NewData(typ, indices.Len(), indices.Data().Buffers(), indices.Data().Children(), indices.NullN(), indices.Data().Offset())
dictdata.dictionary = dict.Data().(*Data)
dict.Data().Retain()
@@ -188,19 +185,19 @@ func NewValidatedDictionaryArray(typ *arrow.DictionaryType, indices, dict arrow.
// an ArrayData object with a datatype of arrow.Dictionary and a dictionary
func NewDictionaryData(data arrow.ArrayData) *Dictionary {
a := &Dictionary{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
func (d *Dictionary) Retain() {
- atomic.AddInt64(&d.refCount, 1)
+ d.refCount.Add(1)
}
func (d *Dictionary) Release() {
- debug.Assert(atomic.LoadInt64(&d.refCount) > 0, "too many releases")
+ debug.Assert(d.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&d.refCount, -1) == 0 {
+ if d.refCount.Add(-1) == 0 {
d.data.Release()
d.data, d.nullBitmapBytes = nil, nil
d.indices.Release()
@@ -426,6 +423,73 @@ type dictionaryBuilder struct {
idxBuilder IndexBuilder
}
+func createDictBuilder[T arrow.ValueType](mem memory.Allocator, idxbldr IndexBuilder, memo hashing.MemoTable, dt *arrow.DictionaryType, init arrow.Array) DictionaryBuilder {
+ ret := &dictBuilder[T]{
+ dictionaryBuilder: dictionaryBuilder{
+ builder: builder{mem: mem},
+ idxBuilder: idxbldr,
+ memoTable: memo,
+ dt: dt,
+ },
+ }
+ ret.builder.refCount.Add(1)
+
+ if init != nil {
+ if err := ret.InsertDictValues(init.(arrValues[T])); err != nil {
+ panic(err)
+ }
+ }
+ return ret
+}
+
+func createBinaryDictBuilder(mem memory.Allocator, idxbldr IndexBuilder, memo hashing.MemoTable, dt *arrow.DictionaryType, init arrow.Array) DictionaryBuilder {
+ ret := &BinaryDictionaryBuilder{
+ dictionaryBuilder: dictionaryBuilder{
+ builder: builder{mem: mem},
+ idxBuilder: idxbldr,
+ memoTable: memo,
+ dt: dt,
+ },
+ }
+ ret.builder.refCount.Add(1)
+
+ if init != nil {
+ switch v := init.(type) {
+ case *String:
+ if err := ret.InsertStringDictValues(v); err != nil {
+ panic(err)
+ }
+ case *Binary:
+ if err := ret.InsertDictValues(v); err != nil {
+ panic(err)
+ }
+ }
+ }
+ return ret
+}
+
+func createFixedSizeDictBuilder[T fsbType](mem memory.Allocator, idxbldr IndexBuilder, memo hashing.MemoTable, dt *arrow.DictionaryType, init arrow.Array) DictionaryBuilder {
+ var z T
+ ret := &fixedSizeDictionaryBuilder[T]{
+ dictionaryBuilder: dictionaryBuilder{
+ builder: builder{mem: mem},
+ idxBuilder: idxbldr,
+ memoTable: memo,
+ dt: dt,
+ },
+ byteWidth: int(unsafe.Sizeof(z)),
+ }
+ ret.builder.refCount.Add(1)
+
+ if init != nil {
+ if err := ret.InsertDictValues(init.(arrValues[T])); err != nil {
+ panic(err)
+ }
+ }
+
+ return ret
+}
+
// NewDictionaryBuilderWithDict initializes a dictionary builder and inserts the values from `init` as the first
// values in the dictionary, but does not insert them as values into the array.
func NewDictionaryBuilderWithDict(mem memory.Allocator, dt *arrow.DictionaryType, init arrow.Array) DictionaryBuilder {
@@ -443,126 +507,55 @@ func NewDictionaryBuilderWithDict(mem memory.Allocator, dt *arrow.DictionaryType
panic(fmt.Errorf("arrow/array: unsupported builder for value type of %T", dt))
}
- bldr := dictionaryBuilder{
- builder: builder{refCount: 1, mem: mem},
- idxBuilder: idxbldr,
- memoTable: memo,
- dt: dt,
- }
-
switch dt.ValueType.ID() {
case arrow.NULL:
- ret := &NullDictionaryBuilder{bldr}
+ ret := &NullDictionaryBuilder{
+ dictionaryBuilder: dictionaryBuilder{
+ builder: builder{mem: mem},
+ idxBuilder: idxbldr,
+ memoTable: memo,
+ dt: dt,
+ },
+ }
+ ret.builder.refCount.Add(1)
debug.Assert(init == nil, "arrow/array: doesn't make sense to init a null dictionary")
return ret
case arrow.UINT8:
- ret := &Uint8DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Uint8)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[uint8](mem, idxbldr, memo, dt, init)
case arrow.INT8:
- ret := &Int8DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Int8)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[int8](mem, idxbldr, memo, dt, init)
case arrow.UINT16:
- ret := &Uint16DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Uint16)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[uint16](mem, idxbldr, memo, dt, init)
case arrow.INT16:
- ret := &Int16DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Int16)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[int16](mem, idxbldr, memo, dt, init)
case arrow.UINT32:
- ret := &Uint32DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Uint32)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[uint32](mem, idxbldr, memo, dt, init)
case arrow.INT32:
- ret := &Int32DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Int32)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[int32](mem, idxbldr, memo, dt, init)
case arrow.UINT64:
- ret := &Uint64DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Uint64)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[uint64](mem, idxbldr, memo, dt, init)
case arrow.INT64:
- ret := &Int64DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Int64)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[int64](mem, idxbldr, memo, dt, init)
case arrow.FLOAT16:
- ret := &Float16DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Float16)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[float16.Num](mem, idxbldr, memo, dt, init)
case arrow.FLOAT32:
- ret := &Float32DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Float32)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[float32](mem, idxbldr, memo, dt, init)
case arrow.FLOAT64:
- ret := &Float64DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Float64)); err != nil {
- panic(err)
- }
- }
- return ret
- case arrow.STRING:
- ret := &BinaryDictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertStringDictValues(init.(*String)); err != nil {
- panic(err)
- }
- }
- return ret
- case arrow.BINARY:
- ret := &BinaryDictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Binary)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[float64](mem, idxbldr, memo, dt, init)
+ case arrow.STRING, arrow.BINARY:
+ return createBinaryDictBuilder(mem, idxbldr, memo, dt, init)
case arrow.FIXED_SIZE_BINARY:
ret := &FixedSizeBinaryDictionaryBuilder{
- bldr, dt.ValueType.(*arrow.FixedSizeBinaryType).ByteWidth,
+ dictionaryBuilder: dictionaryBuilder{
+ builder: builder{mem: mem},
+ idxBuilder: idxbldr,
+ memoTable: memo,
+ dt: dt,
+ },
+ byteWidth: dt.ValueType.(*arrow.FixedSizeBinaryType).ByteWidth,
}
+ ret.builder.refCount.Add(1)
+
if init != nil {
if err = ret.InsertDictValues(init.(*FixedSizeBinary)); err != nil {
panic(err)
@@ -570,93 +563,27 @@ func NewDictionaryBuilderWithDict(mem memory.Allocator, dt *arrow.DictionaryType
}
return ret
case arrow.DATE32:
- ret := &Date32DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Date32)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[arrow.Date32](mem, idxbldr, memo, dt, init)
case arrow.DATE64:
- ret := &Date64DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Date64)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[arrow.Date64](mem, idxbldr, memo, dt, init)
case arrow.TIMESTAMP:
- ret := &TimestampDictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Timestamp)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[arrow.Timestamp](mem, idxbldr, memo, dt, init)
case arrow.TIME32:
- ret := &Time32DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Time32)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[arrow.Time32](mem, idxbldr, memo, dt, init)
case arrow.TIME64:
- ret := &Time64DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Time64)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[arrow.Time64](mem, idxbldr, memo, dt, init)
case arrow.INTERVAL_MONTHS:
- ret := &MonthIntervalDictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*MonthInterval)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[arrow.MonthInterval](mem, idxbldr, memo, dt, init)
case arrow.INTERVAL_DAY_TIME:
- ret := &DayTimeDictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*DayTimeInterval)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createFixedSizeDictBuilder[arrow.DayTimeInterval](mem, idxbldr, memo, dt, init)
case arrow.DECIMAL32:
- ret := &Decimal32DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Decimal32)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createFixedSizeDictBuilder[decimal.Decimal32](mem, idxbldr, memo, dt, init)
case arrow.DECIMAL64:
- ret := &Decimal64DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Decimal64)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createFixedSizeDictBuilder[decimal.Decimal64](mem, idxbldr, memo, dt, init)
case arrow.DECIMAL128:
- ret := &Decimal128DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Decimal128)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createFixedSizeDictBuilder[decimal.Decimal128](mem, idxbldr, memo, dt, init)
case arrow.DECIMAL256:
- ret := &Decimal256DictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Decimal256)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createFixedSizeDictBuilder[decimal.Decimal256](mem, idxbldr, memo, dt, init)
case arrow.LIST:
case arrow.STRUCT:
case arrow.SPARSE_UNION:
@@ -666,24 +593,12 @@ func NewDictionaryBuilderWithDict(mem memory.Allocator, dt *arrow.DictionaryType
case arrow.EXTENSION:
case arrow.FIXED_SIZE_LIST:
case arrow.DURATION:
- ret := &DurationDictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*Duration)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createDictBuilder[arrow.Duration](mem, idxbldr, memo, dt, init)
case arrow.LARGE_STRING:
case arrow.LARGE_BINARY:
case arrow.LARGE_LIST:
case arrow.INTERVAL_MONTH_DAY_NANO:
- ret := &MonthDayNanoDictionaryBuilder{bldr}
- if init != nil {
- if err = ret.InsertDictValues(init.(*MonthDayNanoInterval)); err != nil {
- panic(err)
- }
- }
- return ret
+ return createFixedSizeDictBuilder[arrow.MonthDayNanoInterval](mem, idxbldr, memo, dt, init)
}
panic("arrow/array: unimplemented dictionary key type")
@@ -696,9 +611,9 @@ func NewDictionaryBuilder(mem memory.Allocator, dt *arrow.DictionaryType) Dictio
func (b *dictionaryBuilder) Type() arrow.DataType { return b.dt }
func (b *dictionaryBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
b.idxBuilder.Release()
b.idxBuilder.Builder = nil
if binmemo, ok := b.memoTable.(*hashing.BinaryMemoTable); ok {
@@ -820,7 +735,7 @@ func (b *dictionaryBuilder) newData() *Data {
func (b *dictionaryBuilder) NewDictionaryArray() *Dictionary {
a := &Dictionary{}
- a.refCount = 1
+ a.refCount.Add(1)
indices := b.newData()
a.setData(indices)
@@ -1071,27 +986,20 @@ func (b *NullDictionaryBuilder) AppendArray(arr arrow.Array) error {
return nil
}
-type Int8DictionaryBuilder struct {
+type dictBuilder[T arrow.ValueType] struct {
dictionaryBuilder
}
-func (b *Int8DictionaryBuilder) Append(v int8) error { return b.appendValue(v) }
-func (b *Int8DictionaryBuilder) InsertDictValues(arr *Int8) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
+func (b *dictBuilder[T]) Append(v T) error {
+ return b.appendValue(v)
}
-type Uint8DictionaryBuilder struct {
- dictionaryBuilder
+type arrValues[T arrow.ValueType] interface {
+ Values() []T
}
-func (b *Uint8DictionaryBuilder) Append(v uint8) error { return b.appendValue(v) }
-func (b *Uint8DictionaryBuilder) InsertDictValues(arr *Uint8) (err error) {
- for _, v := range arr.values {
+func (b *dictBuilder[T]) InsertDictValues(arr arrValues[T]) (err error) {
+ for _, v := range arr.Values() {
if err = b.insertDictValue(v); err != nil {
break
}
@@ -1099,231 +1007,30 @@ func (b *Uint8DictionaryBuilder) InsertDictValues(arr *Uint8) (err error) {
return
}
-type Int16DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Int16DictionaryBuilder) Append(v int16) error { return b.appendValue(v) }
-func (b *Int16DictionaryBuilder) InsertDictValues(arr *Int16) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
-}
-
-type Uint16DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Uint16DictionaryBuilder) Append(v uint16) error { return b.appendValue(v) }
-func (b *Uint16DictionaryBuilder) InsertDictValues(arr *Uint16) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
-}
-
-type Int32DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Int32DictionaryBuilder) Append(v int32) error { return b.appendValue(v) }
-func (b *Int32DictionaryBuilder) InsertDictValues(arr *Int32) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
-}
-
-type Uint32DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Uint32DictionaryBuilder) Append(v uint32) error { return b.appendValue(v) }
-func (b *Uint32DictionaryBuilder) InsertDictValues(arr *Uint32) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
-}
-
-type Int64DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Int64DictionaryBuilder) Append(v int64) error { return b.appendValue(v) }
-func (b *Int64DictionaryBuilder) InsertDictValues(arr *Int64) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
-}
-
-type Uint64DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Uint64DictionaryBuilder) Append(v uint64) error { return b.appendValue(v) }
-func (b *Uint64DictionaryBuilder) InsertDictValues(arr *Uint64) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
-}
-
-type DurationDictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *DurationDictionaryBuilder) Append(v arrow.Duration) error { return b.appendValue(int64(v)) }
-func (b *DurationDictionaryBuilder) InsertDictValues(arr *Duration) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(int64(v)); err != nil {
- break
- }
- }
- return
-}
-
-type TimestampDictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *TimestampDictionaryBuilder) Append(v arrow.Timestamp) error { return b.appendValue(int64(v)) }
-func (b *TimestampDictionaryBuilder) InsertDictValues(arr *Timestamp) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(int64(v)); err != nil {
- break
- }
- }
- return
-}
-
-type Time32DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Time32DictionaryBuilder) Append(v arrow.Time32) error { return b.appendValue(int32(v)) }
-func (b *Time32DictionaryBuilder) InsertDictValues(arr *Time32) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(int32(v)); err != nil {
- break
- }
- }
- return
-}
-
-type Time64DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Time64DictionaryBuilder) Append(v arrow.Time64) error { return b.appendValue(int64(v)) }
-func (b *Time64DictionaryBuilder) InsertDictValues(arr *Time64) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(int64(v)); err != nil {
- break
- }
- }
- return
-}
-
-type Date32DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Date32DictionaryBuilder) Append(v arrow.Date32) error { return b.appendValue(int32(v)) }
-func (b *Date32DictionaryBuilder) InsertDictValues(arr *Date32) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(int32(v)); err != nil {
- break
- }
- }
- return
-}
-
-type Date64DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Date64DictionaryBuilder) Append(v arrow.Date64) error { return b.appendValue(int64(v)) }
-func (b *Date64DictionaryBuilder) InsertDictValues(arr *Date64) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(int64(v)); err != nil {
- break
- }
- }
- return
-}
-
-type MonthIntervalDictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *MonthIntervalDictionaryBuilder) Append(v arrow.MonthInterval) error {
- return b.appendValue(int32(v))
-}
-func (b *MonthIntervalDictionaryBuilder) InsertDictValues(arr *MonthInterval) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(int32(v)); err != nil {
- break
- }
- }
- return
-}
-
-type Float16DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Float16DictionaryBuilder) Append(v float16.Num) error { return b.appendValue(v.Uint16()) }
-func (b *Float16DictionaryBuilder) InsertDictValues(arr *Float16) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v.Uint16()); err != nil {
- break
- }
- }
- return
-}
-
-type Float32DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Float32DictionaryBuilder) Append(v float32) error { return b.appendValue(v) }
-func (b *Float32DictionaryBuilder) InsertDictValues(arr *Float32) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
-}
-
-type Float64DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Float64DictionaryBuilder) Append(v float64) error { return b.appendValue(v) }
-func (b *Float64DictionaryBuilder) InsertDictValues(arr *Float64) (err error) {
- for _, v := range arr.values {
- if err = b.insertDictValue(v); err != nil {
- break
- }
- }
- return
-}
+type Int8DictionaryBuilder = dictBuilder[int8]
+type Uint8DictionaryBuilder = dictBuilder[uint8]
+type Int16DictionaryBuilder = dictBuilder[int16]
+type Uint16DictionaryBuilder = dictBuilder[uint16]
+type Int32DictionaryBuilder = dictBuilder[int32]
+type Uint32DictionaryBuilder = dictBuilder[uint32]
+type Int64DictionaryBuilder = dictBuilder[int64]
+type Uint64DictionaryBuilder = dictBuilder[uint64]
+type Float16DictionaryBuilder = dictBuilder[float16.Num]
+type Float32DictionaryBuilder = dictBuilder[float32]
+type Float64DictionaryBuilder = dictBuilder[float64]
+type DurationDictionaryBuilder = dictBuilder[arrow.Duration]
+type TimestampDictionaryBuilder = dictBuilder[arrow.Timestamp]
+type Time32DictionaryBuilder = dictBuilder[arrow.Time32]
+type Time64DictionaryBuilder = dictBuilder[arrow.Time64]
+type Date32DictionaryBuilder = dictBuilder[arrow.Date32]
+type Date64DictionaryBuilder = dictBuilder[arrow.Date64]
+type MonthIntervalDictionaryBuilder = dictBuilder[arrow.MonthInterval]
+type DayTimeDictionaryBuilder = fixedSizeDictionaryBuilder[arrow.DayTimeInterval]
+type Decimal32DictionaryBuilder = fixedSizeDictionaryBuilder[decimal.Decimal32]
+type Decimal64DictionaryBuilder = fixedSizeDictionaryBuilder[decimal.Decimal64]
+type Decimal128DictionaryBuilder = fixedSizeDictionaryBuilder[decimal.Decimal128]
+type Decimal256DictionaryBuilder = fixedSizeDictionaryBuilder[decimal.Decimal256]
+type MonthDayNanoDictionaryBuilder = fixedSizeDictionaryBuilder[arrow.MonthDayNanoInterval]
type BinaryDictionaryBuilder struct {
dictionaryBuilder
@@ -1351,6 +1058,7 @@ func (b *BinaryDictionaryBuilder) InsertDictValues(arr *Binary) (err error) {
}
return
}
+
func (b *BinaryDictionaryBuilder) InsertStringDictValues(arr *String) (err error) {
if !arrow.TypeEqual(arr.DataType(), b.dt.ValueType) {
return fmt.Errorf("dictionary insert type mismatch: cannot insert values of type %T to dictionary type %T", arr.DataType(), b.dt.ValueType)
@@ -1399,133 +1107,61 @@ func (b *BinaryDictionaryBuilder) ValueStr(i int) string {
return string(b.Value(i))
}
-type FixedSizeBinaryDictionaryBuilder struct {
- dictionaryBuilder
- byteWidth int
+type fsbType interface {
+ arrow.DayTimeInterval | arrow.MonthDayNanoInterval |
+ decimal.Decimal32 | decimal.Decimal64 | decimal.Decimal128 | decimal.Decimal256
}
-func (b *FixedSizeBinaryDictionaryBuilder) Append(v []byte) error {
- return b.appendValue(v[:b.byteWidth])
-}
-func (b *FixedSizeBinaryDictionaryBuilder) InsertDictValues(arr *FixedSizeBinary) (err error) {
- var (
- beg = arr.array.data.offset * b.byteWidth
- end = (arr.array.data.offset + arr.data.length) * b.byteWidth
- )
- data := arr.valueBytes[beg:end]
- for len(data) > 0 {
- if err = b.insertDictValue(data[:b.byteWidth]); err != nil {
- break
- }
- data = data[b.byteWidth:]
- }
- return
-}
-
-type Decimal32DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Decimal32DictionaryBuilder) Append(v decimal.Decimal32) error {
- return b.appendValue((*(*[arrow.Decimal32SizeBytes]byte)(unsafe.Pointer(&v)))[:])
-}
-func (b *Decimal32DictionaryBuilder) InsertDictValues(arr *Decimal32) (err error) {
- data := arrow.Decimal32Traits.CastToBytes(arr.values)
- for len(data) > 0 {
- if err = b.insertDictValue(data[:arrow.Decimal32SizeBytes]); err != nil {
- break
- }
- data = data[arrow.Decimal32SizeBytes:]
- }
- return
-}
-
-type Decimal64DictionaryBuilder struct {
- dictionaryBuilder
-}
-
-func (b *Decimal64DictionaryBuilder) Append(v decimal.Decimal64) error {
- return b.appendValue((*(*[arrow.Decimal64SizeBytes]byte)(unsafe.Pointer(&v)))[:])
-}
-func (b *Decimal64DictionaryBuilder) InsertDictValues(arr *Decimal64) (err error) {
- data := arrow.Decimal64Traits.CastToBytes(arr.values)
- for len(data) > 0 {
- if err = b.insertDictValue(data[:arrow.Decimal64SizeBytes]); err != nil {
- break
- }
- data = data[arrow.Decimal64SizeBytes:]
- }
- return
-}
-
-type Decimal128DictionaryBuilder struct {
+type fixedSizeDictionaryBuilder[T fsbType] struct {
dictionaryBuilder
+ byteWidth int
}
-func (b *Decimal128DictionaryBuilder) Append(v decimal128.Num) error {
- return b.appendValue((*(*[arrow.Decimal128SizeBytes]byte)(unsafe.Pointer(&v)))[:])
-}
-func (b *Decimal128DictionaryBuilder) InsertDictValues(arr *Decimal128) (err error) {
- data := arrow.Decimal128Traits.CastToBytes(arr.values)
- for len(data) > 0 {
- if err = b.insertDictValue(data[:arrow.Decimal128SizeBytes]); err != nil {
- break
- }
- data = data[arrow.Decimal128SizeBytes:]
+func (b *fixedSizeDictionaryBuilder[T]) Append(v T) error {
+ if v, ok := any(v).([]byte); ok {
+ return b.appendBytes(v[:b.byteWidth])
}
- return
-}
-type Decimal256DictionaryBuilder struct {
- dictionaryBuilder
+ sliceHdr := struct {
+ Addr *T
+ Len int
+ Cap int
+ }{&v, b.byteWidth, b.byteWidth}
+ slice := *(*[]byte)(unsafe.Pointer(&sliceHdr))
+ return b.appendValue(slice)
}
-func (b *Decimal256DictionaryBuilder) Append(v decimal256.Num) error {
- return b.appendValue((*(*[arrow.Decimal256SizeBytes]byte)(unsafe.Pointer(&v)))[:])
-}
-func (b *Decimal256DictionaryBuilder) InsertDictValues(arr *Decimal256) (err error) {
- data := arrow.Decimal256Traits.CastToBytes(arr.values)
+func (b *fixedSizeDictionaryBuilder[T]) InsertDictValues(arr arrValues[T]) (err error) {
+ data := arrow.GetBytes(arr.Values())
for len(data) > 0 {
- if err = b.insertDictValue(data[:arrow.Decimal256SizeBytes]); err != nil {
+ if err = b.insertDictBytes(data[:b.byteWidth]); err != nil {
break
}
- data = data[arrow.Decimal256SizeBytes:]
+ data = data[b.byteWidth:]
}
return
}
-type MonthDayNanoDictionaryBuilder struct {
+type FixedSizeBinaryDictionaryBuilder struct {
dictionaryBuilder
+ byteWidth int
}
-func (b *MonthDayNanoDictionaryBuilder) Append(v arrow.MonthDayNanoInterval) error {
- return b.appendValue((*(*[arrow.MonthDayNanoIntervalSizeBytes]byte)(unsafe.Pointer(&v)))[:])
-}
-func (b *MonthDayNanoDictionaryBuilder) InsertDictValues(arr *MonthDayNanoInterval) (err error) {
- data := arrow.MonthDayNanoIntervalTraits.CastToBytes(arr.values)
- for len(data) > 0 {
- if err = b.insertDictValue(data[:arrow.MonthDayNanoIntervalSizeBytes]); err != nil {
- break
- }
- data = data[arrow.MonthDayNanoIntervalSizeBytes:]
- }
- return
-}
-
-type DayTimeDictionaryBuilder struct {
- dictionaryBuilder
+func (b *FixedSizeBinaryDictionaryBuilder) Append(v []byte) error {
+ return b.appendValue(v[:b.byteWidth])
}
-func (b *DayTimeDictionaryBuilder) Append(v arrow.DayTimeInterval) error {
- return b.appendValue((*(*[arrow.DayTimeIntervalSizeBytes]byte)(unsafe.Pointer(&v)))[:])
-}
-func (b *DayTimeDictionaryBuilder) InsertDictValues(arr *DayTimeInterval) (err error) {
- data := arrow.DayTimeIntervalTraits.CastToBytes(arr.values)
+func (b *FixedSizeBinaryDictionaryBuilder) InsertDictValues(arr *FixedSizeBinary) (err error) {
+ var (
+ beg = arr.array.data.offset * b.byteWidth
+ end = (arr.array.data.offset + arr.data.length) * b.byteWidth
+ )
+ data := arr.valueBytes[beg:end]
for len(data) > 0 {
- if err = b.insertDictValue(data[:arrow.DayTimeIntervalSizeBytes]); err != nil {
+ if err = b.insertDictValue(data[:b.byteWidth]); err != nil {
break
}
- data = data[arrow.DayTimeIntervalSizeBytes:]
+ data = data[b.byteWidth:]
}
return
}
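
The per-type dictionary builders above are collapsed into generic dictBuilder[T] / fixedSizeDictionaryBuilder[T] implementations, with the old exported names kept as type aliases, so existing callers should keep compiling. A minimal sketch of how a caller would still use one of the aliases, assuming the Append/NewDictionaryArray surface is unchanged from v18.2.0:

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()
	dt := &arrow.DictionaryType{
		IndexType: arrow.PrimitiveTypes.Int32,
		ValueType: arrow.PrimitiveTypes.Float64,
	}

	// Float64DictionaryBuilder is now an alias for dictBuilder[float64];
	// the concrete type assertion is the same as before the change.
	bldr := array.NewDictionaryBuilder(mem, dt).(*array.Float64DictionaryBuilder)
	defer bldr.Release()

	for _, v := range []float64{1.5, 2.5, 1.5} {
		if err := bldr.Append(v); err != nil {
			panic(err)
		}
	}

	arr := bldr.NewDictionaryArray()
	defer arr.Release()
	fmt.Println(arr) // the repeated 1.5 shares a single dictionary entry
}
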
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/encoded.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/encoded.go
index 81c375c98a24a..8e39090f6face 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/encoded.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/encoded.go
@@ -21,7 +21,6 @@ import (
"fmt"
"math"
"reflect"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/encoded"
@@ -50,7 +49,7 @@ func NewRunEndEncodedArray(runEnds, values arrow.Array, logicalLength, offset in
func NewRunEndEncodedData(data arrow.ArrayData) *RunEndEncoded {
r := &RunEndEncoded{}
- r.refCount = 1
+ r.refCount.Add(1)
r.setData(data.(*Data))
return r
}
@@ -305,14 +304,16 @@ func NewRunEndEncodedBuilder(mem memory.Allocator, runEnds, encoded arrow.DataTy
case arrow.INT64:
maxEnd = math.MaxInt64
}
- return &RunEndEncodedBuilder{
- builder: builder{refCount: 1, mem: mem},
+ reb := &RunEndEncodedBuilder{
+ builder: builder{mem: mem},
dt: dt,
runEnds: NewBuilder(mem, runEnds),
values: NewBuilder(mem, encoded),
maxRunEnd: maxEnd,
lastUnmarshalled: nil,
}
+ reb.builder.refCount.Add(1)
+ return reb
}
func (b *RunEndEncodedBuilder) Type() arrow.DataType {
@@ -320,9 +321,9 @@ func (b *RunEndEncodedBuilder) Type() arrow.DataType {
}
func (b *RunEndEncodedBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
b.values.Release()
b.runEnds.Release()
}
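
This file (and the ones that follow) migrates the refCount field from a plain int64 driven by sync/atomic calls to an atomic.Int64, which is why the constructors now call refCount.Add(1) instead of setting refCount: 1 in a composite literal (atomic.Int64 has no literal initializer). A self-contained sketch of the pattern, using only the standard library:

package main

import (
	"fmt"
	"sync/atomic"
)

// refCounted mirrors the pattern the vendored code switches to: an
// atomic.Int64 field instead of a bare int64 manipulated with
// atomic.AddInt64 / atomic.LoadInt64.
type refCounted struct {
	refCount atomic.Int64
}

func newRefCounted() *refCounted {
	r := &refCounted{}
	r.refCount.Add(1) // replaces the old `refCount: 1` composite-literal field
	return r
}

func (r *refCounted) Retain() { r.refCount.Add(1) }

func (r *refCounted) Release() {
	if r.refCount.Load() <= 0 {
		panic("too many releases")
	}
	if r.refCount.Add(-1) == 0 {
		fmt.Println("freeing underlying buffers")
	}
}

func main() {
	r := newRefCounted()
	r.Retain()
	r.Release()
	r.Release() // count reaches zero here
}

A side benefit of the typed atomic is that it embeds a noCopy marker, so go vet's copylocks check flags accidental by-value copies of a builder.
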
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/extension.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/extension.go
index d1a2835074843..e509b5e0fddb7 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/extension.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/extension.go
@@ -86,7 +86,7 @@ func NewExtensionArrayWithStorage(dt arrow.ExtensionType, storage arrow.Array) a
// underlying data built for the storage array.
func NewExtensionData(data arrow.ArrayData) ExtensionArray {
base := ExtensionArrayBase{}
- base.refCount = 1
+ base.refCount.Add(1)
base.setData(data.(*Data))
// use the ExtensionType's ArrayType to construct the correctly typed object
@@ -173,7 +173,7 @@ func (e *ExtensionArrayBase) ValueStr(i int) string {
}
// no-op function that exists simply to force embedding this in any extension array types.
-func (ExtensionArrayBase) mustEmbedExtensionArrayBase() {}
+func (*ExtensionArrayBase) mustEmbedExtensionArrayBase() {}
// ExtensionBuilder is a convenience builder so that NewBuilder and such will still work
// with extension types properly. Depending on preference it may be cleaner or easier to just use
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/fixed_size_list.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixed_size_list.go
index 84036f94df065..4a0524ec642b6 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/fixed_size_list.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixed_size_list.go
@@ -20,7 +20,6 @@ import (
"bytes"
"fmt"
"strings"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
@@ -41,7 +40,7 @@ var _ ListLike = (*FixedSizeList)(nil)
// NewFixedSizeListData returns a new List array value, from data.
func NewFixedSizeListData(data arrow.ArrayData) *FixedSizeList {
a := &FixedSizeList{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -54,6 +53,7 @@ func (a *FixedSizeList) ValueStr(i int) string {
}
return string(a.GetOneForMarshal(i).(json.RawMessage))
}
+
func (a *FixedSizeList) String() string {
o := new(strings.Builder)
o.WriteString("[")
@@ -169,28 +169,33 @@ type FixedSizeListBuilder struct {
// NewFixedSizeListBuilder returns a builder, using the provided memory allocator.
// The created list builder will create a list whose elements will be of type etype.
func NewFixedSizeListBuilder(mem memory.Allocator, n int32, etype arrow.DataType) *FixedSizeListBuilder {
- return &FixedSizeListBuilder{
+ fslb := &FixedSizeListBuilder{
baseListBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, etype),
dt: arrow.FixedSizeListOf(n, etype),
},
n,
}
+ fslb.baseListBuilder.builder.refCount.Add(1)
+ return fslb
}
// NewFixedSizeListBuilderWithField returns a builder similarly to
// NewFixedSizeListBuilder, but it accepts a child rather than just a datatype
// to ensure nullability context is preserved.
func NewFixedSizeListBuilderWithField(mem memory.Allocator, n int32, field arrow.Field) *FixedSizeListBuilder {
- return &FixedSizeListBuilder{
+ fslb := &FixedSizeListBuilder{
baseListBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, field.Type),
dt: arrow.FixedSizeListOfField(n, field),
},
n,
}
+
+ fslb.baseListBuilder.builder.refCount.Add(1)
+ return fslb
}
func (b *FixedSizeListBuilder) Type() arrow.DataType { return b.dt }
@@ -198,9 +203,9 @@ func (b *FixedSizeListBuilder) Type() arrow.DataType { return b.dt }
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *FixedSizeListBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binary.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binary.go
index 7049c9c011d6d..a3b03806da8ba 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binary.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binary.go
@@ -37,7 +37,7 @@ type FixedSizeBinary struct {
// NewFixedSizeBinaryData constructs a new fixed-size binary array from data.
func NewFixedSizeBinaryData(data arrow.ArrayData) *FixedSizeBinary {
a := &FixedSizeBinary{bytewidth: int32(data.DataType().(arrow.FixedWidthDataType).BitWidth() / 8)}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -52,6 +52,7 @@ func (a *FixedSizeBinary) Value(i int) []byte {
)
return a.valueBytes[beg:end]
}
+
func (a *FixedSizeBinary) ValueStr(i int) string {
if a.IsNull(i) {
return NullValueStr
@@ -83,7 +84,6 @@ func (a *FixedSizeBinary) setData(data *Data) {
if vals != nil {
a.valueBytes = vals.Bytes()
}
-
}
func (a *FixedSizeBinary) GetOneForMarshal(i int) interface{} {
@@ -118,6 +118,4 @@ func arrayEqualFixedSizeBinary(left, right *FixedSizeBinary) bool {
return true
}
-var (
- _ arrow.Array = (*FixedSizeBinary)(nil)
-)
+var _ arrow.Array = (*FixedSizeBinary)(nil)
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binarybuilder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binarybuilder.go
index 02e72a25bc4b7..ee7869fa10d6a 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binarybuilder.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binarybuilder.go
@@ -21,7 +21,6 @@ import (
"encoding/base64"
"fmt"
"reflect"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/internal/debug"
@@ -39,10 +38,11 @@ type FixedSizeBinaryBuilder struct {
func NewFixedSizeBinaryBuilder(mem memory.Allocator, dtype *arrow.FixedSizeBinaryType) *FixedSizeBinaryBuilder {
b := &FixedSizeBinaryBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
dtype: dtype,
values: newByteBufferBuilder(mem),
}
+ b.builder.refCount.Add(1)
return b
}
@@ -52,9 +52,9 @@ func (b *FixedSizeBinaryBuilder) Type() arrow.DataType { return b.dtype }
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (b *FixedSizeBinaryBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -256,6 +256,4 @@ func (b *FixedSizeBinaryBuilder) UnmarshalJSON(data []byte) error {
return b.Unmarshal(dec)
}
-var (
- _ Builder = (*FixedSizeBinaryBuilder)(nil)
-)
+var _ Builder = (*FixedSizeBinaryBuilder)(nil)
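
Apart from the refcount migration and gofmt cleanups, the FixedSizeBinaryBuilder API itself is untouched; for context, a small usage sketch (values are illustrative):

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()
	dtype := &arrow.FixedSizeBinaryType{ByteWidth: 4}

	b := array.NewFixedSizeBinaryBuilder(mem, dtype)
	defer b.Release()

	b.Append([]byte{0xde, 0xad, 0xbe, 0xef}) // must be exactly ByteWidth bytes
	b.AppendNull()

	arr := b.NewFixedSizeBinaryArray()
	defer arr.Release()
	fmt.Println(arr)
}
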
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/float16.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/float16.go
index 6b0e820f8a5d3..5f57f725d5bab 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/float16.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/float16.go
@@ -33,7 +33,7 @@ type Float16 struct {
func NewFloat16Data(data arrow.ArrayData) *Float16 {
a := &Float16{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -106,18 +106,7 @@ func (a *Float16) MarshalJSON() ([]byte, error) {
return json.Marshal(vals)
}
-func arrayEqualFloat16(left, right *Float16) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
var (
- _ arrow.Array = (*Float16)(nil)
+ _ arrow.Array = (*Float16)(nil)
+ _ arrow.TypedArray[float16.Num] = (*Float16)(nil)
)
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/float16_builder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/float16_builder.go
index 93dbfbc023803..d4acd7f66eca5 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/float16_builder.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/float16_builder.go
@@ -21,7 +21,6 @@ import (
"fmt"
"reflect"
"strconv"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
@@ -39,7 +38,9 @@ type Float16Builder struct {
}
func NewFloat16Builder(mem memory.Allocator) *Float16Builder {
- return &Float16Builder{builder: builder{refCount: 1, mem: mem}}
+ fb := &Float16Builder{builder: builder{mem: mem}}
+ fb.refCount.Add(1)
+ return fb
}
func (b *Float16Builder) Type() arrow.DataType { return arrow.FixedWidthTypes.Float16 }
@@ -47,9 +48,9 @@ func (b *Float16Builder) Type() arrow.DataType { return arrow.FixedWidthTypes.Fl
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Float16Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/interval.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/interval.go
index 324647e8c4de1..54915cddd3e63 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/interval.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/interval.go
@@ -21,7 +21,6 @@ import (
"fmt"
"strconv"
"strings"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
@@ -51,7 +50,7 @@ type MonthInterval struct {
func NewMonthIntervalData(data arrow.ArrayData) *MonthInterval {
a := &MonthInterval{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -63,7 +62,8 @@ func (a *MonthInterval) ValueStr(i int) string {
}
return fmt.Sprintf("%v", a.Value(i))
}
-func (a *MonthInterval) MonthIntervalValues() []arrow.MonthInterval { return a.values }
+func (a *MonthInterval) MonthIntervalValues() []arrow.MonthInterval { return a.Values() }
+func (a *MonthInterval) Values() []arrow.MonthInterval { return a.values }
func (a *MonthInterval) String() string {
o := new(strings.Builder)
@@ -140,7 +140,9 @@ type MonthIntervalBuilder struct {
}
func NewMonthIntervalBuilder(mem memory.Allocator) *MonthIntervalBuilder {
- return &MonthIntervalBuilder{builder: builder{refCount: 1, mem: mem}}
+ mib := &MonthIntervalBuilder{builder: builder{mem: mem}}
+ mib.refCount.Add(1)
+ return mib
}
func (b *MonthIntervalBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.MonthInterval }
@@ -148,9 +150,9 @@ func (b *MonthIntervalBuilder) Type() arrow.DataType { return arrow.FixedWidthTy
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *MonthIntervalBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -348,7 +350,7 @@ type DayTimeInterval struct {
func NewDayTimeIntervalData(data arrow.ArrayData) *DayTimeInterval {
a := &DayTimeInterval{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -440,7 +442,9 @@ type DayTimeIntervalBuilder struct {
}
func NewDayTimeIntervalBuilder(mem memory.Allocator) *DayTimeIntervalBuilder {
- return &DayTimeIntervalBuilder{builder: builder{refCount: 1, mem: mem}}
+ dtb := &DayTimeIntervalBuilder{builder: builder{mem: mem}}
+ dtb.refCount.Add(1)
+ return dtb
}
func (b *DayTimeIntervalBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.DayTimeInterval }
@@ -448,9 +452,9 @@ func (b *DayTimeIntervalBuilder) Type() arrow.DataType { return arrow.FixedWidth
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *DayTimeIntervalBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -647,7 +651,7 @@ type MonthDayNanoInterval struct {
func NewMonthDayNanoIntervalData(data arrow.ArrayData) *MonthDayNanoInterval {
a := &MonthDayNanoInterval{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -741,7 +745,9 @@ type MonthDayNanoIntervalBuilder struct {
}
func NewMonthDayNanoIntervalBuilder(mem memory.Allocator) *MonthDayNanoIntervalBuilder {
- return &MonthDayNanoIntervalBuilder{builder: builder{refCount: 1, mem: mem}}
+ mb := &MonthDayNanoIntervalBuilder{builder: builder{mem: mem}}
+ mb.refCount.Add(1)
+ return mb
}
func (b *MonthDayNanoIntervalBuilder) Type() arrow.DataType {
@@ -751,9 +757,9 @@ func (b *MonthDayNanoIntervalBuilder) Type() arrow.DataType {
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *MonthDayNanoIntervalBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -950,4 +956,8 @@ var (
_ Builder = (*MonthIntervalBuilder)(nil)
_ Builder = (*DayTimeIntervalBuilder)(nil)
_ Builder = (*MonthDayNanoIntervalBuilder)(nil)
+
+ _ arrow.TypedArray[arrow.MonthInterval] = (*MonthInterval)(nil)
+ _ arrow.TypedArray[arrow.DayTimeInterval] = (*DayTimeInterval)(nil)
+ _ arrow.TypedArray[arrow.MonthDayNanoInterval] = (*MonthDayNanoInterval)(nil)
)
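
The new Values() accessors and the arrow.TypedArray assertions above are what let these interval arrays plug into generic code paths such as the arrValues[T] constraint used by the generic dictionary builders. A sketch using a locally defined constraint rather than assuming the exact exported shape of arrow.TypedArray:

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

// valuesOf is a local stand-in for the kind of constraint the generic
// builders rely on; it only assumes the Values() accessor added above.
type valuesOf[T any] interface {
	Values() []T
}

func sumMonths(a valuesOf[arrow.MonthInterval]) (total arrow.MonthInterval) {
	for _, v := range a.Values() {
		total += v
	}
	return total
}

func main() {
	mem := memory.NewGoAllocator()
	b := array.NewMonthIntervalBuilder(mem)
	defer b.Release()

	b.AppendValues([]arrow.MonthInterval{1, 2, 3}, nil)

	arr := b.NewMonthIntervalArray()
	defer arr.Release()

	fmt.Println(sumMonths(arr)) // 6
}
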
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/json_reader.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/json_reader.go
index 7835b280fb418..b0698b3a25a69 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/json_reader.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/json_reader.go
@@ -28,8 +28,10 @@ import (
"github.com/apache/arrow-go/v18/internal/json"
)
-type Option func(config)
-type config interface{}
+type (
+ Option func(config)
+ config interface{}
+)
// WithChunk sets the chunk size for reading in json records. The default is to
// read in one row per record batch as a single object. If chunk size is set to
@@ -72,7 +74,7 @@ type JSONReader struct {
bldr *RecordBuilder
- refs int64
+ refs atomic.Int64
cur arrow.Record
err error
@@ -93,9 +95,10 @@ func NewJSONReader(r io.Reader, schema *arrow.Schema, opts ...Option) *JSONReade
rr := &JSONReader{
r: json.NewDecoder(r),
schema: schema,
- refs: 1,
chunk: 1,
}
+ rr.refs.Add(1)
+
for _, o := range opts {
o(rr)
}
@@ -126,13 +129,13 @@ func (r *JSONReader) Schema() *arrow.Schema { return r.schema }
func (r *JSONReader) Record() arrow.Record { return r.cur }
func (r *JSONReader) Retain() {
- atomic.AddInt64(&r.refs, 1)
+ r.refs.Add(1)
}
func (r *JSONReader) Release() {
- debug.Assert(atomic.LoadInt64(&r.refs) > 0, "too many releases")
+ debug.Assert(r.refs.Load() > 0, "too many releases")
- if atomic.AddInt64(&r.refs, -1) == 0 {
+ if r.refs.Add(-1) == 0 {
if r.cur != nil {
r.cur.Release()
r.bldr.Release()
@@ -186,7 +189,7 @@ func (r *JSONReader) next1() bool {
}
func (r *JSONReader) nextn() bool {
- var n = 0
+ n := 0
for i := 0; i < r.chunk && !r.done; i, n = i+1, n+1 {
if !r.readNext() {
@@ -200,6 +203,4 @@ func (r *JSONReader) nextn() bool {
return n > 0
}
-var (
- _ RecordReader = (*JSONReader)(nil)
-)
+var _ RecordReader = (*JSONReader)(nil)
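
The JSONReader changes are again the refcount migration plus gofmt cleanups; its public behaviour is unchanged. For reference, a usage sketch (assuming, per the WithChunk docs, that a negative chunk size parses all input into a single record):

package main

import (
	"fmt"
	"strings"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/array"
)

func main() {
	schema := arrow.NewSchema([]arrow.Field{
		{Name: "id", Type: arrow.PrimitiveTypes.Int64},
		{Name: "name", Type: arrow.BinaryTypes.String},
	}, nil)

	data := `{"id": 1, "name": "a"}
{"id": 2, "name": "b"}`

	rdr := array.NewJSONReader(strings.NewReader(data), schema, array.WithChunk(-1))
	defer rdr.Release()

	for rdr.Next() {
		fmt.Println(rdr.Record()) // record is owned by the reader
	}
	if err := rdr.Err(); err != nil {
		panic(err)
	}
}
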
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/list.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/list.go
index e80bc89635e81..806b89c92f9a9 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/list.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/list.go
@@ -20,7 +20,6 @@ import (
"bytes"
"fmt"
"strings"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
@@ -51,7 +50,7 @@ var _ ListLike = (*List)(nil)
// NewListData returns a new List array value, from data.
func NewListData(data arrow.ArrayData) *List {
a := &List{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -183,7 +182,7 @@ var _ ListLike = (*LargeList)(nil)
// NewLargeListData returns a new LargeList array value, from data.
func NewLargeListData(data arrow.ArrayData) *LargeList {
a := new(LargeList)
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -337,30 +336,34 @@ type LargeListBuilder struct {
// The created list builder will create a list whose elements will be of type etype.
func NewListBuilder(mem memory.Allocator, etype arrow.DataType) *ListBuilder {
offsetBldr := NewInt32Builder(mem)
- return &ListBuilder{
+ lb := &ListBuilder{
baseListBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, etype),
offsets: offsetBldr,
dt: arrow.ListOf(etype),
appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) },
},
}
+ lb.refCount.Add(1)
+ return lb
}
// NewListBuilderWithField takes a field to use for the child rather than just
// a datatype to allow for more customization.
func NewListBuilderWithField(mem memory.Allocator, field arrow.Field) *ListBuilder {
offsetBldr := NewInt32Builder(mem)
- return &ListBuilder{
+ lb := &ListBuilder{
baseListBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, field.Type),
offsets: offsetBldr,
dt: arrow.ListOfField(field),
appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) },
},
}
+ lb.refCount.Add(1)
+ return lb
}
func (b *baseListBuilder) Type() arrow.DataType {
@@ -381,38 +384,42 @@ func (b *baseListBuilder) Type() arrow.DataType {
// The created list builder will create a list whose elements will be of type etype.
func NewLargeListBuilder(mem memory.Allocator, etype arrow.DataType) *LargeListBuilder {
offsetBldr := NewInt64Builder(mem)
- return &LargeListBuilder{
+ llb := &LargeListBuilder{
baseListBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, etype),
offsets: offsetBldr,
dt: arrow.LargeListOf(etype),
appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) },
},
}
+ llb.refCount.Add(1)
+ return llb
}
// NewLargeListBuilderWithField takes a field rather than just an element type
// to allow for more customization of the final type of the LargeList Array
func NewLargeListBuilderWithField(mem memory.Allocator, field arrow.Field) *LargeListBuilder {
offsetBldr := NewInt64Builder(mem)
- return &LargeListBuilder{
+ llb := &LargeListBuilder{
baseListBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, field.Type),
offsets: offsetBldr,
dt: arrow.LargeListOfField(field),
appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) },
},
}
+ llb.refCount.Add(1)
+ return llb
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *baseListBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -420,7 +427,6 @@ func (b *baseListBuilder) Release() {
b.values.Release()
b.offsets.Release()
}
-
}
func (b *baseListBuilder) appendNextOffset() {
@@ -646,7 +652,7 @@ var _ VarLenListLike = (*ListView)(nil)
func NewListViewData(data arrow.ArrayData) *ListView {
a := &ListView{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -793,7 +799,7 @@ var _ VarLenListLike = (*LargeListView)(nil)
// NewLargeListViewData returns a new LargeListView array value, from data.
func NewLargeListViewData(data arrow.ArrayData) *LargeListView {
a := new(LargeListView)
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -931,8 +937,10 @@ type offsetsAndSizes interface {
sizeAt(slot int64) int64
}
-var _ offsetsAndSizes = (*ListView)(nil)
-var _ offsetsAndSizes = (*LargeListView)(nil)
+var (
+ _ offsetsAndSizes = (*ListView)(nil)
+ _ offsetsAndSizes = (*LargeListView)(nil)
+)
func (a *ListView) offsetAt(slot int64) int64 { return int64(a.offsets[int64(a.data.offset)+slot]) }
@@ -1081,9 +1089,9 @@ type LargeListViewBuilder struct {
func NewListViewBuilder(mem memory.Allocator, etype arrow.DataType) *ListViewBuilder {
offsetBldr := NewInt32Builder(mem)
sizeBldr := NewInt32Builder(mem)
- return &ListViewBuilder{
+ lvb := &ListViewBuilder{
baseListViewBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, etype),
offsets: offsetBldr,
sizes: sizeBldr,
@@ -1092,6 +1100,8 @@ func NewListViewBuilder(mem memory.Allocator, etype arrow.DataType) *ListViewBui
appendSizeVal: func(s int) { sizeBldr.Append(int32(s)) },
},
}
+ lvb.refCount.Add(1)
+ return lvb
}
// NewListViewBuilderWithField takes a field to use for the child rather than just
@@ -1099,9 +1109,9 @@ func NewListViewBuilder(mem memory.Allocator, etype arrow.DataType) *ListViewBui
func NewListViewBuilderWithField(mem memory.Allocator, field arrow.Field) *ListViewBuilder {
offsetBldr := NewInt32Builder(mem)
sizeBldr := NewInt32Builder(mem)
- return &ListViewBuilder{
+ lvb := &ListViewBuilder{
baseListViewBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, field.Type),
offsets: offsetBldr,
sizes: sizeBldr,
@@ -1110,6 +1120,8 @@ func NewListViewBuilderWithField(mem memory.Allocator, field arrow.Field) *ListV
appendSizeVal: func(s int) { sizeBldr.Append(int32(s)) },
},
}
+ lvb.refCount.Add(1)
+ return lvb
}
func (b *baseListViewBuilder) Type() arrow.DataType {
@@ -1131,9 +1143,9 @@ func (b *baseListViewBuilder) Type() arrow.DataType {
func NewLargeListViewBuilder(mem memory.Allocator, etype arrow.DataType) *LargeListViewBuilder {
offsetBldr := NewInt64Builder(mem)
sizeBldr := NewInt64Builder(mem)
- return &LargeListViewBuilder{
+ llvb := &LargeListViewBuilder{
baseListViewBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, etype),
offsets: offsetBldr,
sizes: sizeBldr,
@@ -1142,6 +1154,8 @@ func NewLargeListViewBuilder(mem memory.Allocator, etype arrow.DataType) *LargeL
appendSizeVal: func(s int) { sizeBldr.Append(int64(s)) },
},
}
+ llvb.refCount.Add(1)
+ return llvb
}
// NewLargeListViewBuilderWithField takes a field rather than just an element type
@@ -1149,9 +1163,9 @@ func NewLargeListViewBuilder(mem memory.Allocator, etype arrow.DataType) *LargeL
func NewLargeListViewBuilderWithField(mem memory.Allocator, field arrow.Field) *LargeListViewBuilder {
offsetBldr := NewInt64Builder(mem)
sizeBldr := NewInt64Builder(mem)
- return &LargeListViewBuilder{
+ llvb := &LargeListViewBuilder{
baseListViewBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
values: NewBuilder(mem, field.Type),
offsets: offsetBldr,
sizes: sizeBldr,
@@ -1160,14 +1174,17 @@ func NewLargeListViewBuilderWithField(mem memory.Allocator, field arrow.Field) *
appendSizeVal: func(o int) { sizeBldr.Append(int64(o)) },
},
}
+
+ llvb.refCount.Add(1)
+ return llvb
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *baseListViewBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/map.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/map.go
index 5609ccd0699b4..da9a150bad84e 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/map.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/map.go
@@ -37,7 +37,7 @@ var _ ListLike = (*Map)(nil)
// NewMapData returns a new Map array value, from data
func NewMapData(data arrow.ArrayData) *Map {
a := &Map{List: &List{}}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/null.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/null.go
index 76e56a4924526..38b3b097809d6 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/null.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/null.go
@@ -21,7 +21,6 @@ import (
"fmt"
"reflect"
"strings"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/internal/debug"
@@ -37,7 +36,7 @@ type Null struct {
// NewNull returns a new Null array value of size n.
func NewNull(n int) *Null {
a := &Null{}
- a.refCount = 1
+ a.refCount.Add(1)
data := NewData(
arrow.Null, n,
[]*memory.Buffer{nil},
@@ -53,7 +52,7 @@ func NewNull(n int) *Null {
// NewNullData returns a new Null array value, from data.
func NewNullData(data arrow.ArrayData) *Null {
a := &Null{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -95,7 +94,9 @@ type NullBuilder struct {
// NewNullBuilder returns a builder, using the provided memory allocator.
func NewNullBuilder(mem memory.Allocator) *NullBuilder {
- return &NullBuilder{builder: builder{refCount: 1, mem: mem}}
+ nb := &NullBuilder{builder: builder{mem: mem}}
+ nb.refCount.Add(1)
+ return nb
}
func (b *NullBuilder) Type() arrow.DataType { return arrow.Null }
@@ -103,9 +104,9 @@ func (b *NullBuilder) Type() arrow.DataType { return arrow.Null }
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *NullBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go
deleted file mode 100644
index 7e94fe5c4e219..0000000000000
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go
+++ /dev/null
@@ -1,1469 +0,0 @@
-// Code generated by array/numeric.gen.go.tmpl. DO NOT EDIT.
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package array
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
-
- "github.com/apache/arrow-go/v18/arrow"
- "github.com/apache/arrow-go/v18/internal/json"
-)
-
-// A type which represents an immutable sequence of int64 values.
-type Int64 struct {
- array
- values []int64
-}
-
-// NewInt64Data creates a new Int64.
-func NewInt64Data(data arrow.ArrayData) *Int64 {
- a := &Int64{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Int64) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Int64) Value(i int) int64 { return a.values[i] }
-
-// Values returns the values.
-func (a *Int64) Int64Values() []int64 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Int64) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Int64) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Int64Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Int64) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatInt(int64(a.Value(i)), 10)
-}
-
-func (a *Int64) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return a.values[i]
-}
-
-func (a *Int64) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
-
- if a.IsValid(i) {
- vals[i] = a.values[i]
- } else {
- vals[i] = nil
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualInt64(left, right *Int64) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of uint64 values.
-type Uint64 struct {
- array
- values []uint64
-}
-
-// NewUint64Data creates a new Uint64.
-func NewUint64Data(data arrow.ArrayData) *Uint64 {
- a := &Uint64{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Uint64) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Uint64) Value(i int) uint64 { return a.values[i] }
-
-// Values returns the values.
-func (a *Uint64) Uint64Values() []uint64 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Uint64) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Uint64) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Uint64Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Uint64) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatUint(uint64(a.Value(i)), 10)
-}
-
-func (a *Uint64) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return a.values[i]
-}
-
-func (a *Uint64) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
-
- if a.IsValid(i) {
- vals[i] = a.values[i]
- } else {
- vals[i] = nil
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualUint64(left, right *Uint64) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of float64 values.
-type Float64 struct {
- array
- values []float64
-}
-
-// NewFloat64Data creates a new Float64.
-func NewFloat64Data(data arrow.ArrayData) *Float64 {
- a := &Float64{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Float64) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Float64) Value(i int) float64 { return a.values[i] }
-
-// Values returns the values.
-func (a *Float64) Float64Values() []float64 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Float64) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Float64) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Float64Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Float64) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 64)
-}
-
-func (a *Float64) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return a.values[i]
-}
-
-func (a *Float64) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
- if !a.IsValid(i) {
- vals[i] = nil
- continue
- }
-
- f := a.Value(i)
- switch {
- case math.IsNaN(f):
- vals[i] = "NaN"
- case math.IsInf(f, 1):
- vals[i] = "+Inf"
- case math.IsInf(f, -1):
- vals[i] = "-Inf"
- default:
- vals[i] = f
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualFloat64(left, right *Float64) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of int32 values.
-type Int32 struct {
- array
- values []int32
-}
-
-// NewInt32Data creates a new Int32.
-func NewInt32Data(data arrow.ArrayData) *Int32 {
- a := &Int32{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Int32) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Int32) Value(i int) int32 { return a.values[i] }
-
-// Values returns the values.
-func (a *Int32) Int32Values() []int32 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Int32) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Int32) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Int32Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Int32) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatInt(int64(a.Value(i)), 10)
-}
-
-func (a *Int32) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return a.values[i]
-}
-
-func (a *Int32) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
-
- if a.IsValid(i) {
- vals[i] = a.values[i]
- } else {
- vals[i] = nil
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualInt32(left, right *Int32) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of uint32 values.
-type Uint32 struct {
- array
- values []uint32
-}
-
-// NewUint32Data creates a new Uint32.
-func NewUint32Data(data arrow.ArrayData) *Uint32 {
- a := &Uint32{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Uint32) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Uint32) Value(i int) uint32 { return a.values[i] }
-
-// Values returns the values.
-func (a *Uint32) Uint32Values() []uint32 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Uint32) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Uint32) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Uint32Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Uint32) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatUint(uint64(a.Value(i)), 10)
-}
-
-func (a *Uint32) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return a.values[i]
-}
-
-func (a *Uint32) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
-
- if a.IsValid(i) {
- vals[i] = a.values[i]
- } else {
- vals[i] = nil
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualUint32(left, right *Uint32) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of float32 values.
-type Float32 struct {
- array
- values []float32
-}
-
-// NewFloat32Data creates a new Float32.
-func NewFloat32Data(data arrow.ArrayData) *Float32 {
- a := &Float32{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Float32) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Float32) Value(i int) float32 { return a.values[i] }
-
-// Values returns the values.
-func (a *Float32) Float32Values() []float32 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Float32) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Float32) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Float32Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Float32) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 32)
-}
-
-func (a *Float32) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return a.values[i]
-}
-
-func (a *Float32) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
- if !a.IsValid(i) {
- vals[i] = nil
- continue
- }
-
- f := a.Value(i)
- v := strconv.FormatFloat(float64(f), 'g', -1, 32)
-
- switch v {
- case "NaN", "+Inf", "-Inf":
- vals[i] = v
- default:
- vals[i] = f
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualFloat32(left, right *Float32) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of int16 values.
-type Int16 struct {
- array
- values []int16
-}
-
-// NewInt16Data creates a new Int16.
-func NewInt16Data(data arrow.ArrayData) *Int16 {
- a := &Int16{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Int16) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Int16) Value(i int) int16 { return a.values[i] }
-
-// Values returns the values.
-func (a *Int16) Int16Values() []int16 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Int16) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Int16) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Int16Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Int16) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatInt(int64(a.Value(i)), 10)
-}
-
-func (a *Int16) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return a.values[i]
-}
-
-func (a *Int16) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
-
- if a.IsValid(i) {
- vals[i] = a.values[i]
- } else {
- vals[i] = nil
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualInt16(left, right *Int16) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of uint16 values.
-type Uint16 struct {
- array
- values []uint16
-}
-
-// NewUint16Data creates a new Uint16.
-func NewUint16Data(data arrow.ArrayData) *Uint16 {
- a := &Uint16{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Uint16) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Uint16) Value(i int) uint16 { return a.values[i] }
-
-// Values returns the values.
-func (a *Uint16) Uint16Values() []uint16 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Uint16) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Uint16) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Uint16Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Uint16) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatUint(uint64(a.Value(i)), 10)
-}
-
-func (a *Uint16) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return a.values[i]
-}
-
-func (a *Uint16) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
-
- if a.IsValid(i) {
- vals[i] = a.values[i]
- } else {
- vals[i] = nil
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualUint16(left, right *Uint16) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of int8 values.
-type Int8 struct {
- array
- values []int8
-}
-
-// NewInt8Data creates a new Int8.
-func NewInt8Data(data arrow.ArrayData) *Int8 {
- a := &Int8{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Int8) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Int8) Value(i int) int8 { return a.values[i] }
-
-// Values returns the values.
-func (a *Int8) Int8Values() []int8 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Int8) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Int8) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Int8Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Int8) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatInt(int64(a.Value(i)), 10)
-}
-
-func (a *Int8) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return float64(a.values[i]) // prevent uint8 from being seen as binary data
-}
-
-func (a *Int8) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
-
- if a.IsValid(i) {
- vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data
- } else {
- vals[i] = nil
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualInt8(left, right *Int8) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of uint8 values.
-type Uint8 struct {
- array
- values []uint8
-}
-
-// NewUint8Data creates a new Uint8.
-func NewUint8Data(data arrow.ArrayData) *Uint8 {
- a := &Uint8{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Uint8) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Uint8) Value(i int) uint8 { return a.values[i] }
-
-// Values returns the values.
-func (a *Uint8) Uint8Values() []uint8 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Uint8) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Uint8) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Uint8Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Uint8) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return strconv.FormatUint(uint64(a.Value(i)), 10)
-}
-
-func (a *Uint8) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-
- return float64(a.values[i]) // prevent uint8 from being seen as binary data
-}
-
-func (a *Uint8) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
-
- if a.IsValid(i) {
- vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data
- } else {
- vals[i] = nil
- }
-
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualUint8(left, right *Uint8) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of arrow.Time32 values.
-type Time32 struct {
- array
- values []arrow.Time32
-}
-
-// NewTime32Data creates a new Time32.
-func NewTime32Data(data arrow.ArrayData) *Time32 {
- a := &Time32{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Time32) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Time32) Value(i int) arrow.Time32 { return a.values[i] }
-
-// Values returns the values.
-func (a *Time32) Time32Values() []arrow.Time32 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Time32) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Time32) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Time32Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Time32) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return a.values[i].FormattedString(a.DataType().(*arrow.Time32Type).Unit)
-}
-
-func (a *Time32) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
- return a.values[i].ToTime(a.DataType().(*arrow.Time32Type).Unit).Format("15:04:05.999999999")
-}
-
-func (a *Time32) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := range a.values {
- vals[i] = a.GetOneForMarshal(i)
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualTime32(left, right *Time32) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of arrow.Time64 values.
-type Time64 struct {
- array
- values []arrow.Time64
-}
-
-// NewTime64Data creates a new Time64.
-func NewTime64Data(data arrow.ArrayData) *Time64 {
- a := &Time64{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Time64) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Time64) Value(i int) arrow.Time64 { return a.values[i] }
-
-// Values returns the values.
-func (a *Time64) Time64Values() []arrow.Time64 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Time64) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Time64) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Time64Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Time64) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return a.values[i].FormattedString(a.DataType().(*arrow.Time64Type).Unit)
-}
-
-func (a *Time64) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
- return a.values[i].ToTime(a.DataType().(*arrow.Time64Type).Unit).Format("15:04:05.999999999")
-}
-
-func (a *Time64) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := range a.values {
- vals[i] = a.GetOneForMarshal(i)
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualTime64(left, right *Time64) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of arrow.Date32 values.
-type Date32 struct {
- array
- values []arrow.Date32
-}
-
-// NewDate32Data creates a new Date32.
-func NewDate32Data(data arrow.ArrayData) *Date32 {
- a := &Date32{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Date32) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Date32) Value(i int) arrow.Date32 { return a.values[i] }
-
-// Values returns the values.
-func (a *Date32) Date32Values() []arrow.Date32 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Date32) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Date32) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Date32Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Date32) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return a.values[i].FormattedString()
-}
-
-func (a *Date32) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
- return a.values[i].ToTime().Format("2006-01-02")
-}
-
-func (a *Date32) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := range a.values {
- vals[i] = a.GetOneForMarshal(i)
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualDate32(left, right *Date32) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of arrow.Date64 values.
-type Date64 struct {
- array
- values []arrow.Date64
-}
-
-// NewDate64Data creates a new Date64.
-func NewDate64Data(data arrow.ArrayData) *Date64 {
- a := &Date64{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Date64) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Date64) Value(i int) arrow.Date64 { return a.values[i] }
-
-// Values returns the values.
-func (a *Date64) Date64Values() []arrow.Date64 { return a.values }
-
-// String returns a string representation of the array.
-func (a *Date64) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Date64) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.Date64Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Date64) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- return a.values[i].FormattedString()
-}
-
-func (a *Date64) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
- return a.values[i].ToTime().Format("2006-01-02")
-}
-
-func (a *Date64) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := range a.values {
- vals[i] = a.GetOneForMarshal(i)
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualDate64(left, right *Date64) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-// A type which represents an immutable sequence of arrow.Duration values.
-type Duration struct {
- array
- values []arrow.Duration
-}
-
-// NewDurationData creates a new Duration.
-func NewDurationData(data arrow.ArrayData) *Duration {
- a := &Duration{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *Duration) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *Duration) Value(i int) arrow.Duration { return a.values[i] }
-
-// Values returns the values.
-func (a *Duration) DurationValues() []arrow.Duration { return a.values }
-
-// String returns a string representation of the array.
-func (a *Duration) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *Duration) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.DurationTraits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *Duration) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
- // return value and suffix as a string such as "12345ms"
- return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*arrow.DurationType).Unit)
-}
-
-func (a *Duration) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
- // return value and suffix as a string such as "12345ms"
- return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*arrow.DurationType).Unit.String())
-}
-
-func (a *Duration) MarshalJSON() ([]byte, error) {
- vals := make([]interface{}, a.Len())
- for i := range a.values {
- vals[i] = a.GetOneForMarshal(i)
- }
-
- return json.Marshal(vals)
-}
-
-func arrayEqualDuration(left, right *Duration) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go.tmpl
deleted file mode 100644
index df07f205f4389..0000000000000
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go.tmpl
+++ /dev/null
@@ -1,192 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package array
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/apache/arrow-go/v18/arrow"
- "github.com/apache/arrow-go/v18/internal/json"
-)
-
-{{range .In}}
-
-// A type which represents an immutable sequence of {{or .QualifiedType .Type}} values.
-type {{.Name}} struct {
- array
- values []{{or .QualifiedType .Type}}
-}
-
-// New{{.Name}}Data creates a new {{.Name}}.
-func New{{.Name}}Data(data arrow.ArrayData) *{{.Name}} {
- a := &{{.Name}}{}
- a.refCount = 1
- a.setData(data.(*Data))
- return a
-}
-
-// Reset resets the array for re-use.
-func (a *{{.Name}}) Reset(data *Data) {
- a.setData(data)
-}
-
-// Value returns the value at the specified index.
-func (a *{{.Name}}) Value(i int) {{or .QualifiedType .Type}} { return a.values[i] }
-
-// Values returns the values.
-func (a *{{.Name}}) {{.Name}}Values() []{{or .QualifiedType .Type}} { return a.values }
-
-// String returns a string representation of the array.
-func (a *{{.Name}}) String() string {
- o := new(strings.Builder)
- o.WriteString("[")
- for i, v := range a.values {
- if i > 0 {
- fmt.Fprintf(o, " ")
- }
- switch {
- case a.IsNull(i):
- o.WriteString(NullValueStr)
- default:
- fmt.Fprintf(o, "%v", v)
- }
- }
- o.WriteString("]")
- return o.String()
-}
-
-func (a *{{.Name}}) setData(data *Data) {
- a.array.setData(data)
- vals := data.buffers[1]
- if vals != nil {
- a.values = arrow.{{.Name}}Traits.CastFromBytes(vals.Bytes())
- beg := a.array.data.offset
- end := beg + a.array.data.length
- a.values = a.values[beg:end]
- }
-}
-
-func (a *{{.Name}}) ValueStr(i int) string {
- if a.IsNull(i) {
- return NullValueStr
- }
-{{if or (eq .Name "Date32") (eq .Name "Date64") -}}
- return a.values[i].FormattedString()
-{{else if or (eq .Name "Time32") (eq .Name "Time64") -}}
- return a.values[i].FormattedString(a.DataType().(*{{.QualifiedType}}Type).Unit)
-{{else if (eq .Name "Duration") -}}
- // return value and suffix as a string such as "12345ms"
- return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit)
-{{else if or (eq .Name "Int8") (eq .Name "Int16") (eq .Name "Int32") (eq .Name "Int64") -}}
- return strconv.FormatInt(int64(a.Value(i)), 10)
-{{else if or (eq .Name "Uint8") (eq .Name "Uint16") (eq .Name "Uint32") (eq .Name "Uint64") -}}
- return strconv.FormatUint(uint64(a.Value(i)), 10)
-{{else if or (eq .Name "Float32") -}}
- return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 32)
-{{else if or (eq .Name "Float64") -}}
- return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 64)
-{{else}}
- return fmt.Sprintf("%v", a.values[i])
-{{end -}}
-}
-
-func (a *{{.Name}}) GetOneForMarshal(i int) interface{} {
- if a.IsNull(i) {
- return nil
- }
-{{if or (eq .Name "Date32") (eq .Name "Date64") -}}
- return a.values[i].ToTime().Format("2006-01-02")
-{{else if or (eq .Name "Time32") (eq .Name "Time64") -}}
- return a.values[i].ToTime(a.DataType().(*{{.QualifiedType}}Type).Unit).Format("15:04:05.999999999")
-{{else if (eq .Name "Duration") -}}
- // return value and suffix as a string such as "12345ms"
- return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit.String())
-{{else if (eq .Size "1")}}
- return float64(a.values[i]) // prevent uint8 from being seen as binary data
-{{else}}
- return a.values[i]
-{{end -}}
-}
-
-func (a *{{.Name}}) MarshalJSON() ([]byte, error) {
-{{if .QualifiedType -}}
- vals := make([]interface{}, a.Len())
- for i := range a.values {
- vals[i] = a.GetOneForMarshal(i)
- }
-{{else -}}
- vals := make([]interface{}, a.Len())
- for i := 0; i < a.Len(); i++ {
- {{if (eq .Name "Float32") -}}
- if !a.IsValid(i) {
- vals[i] = nil
- continue
- }
-
- f := a.Value(i)
- v := strconv.FormatFloat(float64(f), 'g', -1, 32)
-
- switch v {
- case "NaN", "+Inf", "-Inf":
- vals[i] = v
- default:
- vals[i] = f
- }
- {{else if (eq .Name "Float64") -}}
- if !a.IsValid(i) {
- vals[i] = nil
- continue
- }
-
- f := a.Value(i)
- switch {
- case math.IsNaN(f):
- vals[i] = "NaN"
- case math.IsInf(f, 1):
- vals[i] = "+Inf"
- case math.IsInf(f, -1):
- vals[i] = "-Inf"
- default:
- vals[i] = f
- }
- {{else}}
- if a.IsValid(i) {
- {{ if (eq .Size "1") }}vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data{{ else }}vals[i] = a.values[i]{{ end }}
- } else {
- vals[i] = nil
- }
- {{end}}
- }
-{{end}}
- return json.Marshal(vals)
-}
-
-func arrayEqual{{.Name}}(left, right *{{.Name}}) bool {
- for i := 0; i < left.Len(); i++ {
- if left.IsNull(i) {
- continue
- }
- if left.Value(i) != right.Value(i) {
- return false
- }
- }
- return true
-}
-
-{{end}}
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric_generic.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric_generic.go
new file mode 100644
index 0000000000000..016dc3737df7d
--- /dev/null
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric_generic.go
@@ -0,0 +1,418 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+ "unsafe"
+
+ "github.com/apache/arrow-go/v18/arrow"
+ "github.com/apache/arrow-go/v18/internal/json"
+)
+
+type numericArray[T arrow.IntType | arrow.UintType | arrow.FloatType] struct {
+ array
+ values []T
+}
+
+func newNumericData[T arrow.IntType | arrow.UintType | arrow.FloatType](data arrow.ArrayData) numericArray[T] {
+ a := numericArray[T]{}
+ a.refCount.Add(1)
+ a.setData(data.(*Data))
+ return a
+}
+
+func (a *numericArray[T]) Reset(data *Data) {
+ a.setData(data)
+}
+
+func (a *numericArray[T]) Value(i int) T { return a.values[i] }
+func (a *numericArray[T]) Values() []T { return a.values }
+func (a *numericArray[T]) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+func (a *numericArray[T]) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.GetData[T](vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+func (a *numericArray[T]) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+
+ return fmt.Sprintf("%v", a.values[i])
+}
+
+func (a *numericArray[T]) GetOneForMarshal(i int) any {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+func (a *numericArray[T]) MarshalJSON() ([]byte, error) {
+ vals := make([]any, a.Len())
+ for i := range a.Len() {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+ return json.Marshal(vals)
+}
+
+type oneByteArrs[T int8 | uint8] struct {
+ numericArray[T]
+}
+
+func (a *oneByteArrs[T]) GetOneForMarshal(i int) any {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return float64(a.values[i]) // prevent uint8/int8 from being seen as binary data
+}
+
+func (a *oneByteArrs[T]) MarshalJSON() ([]byte, error) {
+ vals := make([]any, a.Len())
+ for i := range a.Len() {
+ if a.IsValid(i) {
+ vals[i] = float64(a.values[i]) // prevent uint8/int8 from being seen as binary data
+ } else {
+ vals[i] = nil
+ }
+ }
+ return json.Marshal(vals)
+}
+
+type floatArray[T float32 | float64] struct {
+ numericArray[T]
+}
+
+func (a *floatArray[T]) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+
+ f := a.Value(i)
+ bitWidth := int(unsafe.Sizeof(f) * 8)
+ return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, bitWidth)
+}
+
+func (a *floatArray[T]) GetOneForMarshal(i int) any {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ f := a.Value(i)
+ bitWidth := int(unsafe.Sizeof(f) * 8)
+ v := strconv.FormatFloat(float64(a.Value(i)), 'g', -1, bitWidth)
+ switch v {
+ case "NaN", "+Inf", "-Inf":
+ return v
+ default:
+ return f
+ }
+}
+
+func (a *floatArray[T]) MarshalJSON() ([]byte, error) {
+ vals := make([]any, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+ return json.Marshal(vals)
+}
+
+type dateArray[T interface {
+ arrow.Date32 | arrow.Date64
+ FormattedString() string
+ ToTime() time.Time
+}] struct {
+ numericArray[T]
+}
+
+func (d *dateArray[T]) MarshalJSON() ([]byte, error) {
+ vals := make([]any, d.Len())
+ for i := range d.values {
+ vals[i] = d.GetOneForMarshal(i)
+ }
+ return json.Marshal(vals)
+}
+
+func (d *dateArray[T]) ValueStr(i int) string {
+ if d.IsNull(i) {
+ return NullValueStr
+ }
+
+ return d.values[i].FormattedString()
+}
+
+func (d *dateArray[T]) GetOneForMarshal(i int) interface{} {
+ if d.IsNull(i) {
+ return nil
+ }
+
+ return d.values[i].FormattedString()
+}
+
+type timeType interface {
+ TimeUnit() arrow.TimeUnit
+}
+
+type timeArray[T interface {
+ arrow.Time32 | arrow.Time64
+ FormattedString(arrow.TimeUnit) string
+ ToTime(arrow.TimeUnit) time.Time
+}] struct {
+ numericArray[T]
+}
+
+func (a *timeArray[T]) MarshalJSON() ([]byte, error) {
+ vals := make([]any, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+ return json.Marshal(vals)
+}
+
+func (a *timeArray[T]) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+
+ return a.values[i].FormattedString(a.DataType().(timeType).TimeUnit())
+}
+
+func (a *timeArray[T]) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i].ToTime(a.DataType().(timeType).TimeUnit()).Format("15:04:05.999999999")
+}
+
+type Duration struct {
+ numericArray[arrow.Duration]
+}
+
+func NewDurationData(data arrow.ArrayData) *Duration {
+ return &Duration{numericArray: newNumericData[arrow.Duration](data)}
+}
+
+func (a *Duration) DurationValues() []arrow.Duration { return a.Values() }
+
+func (a *Duration) MarshalJSON() ([]byte, error) {
+ vals := make([]any, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+ return json.Marshal(vals)
+}
+
+func (a *Duration) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+
+ return fmt.Sprintf("%d%s", a.values[i], a.DataType().(timeType).TimeUnit())
+}
+
+func (a *Duration) GetOneForMarshal(i int) any {
+ if a.IsNull(i) {
+ return nil
+ }
+ return fmt.Sprintf("%d%s", a.values[i], a.DataType().(timeType).TimeUnit())
+}
+
+type Int64 struct {
+ numericArray[int64]
+}
+
+func NewInt64Data(data arrow.ArrayData) *Int64 {
+ return &Int64{numericArray: newNumericData[int64](data)}
+}
+
+func (a *Int64) Int64Values() []int64 { return a.Values() }
+
+type Uint64 struct {
+ numericArray[uint64]
+}
+
+func NewUint64Data(data arrow.ArrayData) *Uint64 {
+ return &Uint64{numericArray: newNumericData[uint64](data)}
+}
+
+func (a *Uint64) Uint64Values() []uint64 { return a.Values() }
+
+type Float32 struct {
+ floatArray[float32]
+}
+
+func NewFloat32Data(data arrow.ArrayData) *Float32 {
+ return &Float32{floatArray[float32]{newNumericData[float32](data)}}
+}
+
+func (a *Float32) Float32Values() []float32 { return a.Values() }
+
+type Float64 struct {
+ floatArray[float64]
+}
+
+func NewFloat64Data(data arrow.ArrayData) *Float64 {
+ return &Float64{floatArray: floatArray[float64]{newNumericData[float64](data)}}
+}
+
+func (a *Float64) Float64Values() []float64 { return a.Values() }
+
+type Int32 struct {
+ numericArray[int32]
+}
+
+func NewInt32Data(data arrow.ArrayData) *Int32 {
+ return &Int32{newNumericData[int32](data)}
+}
+
+func (a *Int32) Int32Values() []int32 { return a.Values() }
+
+type Uint32 struct {
+ numericArray[uint32]
+}
+
+func NewUint32Data(data arrow.ArrayData) *Uint32 {
+ return &Uint32{numericArray: newNumericData[uint32](data)}
+}
+
+func (a *Uint32) Uint32Values() []uint32 { return a.Values() }
+
+type Int16 struct {
+ numericArray[int16]
+}
+
+func NewInt16Data(data arrow.ArrayData) *Int16 {
+ return &Int16{newNumericData[int16](data)}
+}
+
+func (a *Int16) Int16Values() []int16 { return a.Values() }
+
+type Uint16 struct {
+ numericArray[uint16]
+}
+
+func NewUint16Data(data arrow.ArrayData) *Uint16 {
+ return &Uint16{numericArray: newNumericData[uint16](data)}
+}
+
+func (a *Uint16) Uint16Values() []uint16 { return a.Values() }
+
+type Int8 struct {
+ oneByteArrs[int8]
+}
+
+func NewInt8Data(data arrow.ArrayData) *Int8 {
+ return &Int8{oneByteArrs[int8]{newNumericData[int8](data)}}
+}
+
+func (a *Int8) Int8Values() []int8 { return a.Values() }
+
+type Uint8 struct {
+ oneByteArrs[uint8]
+}
+
+func NewUint8Data(data arrow.ArrayData) *Uint8 {
+ return &Uint8{oneByteArrs[uint8]{newNumericData[uint8](data)}}
+}
+
+func (a *Uint8) Uint8Values() []uint8 { return a.Values() }
+
+type Time32 struct {
+ timeArray[arrow.Time32]
+}
+
+func NewTime32Data(data arrow.ArrayData) *Time32 {
+ return &Time32{timeArray[arrow.Time32]{newNumericData[arrow.Time32](data)}}
+}
+
+func (a *Time32) Time32Values() []arrow.Time32 { return a.Values() }
+
+type Time64 struct {
+ timeArray[arrow.Time64]
+}
+
+func NewTime64Data(data arrow.ArrayData) *Time64 {
+ return &Time64{timeArray[arrow.Time64]{newNumericData[arrow.Time64](data)}}
+}
+
+func (a *Time64) Time64Values() []arrow.Time64 { return a.Values() }
+
+type Date32 struct {
+ dateArray[arrow.Date32]
+}
+
+func NewDate32Data(data arrow.ArrayData) *Date32 {
+ return &Date32{dateArray[arrow.Date32]{newNumericData[arrow.Date32](data)}}
+}
+
+func (a *Date32) Date32Values() []arrow.Date32 { return a.Values() }
+
+type Date64 struct {
+ dateArray[arrow.Date64]
+}
+
+func NewDate64Data(data arrow.ArrayData) *Date64 {
+ return &Date64{dateArray[arrow.Date64]{newNumericData[arrow.Date64](data)}}
+}
+
+func (a *Date64) Date64Values() []arrow.Date64 { return a.Values() }
+
+func arrayEqualFixedWidth[T arrow.FixedWidthType](left, right arrow.TypedArray[T]) bool {
+ for i := range left.Len() {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
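
Note on this hunk: v18.3.0 replaces the per-type generated numeric arrays with a single generics-based numericArray[T] plus thin exported wrappers (Int64, Float64, Time32, Duration, ...). The exported constructors and typed accessors keep their old names, so callers should be unaffected. A minimal sketch of the unchanged caller-facing flow (the usual builder workflow, not taken from this diff):

    package main

    import (
        "fmt"

        "github.com/apache/arrow-go/v18/arrow/array"
        "github.com/apache/arrow-go/v18/arrow/memory"
    )

    func main() {
        mem := memory.NewGoAllocator()

        // The builder API is unchanged; only the array internals moved to generics.
        b := array.NewInt64Builder(mem)
        defer b.Release()
        b.AppendValues([]int64{1, 2, 3}, nil)

        arr := b.NewInt64Array()
        defer arr.Release()

        fmt.Println(arr.Int64Values()) // [1 2 3]
        fmt.Println(arr.Value(2))      // 3
    }
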
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go
index 1618dba0391c7..be87fbf4a209d 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go
@@ -24,7 +24,6 @@ import (
"reflect"
"strconv"
"strings"
- "sync/atomic"
"time"
"github.com/apache/arrow-go/v18/arrow"
@@ -42,7 +41,9 @@ type Int64Builder struct {
}
func NewInt64Builder(mem memory.Allocator) *Int64Builder {
- return &Int64Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Int64Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Int64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int64 }
@@ -50,9 +51,9 @@ func (b *Int64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int64
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Int64Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -281,7 +282,9 @@ type Uint64Builder struct {
}
func NewUint64Builder(mem memory.Allocator) *Uint64Builder {
- return &Uint64Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Uint64Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Uint64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint64 }
@@ -289,9 +292,9 @@ func (b *Uint64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Uint64Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -520,7 +523,9 @@ type Float64Builder struct {
}
func NewFloat64Builder(mem memory.Allocator) *Float64Builder {
- return &Float64Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Float64Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Float64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Float64 }
@@ -528,9 +533,9 @@ func (b *Float64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Flo
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Float64Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -759,7 +764,9 @@ type Int32Builder struct {
}
func NewInt32Builder(mem memory.Allocator) *Int32Builder {
- return &Int32Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Int32Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Int32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int32 }
@@ -767,9 +774,9 @@ func (b *Int32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int32
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Int32Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -998,7 +1005,9 @@ type Uint32Builder struct {
}
func NewUint32Builder(mem memory.Allocator) *Uint32Builder {
- return &Uint32Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Uint32Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Uint32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint32 }
@@ -1006,9 +1015,9 @@ func (b *Uint32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Uint32Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -1237,7 +1246,9 @@ type Float32Builder struct {
}
func NewFloat32Builder(mem memory.Allocator) *Float32Builder {
- return &Float32Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Float32Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Float32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Float32 }
@@ -1245,9 +1256,9 @@ func (b *Float32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Flo
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Float32Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -1476,7 +1487,9 @@ type Int16Builder struct {
}
func NewInt16Builder(mem memory.Allocator) *Int16Builder {
- return &Int16Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Int16Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Int16Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int16 }
@@ -1484,9 +1497,9 @@ func (b *Int16Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int16
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Int16Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -1715,7 +1728,9 @@ type Uint16Builder struct {
}
func NewUint16Builder(mem memory.Allocator) *Uint16Builder {
- return &Uint16Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Uint16Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Uint16Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint16 }
@@ -1723,9 +1738,9 @@ func (b *Uint16Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Uint16Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -1954,7 +1969,9 @@ type Int8Builder struct {
}
func NewInt8Builder(mem memory.Allocator) *Int8Builder {
- return &Int8Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Int8Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Int8Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int8 }
@@ -1962,9 +1979,9 @@ func (b *Int8Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int8 }
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Int8Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -2193,7 +2210,9 @@ type Uint8Builder struct {
}
func NewUint8Builder(mem memory.Allocator) *Uint8Builder {
- return &Uint8Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Uint8Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Uint8Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint8 }
@@ -2201,9 +2220,9 @@ func (b *Uint8Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint8
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Uint8Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -2433,7 +2452,9 @@ type Time32Builder struct {
}
func NewTime32Builder(mem memory.Allocator, dtype *arrow.Time32Type) *Time32Builder {
- return &Time32Builder{builder: builder{refCount: 1, mem: mem}, dtype: dtype}
+ b := &Time32Builder{builder: builder{mem: mem}, dtype: dtype}
+ b.refCount.Add(1)
+ return b
}
func (b *Time32Builder) Type() arrow.DataType { return b.dtype }
@@ -2441,9 +2462,9 @@ func (b *Time32Builder) Type() arrow.DataType { return b.dtype }
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Time32Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -2673,7 +2694,9 @@ type Time64Builder struct {
}
func NewTime64Builder(mem memory.Allocator, dtype *arrow.Time64Type) *Time64Builder {
- return &Time64Builder{builder: builder{refCount: 1, mem: mem}, dtype: dtype}
+ b := &Time64Builder{builder: builder{mem: mem}, dtype: dtype}
+ b.refCount.Add(1)
+ return b
}
func (b *Time64Builder) Type() arrow.DataType { return b.dtype }
@@ -2681,9 +2704,9 @@ func (b *Time64Builder) Type() arrow.DataType { return b.dtype }
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Time64Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -2912,7 +2935,9 @@ type Date32Builder struct {
}
func NewDate32Builder(mem memory.Allocator) *Date32Builder {
- return &Date32Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Date32Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Date32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Date32 }
@@ -2920,9 +2945,9 @@ func (b *Date32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Date
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Date32Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -3151,7 +3176,9 @@ type Date64Builder struct {
}
func NewDate64Builder(mem memory.Allocator) *Date64Builder {
- return &Date64Builder{builder: builder{refCount: 1, mem: mem}}
+ b := &Date64Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *Date64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Date64 }
@@ -3159,9 +3186,9 @@ func (b *Date64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Date
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Date64Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -3391,7 +3418,9 @@ type DurationBuilder struct {
}
func NewDurationBuilder(mem memory.Allocator, dtype *arrow.DurationType) *DurationBuilder {
- return &DurationBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype}
+ b := &DurationBuilder{builder: builder{mem: mem}, dtype: dtype}
+ b.refCount.Add(1)
+ return b
}
func (b *DurationBuilder) Type() arrow.DataType { return b.dtype }
@@ -3399,9 +3428,9 @@ func (b *DurationBuilder) Type() arrow.DataType { return b.dtype }
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *DurationBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
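
Note on this hunk: the generated builders move their reference count from a plain int64 manipulated through sync/atomic to an atomic.Int64 field, which is why the sync/atomic import is dropped and the refCount: 1 struct literals become refCount.Add(1) calls. A minimal sketch of the idiom using a hypothetical counted type (not part of the library):

    package main

    import "sync/atomic"

    // counted illustrates the new reference-count idiom used throughout this diff.
    type counted struct {
        refCount atomic.Int64 // zero value is usable; no explicit initialization needed
    }

    func newCounted() *counted {
        c := &counted{}
        c.refCount.Add(1) // replaces the old `refCount: 1` struct literal field
        return c
    }

    func (c *counted) Retain() { c.refCount.Add(1) }

    func (c *counted) Release() {
        if c.refCount.Add(-1) == 0 {
            // free owned resources here
        }
    }

    func main() {
        c := newCounted()
        c.Retain()
        c.Release()
        c.Release() // count reaches zero; resources would be freed here
    }
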
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go.tmpl
index e84e095cb3e0b..518b3d4c56e72 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go.tmpl
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go.tmpl
@@ -38,14 +38,18 @@ type {{.Name}}Builder struct {
{{if .Opt.Parametric}}
func New{{.Name}}Builder(mem memory.Allocator, dtype *arrow.{{.Name}}Type) *{{.Name}}Builder {
- return &{{.Name}}Builder{builder: builder{refCount:1, mem: mem}, dtype: dtype}
+ b := &{{.Name}}Builder{builder: builder{mem: mem}, dtype: dtype}
+ b.refCount.Add(1)
+ return b
}
func (b *{{.Name}}Builder) Type() arrow.DataType { return b.dtype }
{{else}}
func New{{.Name}}Builder(mem memory.Allocator) *{{.Name}}Builder {
- return &{{.Name}}Builder{builder: builder{refCount:1, mem: mem}}
+ b := &{{.Name}}Builder{builder: builder{mem: mem}}
+ b.refCount.Add(1)
+ return b
}
func (b *{{.Name}}Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.{{.Name}} }
@@ -54,9 +58,9 @@ func (b *{{.Name}}Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.{
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *{{.Name}}Builder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen_test.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen_test.go.tmpl
index a5d58f484d902..86cc74a579d31 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen_test.go.tmpl
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen_test.go.tmpl
@@ -276,9 +276,17 @@ func Test{{.Name}}BuilderUnmarshalJSON(t *testing.T) {
mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
defer mem.AssertSize(t, 0)
+{{if .Opt.Parametric -}}
+ dtype := &arrow.{{.Name}}Type{Unit: arrow.Second}
+ bldr := array.New{{.Name}}Builder(mem, dtype)
+{{else}}
bldr := array.New{{.Name}}Builder(mem)
+{{end -}}
+
defer bldr.Release()
+
+{{ if or (eq .Name "Float64") (eq .Name "Float32") -}}
jsonstr := `[0, 1, "+Inf", 2, 3, "NaN", "NaN", 4, 5, "-Inf"]`
err := bldr.UnmarshalJSON([]byte(jsonstr))
@@ -292,6 +300,23 @@ func Test{{.Name}}BuilderUnmarshalJSON(t *testing.T) {
assert.False(t, math.IsInf(float64(arr.Value(0)), 0), arr.Value(0))
assert.True(t, math.IsInf(float64(arr.Value(2)), 1), arr.Value(2))
assert.True(t, math.IsNaN(float64(arr.Value(5))), arr.Value(5))
+{{else}}
+ jsonstr := `[0, 1, null, 2.3, -11]`
+
+ err := bldr.UnmarshalJSON([]byte(jsonstr))
+ assert.NoError(t, err)
+
+ arr := bldr.New{{.Name}}Array()
+ defer arr.Release()
+
+ assert.NotNil(t, arr)
+
+ assert.Equal(t, int64(0), int64(arr.Value(0)))
+ assert.Equal(t, int64(1), int64(arr.Value(1)))
+ assert.True(t, arr.IsNull(2))
+ assert.Equal(t, int64(2), int64(arr.Value(3)))
+ assert.Equal(t, int64(5), int64(arr.Len()))
+{{end -}}
}
{{end}}
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/record.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/record.go
index b8041e2799dc3..18a50ed0b5370 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/record.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/record.go
@@ -19,6 +19,7 @@ package array
import (
"bytes"
"fmt"
+ "iter"
"strings"
"sync/atomic"
@@ -42,7 +43,7 @@ type RecordReader interface {
// simpleRecords is a simple iterator over a collection of records.
type simpleRecords struct {
- refCount int64
+ refCount atomic.Int64
schema *arrow.Schema
recs []arrow.Record
@@ -52,11 +53,11 @@ type simpleRecords struct {
// NewRecordReader returns a simple iterator over the given slice of records.
func NewRecordReader(schema *arrow.Schema, recs []arrow.Record) (RecordReader, error) {
rs := &simpleRecords{
- refCount: 1,
- schema: schema,
- recs: recs,
- cur: nil,
+ schema: schema,
+ recs: recs,
+ cur: nil,
}
+ rs.refCount.Add(1)
for _, rec := range rs.recs {
rec.Retain()
@@ -75,16 +76,16 @@ func NewRecordReader(schema *arrow.Schema, recs []arrow.Record) (RecordReader, e
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (rs *simpleRecords) Retain() {
- atomic.AddInt64(&rs.refCount, 1)
+ rs.refCount.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (rs *simpleRecords) Release() {
- debug.Assert(atomic.LoadInt64(&rs.refCount) > 0, "too many releases")
+ debug.Assert(rs.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&rs.refCount, -1) == 0 {
+ if rs.refCount.Add(-1) == 0 {
if rs.cur != nil {
rs.cur.Release()
}
@@ -112,7 +113,7 @@ func (rs *simpleRecords) Err() error { return nil }
// simpleRecord is a basic, non-lazy in-memory record batch.
type simpleRecord struct {
- refCount int64
+ refCount atomic.Int64
schema *arrow.Schema
@@ -126,11 +127,12 @@ type simpleRecord struct {
// NewRecord panics if rows is larger than the height of the columns.
func NewRecord(schema *arrow.Schema, cols []arrow.Array, nrows int64) arrow.Record {
rec := &simpleRecord{
- refCount: 1,
- schema: schema,
- rows: nrows,
- arrs: make([]arrow.Array, len(cols)),
+ schema: schema,
+ rows: nrows,
+ arrs: make([]arrow.Array, len(cols)),
}
+ rec.refCount.Add(1)
+
copy(rec.arrs, cols)
for _, arr := range rec.arrs {
arr.Retain()
@@ -210,16 +212,16 @@ func (rec *simpleRecord) validate() error {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (rec *simpleRecord) Retain() {
- atomic.AddInt64(&rec.refCount, 1)
+ rec.refCount.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (rec *simpleRecord) Release() {
- debug.Assert(atomic.LoadInt64(&rec.refCount) > 0, "too many releases")
+ debug.Assert(rec.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&rec.refCount, -1) == 0 {
+ if rec.refCount.Add(-1) == 0 {
for _, arr := range rec.arrs {
arr.Release()
}
@@ -273,7 +275,7 @@ func (rec *simpleRecord) MarshalJSON() ([]byte, error) {
// RecordBuilder eases the process of building a Record, iteratively, from
// a known Schema.
type RecordBuilder struct {
- refCount int64
+ refCount atomic.Int64
mem memory.Allocator
schema *arrow.Schema
fields []Builder
@@ -282,11 +284,11 @@ type RecordBuilder struct {
// NewRecordBuilder returns a builder, using the provided memory allocator and a schema.
func NewRecordBuilder(mem memory.Allocator, schema *arrow.Schema) *RecordBuilder {
b := &RecordBuilder{
- refCount: 1,
- mem: mem,
- schema: schema,
- fields: make([]Builder, schema.NumFields()),
+ mem: mem,
+ schema: schema,
+ fields: make([]Builder, schema.NumFields()),
}
+ b.refCount.Add(1)
for i := 0; i < schema.NumFields(); i++ {
b.fields[i] = NewBuilder(b.mem, schema.Field(i).Type)
@@ -298,14 +300,14 @@ func NewRecordBuilder(mem memory.Allocator, schema *arrow.Schema) *RecordBuilder
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (b *RecordBuilder) Retain() {
- atomic.AddInt64(&b.refCount, 1)
+ b.refCount.Add(1)
}
// Release decreases the reference count by 1.
func (b *RecordBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
for _, f := range b.fields {
f.Release()
}
@@ -405,6 +407,84 @@ func (b *RecordBuilder) UnmarshalJSON(data []byte) error {
return nil
}
+type iterReader struct {
+ refCount atomic.Int64
+
+ schema *arrow.Schema
+ cur arrow.Record
+
+ next func() (arrow.Record, error, bool)
+ stop func()
+
+ err error
+}
+
+func (ir *iterReader) Schema() *arrow.Schema { return ir.schema }
+
+func (ir *iterReader) Retain() { ir.refCount.Add(1) }
+func (ir *iterReader) Release() {
+ debug.Assert(ir.refCount.Load() > 0, "too many releases")
+
+ if ir.refCount.Add(-1) == 0 {
+ ir.stop()
+ ir.schema, ir.next = nil, nil
+ if ir.cur != nil {
+ ir.cur.Release()
+ }
+ }
+}
+
+func (ir *iterReader) Record() arrow.Record { return ir.cur }
+func (ir *iterReader) Err() error { return ir.err }
+
+func (ir *iterReader) Next() bool {
+ if ir.cur != nil {
+ ir.cur.Release()
+ }
+
+ var ok bool
+ ir.cur, ir.err, ok = ir.next()
+ if ir.err != nil {
+ ir.stop()
+ return false
+ }
+
+ return ok
+}
+
+// ReaderFromIter wraps a Go iterator over (arrow.Record, error) pairs in a
+// RecordReader interface object for ease of use.
+func ReaderFromIter(schema *arrow.Schema, itr iter.Seq2[arrow.Record, error]) RecordReader {
+ next, stop := iter.Pull2(itr)
+ rdr := &iterReader{
+ schema: schema,
+ next: next,
+ stop: stop,
+ }
+ rdr.refCount.Add(1)
+ return rdr
+}
+
+// IterFromReader converts a RecordReader interface into an iterator that
+// you can range over. The usual retention semantics still apply: if a record
+// returned by the iterator needs to be used beyond the scope of a single
+// iteration, Retain must be called on it.
+func IterFromReader(rdr RecordReader) iter.Seq2[arrow.Record, error] {
+ rdr.Retain()
+ return func(yield func(arrow.Record, error) bool) {
+ defer rdr.Release()
+ for rdr.Next() {
+ if !yield(rdr.Record(), nil) {
+ return
+ }
+ }
+
+ if rdr.Err() != nil {
+ yield(nil, rdr.Err())
+ }
+ }
+}
+
var (
_ arrow.Record = (*simpleRecord)(nil)
_ RecordReader = (*simpleRecords)(nil)
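
Note on this hunk: record.go now bridges RecordReader and Go 1.23 iter.Seq2 iterators in both directions via ReaderFromIter and IterFromReader. A hedged sketch of consuming a reader through the iterator form; totalRows is a hypothetical helper, not part of the library:

    package example

    import "github.com/apache/arrow-go/v18/arrow/array"

    // totalRows drains a RecordReader through the new iterator bridge and sums
    // the row counts of the records it yields.
    func totalRows(rdr array.RecordReader) (int64, error) {
        var n int64
        for rec, err := range array.IterFromReader(rdr) {
            if err != nil {
                return n, err
            }
            n += rec.NumRows()
        }
        return n, nil
    }

IterFromReader retains the reader and releases it when the loop finishes, and each yielded record is only valid for that iteration unless Retain is called on it, matching the doc comment above.
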
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/string.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/string.go
index 5197e77f0b12e..d42492d6dcb4d 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/string.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/string.go
@@ -44,7 +44,7 @@ type String struct {
// NewStringData constructs a new String array from data.
func NewStringData(data arrow.ArrayData) *String {
a := &String{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -191,7 +191,7 @@ type LargeString struct {
// NewStringData constructs a new String array from data.
func NewLargeStringData(data arrow.ArrayData) *LargeString {
a := &LargeString{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -332,7 +332,7 @@ type StringView struct {
func NewStringViewData(data arrow.ArrayData) *StringView {
a := &StringView{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -715,4 +715,8 @@ var (
_ StringLike = (*String)(nil)
_ StringLike = (*LargeString)(nil)
_ StringLike = (*StringView)(nil)
+
+ _ arrow.TypedArray[string] = (*String)(nil)
+ _ arrow.TypedArray[string] = (*LargeString)(nil)
+ _ arrow.TypedArray[string] = (*StringView)(nil)
)
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/struct.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/struct.go
index 7f65f8d20e029..957947b393206 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/struct.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/struct.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"strings"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
@@ -47,6 +46,13 @@ func NewStructArray(cols []arrow.Array, names []string) (*Struct, error) {
// and provided fields. As opposed to NewStructArray, this allows you to provide
// the full fields to utilize for the struct column instead of just the names.
func NewStructArrayWithFields(cols []arrow.Array, fields []arrow.Field) (*Struct, error) {
+ return NewStructArrayWithFieldsAndNulls(cols, fields, nil, 0, 0)
+}
+
+// NewStructArrayWithFieldsAndNulls is a convenience function like NewStructArrayWithFields,
+// but it also accepts a null bitmap, the number of nulls, and an optional offset
+// to use when creating the Struct Array.
+func NewStructArrayWithFieldsAndNulls(cols []arrow.Array, fields []arrow.Field, nullBitmap *memory.Buffer, nullCount int, offset int) (*Struct, error) {
if len(cols) != len(fields) {
return nil, fmt.Errorf("%w: mismatching number of fields and child arrays", arrow.ErrInvalid)
}
@@ -64,15 +70,18 @@ func NewStructArrayWithFields(cols []arrow.Array, fields []arrow.Field) (*Struct
return nil, fmt.Errorf("%w: mismatching data type for child #%d, field says '%s', got '%s'",
arrow.ErrInvalid, i, fields[i].Type, c.DataType())
}
- if !fields[i].Nullable && c.NullN() > 0 {
- return nil, fmt.Errorf("%w: field says not-nullable, child #%d has nulls",
- arrow.ErrInvalid, i)
- }
children[i] = c.Data()
}
- data := NewData(arrow.StructOf(fields...), length, []*memory.Buffer{nil}, children, 0, 0)
+ if nullBitmap == nil {
+ if nullCount > 0 {
+ return nil, fmt.Errorf("%w: null count is greater than 0 but null bitmap is nil", arrow.ErrInvalid)
+ }
+ nullCount = 0
+ }
+
+ data := NewData(arrow.StructOf(fields...), length-offset, []*memory.Buffer{nullBitmap}, children, nullCount, offset)
defer data.Release()
return NewStructData(data), nil
}
@@ -107,7 +116,7 @@ func NewStructArrayWithNulls(cols []arrow.Array, names []string, nullBitmap *mem
// NewStructData returns a new Struct array value from data.
func NewStructData(data arrow.ArrayData) *Struct {
a := &Struct{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -256,10 +265,12 @@ type StructBuilder struct {
// NewStructBuilder returns a builder, using the provided memory allocator.
func NewStructBuilder(mem memory.Allocator, dtype *arrow.StructType) *StructBuilder {
b := &StructBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
dtype: dtype,
fields: make([]Builder, dtype.NumFields()),
}
+ b.refCount.Add(1)
+
for i, f := range dtype.Fields() {
b.fields[i] = NewBuilder(b.mem, f.Type)
}
@@ -278,9 +289,9 @@ func (b *StructBuilder) Type() arrow.DataType {
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *StructBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
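
Note on this hunk: NewStructArrayWithFields now delegates to the new NewStructArrayWithFieldsAndNulls, which additionally accepts a validity bitmap, null count and offset (the old per-field nullability check is dropped). A hedged sketch of a direct call; buildStruct, child and field are illustrative assumptions, not part of the library:

    package example

    import (
        "github.com/apache/arrow-go/v18/arrow"
        "github.com/apache/arrow-go/v18/arrow/array"
    )

    // buildStruct wraps an already-built child column described by field into a
    // single-field struct array using the new constructor.
    func buildStruct(child arrow.Array, field arrow.Field) (*array.Struct, error) {
        // A nil validity bitmap with nullCount 0 and offset 0 behaves like
        // NewStructArrayWithFields.
        return array.NewStructArrayWithFieldsAndNulls(
            []arrow.Array{child},
            []arrow.Field{field},
            nil, // nullBitmap
            0,   // nullCount
            0,   // offset
        )
    }
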
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/table.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/table.go
index 95ac67f29460f..367b1b1057868 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/table.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/table.go
@@ -85,7 +85,7 @@ func NewChunkedSlice(a *arrow.Chunked, i, j int64) *arrow.Chunked {
// simpleTable is a basic, non-lazy in-memory table.
type simpleTable struct {
- refCount int64
+ refCount atomic.Int64
rows int64
cols []arrow.Column
@@ -101,11 +101,11 @@ type simpleTable struct {
// NewTable panics if rows is larger than the height of the columns.
func NewTable(schema *arrow.Schema, cols []arrow.Column, rows int64) arrow.Table {
tbl := simpleTable{
- refCount: 1,
- rows: rows,
- cols: cols,
- schema: schema,
+ rows: rows,
+ cols: cols,
+ schema: schema,
}
+ tbl.refCount.Add(1)
if tbl.rows < 0 {
switch len(tbl.cols) {
@@ -150,11 +150,11 @@ func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) arrow.Table {
}
tbl := simpleTable{
- refCount: 1,
- schema: schema,
- cols: cols,
- rows: int64(cols[0].Len()),
+ schema: schema,
+ cols: cols,
+ rows: int64(cols[0].Len()),
}
+ tbl.refCount.Add(1)
defer func() {
if r := recover(); r != nil {
@@ -241,16 +241,16 @@ func (tbl *simpleTable) validate() {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (tbl *simpleTable) Retain() {
- atomic.AddInt64(&tbl.refCount, 1)
+ tbl.refCount.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (tbl *simpleTable) Release() {
- debug.Assert(atomic.LoadInt64(&tbl.refCount) > 0, "too many releases")
+ debug.Assert(tbl.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&tbl.refCount, -1) == 0 {
+ if tbl.refCount.Add(-1) == 0 {
for i := range tbl.cols {
tbl.cols[i].Release()
}
@@ -279,7 +279,7 @@ func (tbl *simpleTable) String() string {
// TableReader is a Record iterator over a (possibly chunked) Table
type TableReader struct {
- refCount int64
+ refCount atomic.Int64
tbl arrow.Table
cur int64 // current row
@@ -297,15 +297,15 @@ type TableReader struct {
func NewTableReader(tbl arrow.Table, chunkSize int64) *TableReader {
ncols := tbl.NumCols()
tr := &TableReader{
- refCount: 1,
- tbl: tbl,
- cur: 0,
- max: int64(tbl.NumRows()),
- chksz: chunkSize,
- chunks: make([]*arrow.Chunked, ncols),
- slots: make([]int, ncols),
- offsets: make([]int64, ncols),
+ tbl: tbl,
+ cur: 0,
+ max: int64(tbl.NumRows()),
+ chksz: chunkSize,
+ chunks: make([]*arrow.Chunked, ncols),
+ slots: make([]int, ncols),
+ offsets: make([]int64, ncols),
}
+ tr.refCount.Add(1)
tr.tbl.Retain()
if tr.chksz <= 0 {
@@ -383,16 +383,16 @@ func (tr *TableReader) Next() bool {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (tr *TableReader) Retain() {
- atomic.AddInt64(&tr.refCount, 1)
+ tr.refCount.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (tr *TableReader) Release() {
- debug.Assert(atomic.LoadInt64(&tr.refCount) > 0, "too many releases")
+ debug.Assert(tr.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&tr.refCount, -1) == 0 {
+ if tr.refCount.Add(-1) == 0 {
tr.tbl.Release()
for _, chk := range tr.chunks {
chk.Release()
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/timestamp.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/timestamp.go
index 37359db1209d2..9f8ca47846030 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/timestamp.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/timestamp.go
@@ -21,7 +21,6 @@ import (
"fmt"
"reflect"
"strings"
- "sync/atomic"
"time"
"github.com/apache/arrow-go/v18/arrow"
@@ -40,7 +39,7 @@ type Timestamp struct {
// NewTimestampData creates a new Timestamp from Data.
func NewTimestampData(data arrow.ArrayData) *Timestamp {
a := &Timestamp{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -53,8 +52,10 @@ func (a *Timestamp) Reset(data *Data) {
// Value returns the value at the specified index.
func (a *Timestamp) Value(i int) arrow.Timestamp { return a.values[i] }
+func (a *Timestamp) Values() []arrow.Timestamp { return a.values }
+
// TimestampValues returns the values.
-func (a *Timestamp) TimestampValues() []arrow.Timestamp { return a.values }
+func (a *Timestamp) TimestampValues() []arrow.Timestamp { return a.Values() }
// String returns a string representation of the array.
func (a *Timestamp) String() string {
@@ -132,7 +133,9 @@ type TimestampBuilder struct {
}
func NewTimestampBuilder(mem memory.Allocator, dtype *arrow.TimestampType) *TimestampBuilder {
- return &TimestampBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype}
+ tb := &TimestampBuilder{builder: builder{mem: mem}, dtype: dtype}
+ tb.refCount.Add(1)
+ return tb
}
func (b *TimestampBuilder) Type() arrow.DataType { return b.dtype }
@@ -140,9 +143,9 @@ func (b *TimestampBuilder) Type() arrow.DataType { return b.dtype }
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *TimestampBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.nullBitmap != nil {
b.nullBitmap.Release()
b.nullBitmap = nil
@@ -375,6 +378,7 @@ func (b *TimestampBuilder) UnmarshalJSON(data []byte) error {
}
var (
- _ arrow.Array = (*Timestamp)(nil)
- _ Builder = (*TimestampBuilder)(nil)
+ _ arrow.Array = (*Timestamp)(nil)
+ _ Builder = (*TimestampBuilder)(nil)
+ _ arrow.TypedArray[arrow.Timestamp] = (*Timestamp)(nil)
)
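
Note on this hunk: alongside the atomic refCount change, the arrays now assert conformance to a new arrow.TypedArray[T] interface (see also the string.go and numeric_generic.go hunks), which is what lets helpers such as arrayEqualFixedWidth be written generically. A hedged sketch of a similar caller-side helper, assuming TypedArray exposes only the Len, IsNull and Value methods used there:

    package example

    import "github.com/apache/arrow-go/v18/arrow"

    // countEqual counts positions where both arrays are non-null and hold equal
    // values, in the spirit of arrayEqualFixedWidth above.
    func countEqual[T comparable](left, right arrow.TypedArray[T]) int {
        n := 0
        for i := 0; i < left.Len(); i++ {
            if left.IsNull(i) || right.IsNull(i) {
                continue
            }
            if left.Value(i) == right.Value(i) {
                n++
            }
        }
        return n
    }
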
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/union.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/union.go
index 6f3a9a6edf1ce..9c13af05b1014 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/array/union.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/union.go
@@ -23,7 +23,6 @@ import (
"math"
"reflect"
"strings"
- "sync/atomic"
"github.com/apache/arrow-go/v18/arrow"
"github.com/apache/arrow-go/v18/arrow/bitutil"
@@ -246,7 +245,7 @@ func NewSparseUnion(dt *arrow.SparseUnionType, length int, children []arrow.Arra
// NewSparseUnionData constructs a SparseUnion array from the given ArrayData object.
func NewSparseUnionData(data arrow.ArrayData) *SparseUnion {
a := &SparseUnion{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -506,7 +505,7 @@ func NewDenseUnion(dt *arrow.DenseUnionType, length int, children []arrow.Array,
// NewDenseUnionData constructs a DenseUnion array from the given ArrayData object.
func NewDenseUnionData(data arrow.ArrayData) *DenseUnion {
a := &DenseUnion{}
- a.refCount = 1
+ a.refCount.Add(1)
a.setData(data.(*Data))
return a
}
@@ -736,12 +735,12 @@ type unionBuilder struct {
typesBuilder *int8BufferBuilder
}
-func newUnionBuilder(mem memory.Allocator, children []Builder, typ arrow.UnionType) unionBuilder {
+func newUnionBuilder(mem memory.Allocator, children []Builder, typ arrow.UnionType) *unionBuilder {
if children == nil {
children = make([]Builder, 0)
}
b := unionBuilder{
- builder: builder{refCount: 1, mem: mem},
+ builder: builder{mem: mem},
mode: typ.Mode(),
codes: typ.TypeCodes(),
children: children,
@@ -750,6 +749,7 @@ func newUnionBuilder(mem memory.Allocator, children []Builder, typ arrow.UnionTy
childFields: make([]arrow.Field, len(children)),
typesBuilder: newInt8BufferBuilder(mem),
}
+ b.refCount.Add(1)
b.typeIDtoChildID[0] = arrow.InvalidUnionChildID
for i := 1; i < len(b.typeIDtoChildID); i *= 2 {
@@ -767,7 +767,7 @@ func newUnionBuilder(mem memory.Allocator, children []Builder, typ arrow.UnionTy
b.typeIDtoBuilder[typeID] = c
}
- return b
+ return &b
}
func (b *unionBuilder) NumChildren() int {
@@ -795,9 +795,9 @@ func (b *unionBuilder) reserve(elements int, resize func(int)) {
}
func (b *unionBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
for _, c := range b.children {
c.Release()
}
@@ -854,7 +854,6 @@ func (b *unionBuilder) nextTypeID() arrow.UnionTypeCode {
id := b.denseTypeID
b.denseTypeID++
return id
-
}
func (b *unionBuilder) newData() *Data {
@@ -879,7 +878,7 @@ func (b *unionBuilder) newData() *Data {
// that they have the correct number of preceding elements that have been
// added to the builder beforehand.
type SparseUnionBuilder struct {
- unionBuilder
+ *unionBuilder
}
// NewEmptySparseUnionBuilder is a helper to construct a SparseUnionBuilder
@@ -1109,7 +1108,7 @@ func (b *SparseUnionBuilder) UnmarshalOne(dec *json.Decoder) error {
// methods. You can also add new types to the union on the fly by using
// AppendChild.
type DenseUnionBuilder struct {
- unionBuilder
+ *unionBuilder
offsetsBuilder *int32BufferBuilder
}
@@ -1228,9 +1227,9 @@ func (b *DenseUnionBuilder) Append(nextType arrow.UnionTypeCode) {
}
func (b *DenseUnionBuilder) Release() {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
for _, c := range b.children {
c.Release()
}
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/csv/reader.go b/vendor/github.com/apache/arrow-go/v18/arrow/csv/reader.go
index dd0c0f182c10a..db0f836d72f4f 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/csv/reader.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/csv/reader.go
@@ -43,7 +43,7 @@ type Reader struct {
r *csv.Reader
schema *arrow.Schema
- refs int64
+ refs atomic.Int64
bld *array.RecordBuilder
cur arrow.Record
err error
@@ -75,10 +75,10 @@ type Reader struct {
func NewInferringReader(r io.Reader, opts ...Option) *Reader {
rr := &Reader{
r: csv.NewReader(r),
- refs: 1,
chunk: 1,
stringsCanBeNull: false,
}
+ rr.refs.Add(1)
rr.r.ReuseRecord = true
for _, opt := range opts {
opt(rr)
@@ -111,10 +111,10 @@ func NewReader(r io.Reader, schema *arrow.Schema, opts ...Option) *Reader {
rr := &Reader{
r: csv.NewReader(r),
schema: schema,
- refs: 1,
chunk: 1,
stringsCanBeNull: false,
}
+ rr.refs.Add(1)
rr.r.ReuseRecord = true
for _, opt := range opts {
opt(rr)
@@ -288,9 +288,7 @@ func (r *Reader) nextall() bool {
r.done = true
}()
- var (
- recs [][]string
- )
+ var recs [][]string
recs, r.err = r.r.ReadAll()
if r.err != nil {
@@ -926,16 +924,16 @@ func (r *Reader) parseExtension(field array.Builder, str string) {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (r *Reader) Retain() {
- atomic.AddInt64(&r.refs, 1)
+ r.refs.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (r *Reader) Release() {
- debug.Assert(atomic.LoadInt64(&r.refs) > 0, "too many releases")
+ debug.Assert(r.refs.Load() > 0, "too many releases")
- if atomic.AddInt64(&r.refs, -1) == 0 {
+ if r.refs.Add(-1) == 0 {
if r.cur != nil {
r.cur.Release()
}
@@ -1025,6 +1023,4 @@ func tryParse(val string, dt arrow.DataType) error {
panic("shouldn't end up here")
}
-var (
- _ array.RecordReader = (*Reader)(nil)
-)
+var _ array.RecordReader = (*Reader)(nil)
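
Callers of the CSV reader are unaffected by the `refs` field becoming an `atomic.Int64`; Retain/Release semantics are unchanged. A short usage sketch for orientation (the schema and input are made up):

```go
// Sketch of typical csv.Reader usage; only the internal reference counter
// representation changed in this bump.
package main

import (
	"fmt"
	"strings"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/csv"
)

func main() {
	schema := arrow.NewSchema([]arrow.Field{
		{Name: "id", Type: arrow.PrimitiveTypes.Int64},
		{Name: "name", Type: arrow.BinaryTypes.String},
	}, nil)

	input := strings.NewReader("1,alpha\n2,beta\n")
	rdr := csv.NewReader(input, schema, csv.WithChunk(2))
	defer rdr.Release() // drops the reference taken at construction

	for rdr.Next() {
		rec := rdr.Record() // only valid until the next call to Next
		fmt.Println(rec.NumRows(), rec.NumCols())
	}
	if err := rdr.Err(); err != nil {
		fmt.Println("read error:", err)
	}
}
```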
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/doc.go b/vendor/github.com/apache/arrow-go/v18/arrow/doc.go
index 690a4f53232a9..7bc175c0eabbb 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/doc.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/doc.go
@@ -34,7 +34,7 @@ To build with tinygo include the noasm build tag.
*/
package arrow
-const PkgVersion = "18.2.0"
+const PkgVersion = "18.3.0"
//go:generate go run _tools/tmpl/main.go -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl
//go:generate go run _tools/tmpl/main.go -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl tensor/numeric.gen.go.tmpl tensor/numeric.gen_test.go.tmpl
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/memory/buffer.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/buffer.go
index 04722225c4ffa..592da70c6833b 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/memory/buffer.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/buffer.go
@@ -24,7 +24,7 @@ import (
// Buffer is a wrapper type for a buffer of bytes.
type Buffer struct {
- refCount int64
+ refCount atomic.Int64
buf []byte
length int
mutable bool
@@ -42,22 +42,28 @@ type Buffer struct {
// through the c data interface and tracking the lifetime of the
// imported buffers.
func NewBufferWithAllocator(data []byte, mem Allocator) *Buffer {
- return &Buffer{refCount: 1, buf: data, length: len(data), mem: mem}
+ b := &Buffer{buf: data, length: len(data), mem: mem}
+ b.refCount.Add(1)
+ return b
}
// NewBufferBytes creates a fixed-size buffer from the specified data.
func NewBufferBytes(data []byte) *Buffer {
- return &Buffer{refCount: 0, buf: data, length: len(data)}
+ return &Buffer{buf: data, length: len(data)}
}
// NewResizableBuffer creates a mutable, resizable buffer with an Allocator for managing memory.
func NewResizableBuffer(mem Allocator) *Buffer {
- return &Buffer{refCount: 1, mutable: true, mem: mem}
+ b := &Buffer{mutable: true, mem: mem}
+ b.refCount.Add(1)
+ return b
}
func SliceBuffer(buf *Buffer, offset, length int) *Buffer {
buf.Retain()
- return &Buffer{refCount: 1, parent: buf, buf: buf.Bytes()[offset : offset+length], length: length}
+ b := &Buffer{parent: buf, buf: buf.Bytes()[offset : offset+length], length: length}
+ b.refCount.Add(1)
+ return b
}
// Parent returns either nil or a pointer to the parent buffer if this buffer
@@ -67,7 +73,7 @@ func (b *Buffer) Parent() *Buffer { return b.parent }
// Retain increases the reference count by 1.
func (b *Buffer) Retain() {
if b.mem != nil || b.parent != nil {
- atomic.AddInt64(&b.refCount, 1)
+ b.refCount.Add(1)
}
}
@@ -75,9 +81,9 @@ func (b *Buffer) Retain() {
// When the reference count goes to zero, the memory is freed.
func (b *Buffer) Release() {
if b.mem != nil || b.parent != nil {
- debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+ debug.Assert(b.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.refCount.Add(-1) == 0 {
if b.mem != nil {
b.mem.Free(b.buf)
} else {
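
Across buffer.go and the other files in this bump, the hand-rolled `int64` plus `sync/atomic` reference count becomes an `atomic.Int64`. A side benefit, pointed out by the comment removed from table.go below, is that the field no longer has to sit first in the struct for 64-bit alignment. A standalone sketch of the idiom (not arrow code itself):

```go
// Standalone sketch of the refcount idiom these Arrow changes converge on:
// an embedded atomic.Int64 instead of a manually aligned int64 field.
package refcount

import "sync/atomic"

type resource struct {
	refCount atomic.Int64
	buf      []byte
}

func newResource(n int) *resource {
	r := &resource{buf: make([]byte, n)}
	r.refCount.Add(1) // constructor owns the first reference
	return r
}

// Retain may be called concurrently from multiple goroutines.
func (r *resource) Retain() { r.refCount.Add(1) }

// Release frees the payload when the last reference is dropped.
func (r *resource) Release() {
	if r.refCount.Add(-1) == 0 {
		r.buf = nil
	}
}
```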
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/memory/checked_allocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/checked_allocator.go
index 78a09a57d74ba..103a0853ce64c 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/memory/checked_allocator.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/checked_allocator.go
@@ -32,7 +32,7 @@ import (
type CheckedAllocator struct {
mem Allocator
- sz int64
+ sz atomic.Int64
allocs sync.Map
}
@@ -41,10 +41,10 @@ func NewCheckedAllocator(mem Allocator) *CheckedAllocator {
return &CheckedAllocator{mem: mem}
}
-func (a *CheckedAllocator) CurrentAlloc() int { return int(atomic.LoadInt64(&a.sz)) }
+func (a *CheckedAllocator) CurrentAlloc() int { return int(a.sz.Load()) }
func (a *CheckedAllocator) Allocate(size int) []byte {
- atomic.AddInt64(&a.sz, int64(size))
+ a.sz.Add(int64(size))
out := a.mem.Allocate(size)
if size == 0 {
return out
@@ -66,7 +66,7 @@ func (a *CheckedAllocator) Allocate(size int) []byte {
}
func (a *CheckedAllocator) Reallocate(size int, b []byte) []byte {
- atomic.AddInt64(&a.sz, int64(size-len(b)))
+ a.sz.Add(int64(size - len(b)))
oldptr := uintptr(unsafe.Pointer(&b[0]))
out := a.mem.Reallocate(size, b)
@@ -92,7 +92,7 @@ func (a *CheckedAllocator) Reallocate(size int, b []byte) []byte {
}
func (a *CheckedAllocator) Free(b []byte) {
- atomic.AddInt64(&a.sz, int64(len(b)*-1))
+ a.sz.Add(int64(len(b) * -1))
defer a.mem.Free(b)
if len(b) == 0 {
@@ -192,9 +192,9 @@ func (a *CheckedAllocator) AssertSize(t TestingT, sz int) {
return true
})
- if int(atomic.LoadInt64(&a.sz)) != sz {
+ if int(a.sz.Load()) != sz {
t.Helper()
- t.Errorf("invalid memory size exp=%d, got=%d", sz, a.sz)
+ t.Errorf("invalid memory size exp=%d, got=%d", sz, a.sz.Load())
}
}
@@ -204,18 +204,16 @@ type CheckedAllocatorScope struct {
}
func NewCheckedAllocatorScope(alloc *CheckedAllocator) *CheckedAllocatorScope {
- sz := atomic.LoadInt64(&alloc.sz)
+ sz := alloc.sz.Load()
return &CheckedAllocatorScope{alloc: alloc, sz: int(sz)}
}
func (c *CheckedAllocatorScope) CheckSize(t TestingT) {
- sz := int(atomic.LoadInt64(&c.alloc.sz))
+ sz := int(c.alloc.sz.Load())
if c.sz != sz {
t.Helper()
t.Errorf("invalid memory size exp=%d, got=%d", c.sz, sz)
}
}
-var (
- _ Allocator = (*CheckedAllocator)(nil)
-)
+var _ Allocator = (*CheckedAllocator)(nil)
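
CheckedAllocator keeps its public behaviour; only the live-byte counter is now an `atomic.Int64`, and the failure message reports a freshly loaded value. Typical leak-check usage in a test, as a sketch:

```go
// Sketch of the usual CheckedAllocator leak check in a test.
package example_test

import (
	"testing"

	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func TestNoLeaks(t *testing.T) {
	mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
	defer mem.AssertSize(t, 0) // fails the test if anything is still allocated

	b := array.NewInt64Builder(mem)
	b.AppendValues([]int64{1, 2, 3}, nil)
	arr := b.NewInt64Array()

	arr.Release()
	b.Release()
}
```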
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/table.go b/vendor/github.com/apache/arrow-go/v18/arrow/table.go
index 6d19d9f183083..bdbf85bfc2f20 100644
--- a/vendor/github.com/apache/arrow-go/v18/arrow/table.go
+++ b/vendor/github.com/apache/arrow-go/v18/arrow/table.go
@@ -79,16 +79,17 @@ func NewColumnFromArr(field Field, arr Array) Column {
}
arr.Retain()
- return Column{
+ col := Column{
field: field,
data: &Chunked{
- refCount: 1,
- chunks: []Array{arr},
- length: arr.Len(),
- nulls: arr.NullN(),
- dtype: field.Type,
+ chunks: []Array{arr},
+ length: arr.Len(),
+ nulls: arr.NullN(),
+ dtype: field.Type,
},
}
+ col.data.refCount.Add(1)
+ return col
}
// NewColumn returns a column from a field and a chunked data array.
@@ -132,7 +133,7 @@ func (col *Column) DataType() DataType { return col.field.Type }
// Chunked manages a collection of primitives arrays as one logical large array.
type Chunked struct {
- refCount int64 // refCount must be first in the struct for 64 bit alignment and sync/atomic (https://github.com/golang/go/issues/37262)
+ refCount atomic.Int64
chunks []Array
@@ -146,10 +147,11 @@ type Chunked struct {
// NewChunked panics if the chunks do not have the same data type.
func NewChunked(dtype DataType, chunks []Array) *Chunked {
arr := &Chunked{
- chunks: make([]Array, 0, len(chunks)),
- refCount: 1,
- dtype: dtype,
+ chunks: make([]Array, 0, len(chunks)),
+ dtype: dtype,
}
+ arr.refCount.Add(1)
+
for _, chunk := range chunks {
if chunk == nil {
continue
@@ -169,16 +171,16 @@ func NewChunked(dtype DataType, chunks []Array) *Chunked {
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (a *Chunked) Retain() {
- atomic.AddInt64(&a.refCount, 1)
+ a.refCount.Add(1)
}
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (a *Chunked) Release() {
- debug.Assert(atomic.LoadInt64(&a.refCount) > 0, "too many releases")
+ debug.Assert(a.refCount.Load() > 0, "too many releases")
- if atomic.AddInt64(&a.refCount, -1) == 0 {
+ if a.refCount.Add(-1) == 0 {
for _, arr := range a.chunks {
arr.Release()
}
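
From a caller's point of view, Chunked and Column construction is unchanged; the refcount is simply initialized through the atomic field after the struct literal. A brief sketch (the field name is illustrative):

```go
// Sketch: building a Column from an array; NewColumnFromArr retains the
// array internally, so the caller still releases its own reference.
package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()

	b := array.NewInt64Builder(mem)
	b.AppendValues([]int64{1, 2, 3}, nil)
	arr := b.NewInt64Array()
	defer arr.Release()
	b.Release()

	field := arrow.Field{Name: "v", Type: arrow.PrimitiveTypes.Int64}
	col := arrow.NewColumnFromArr(field, arr)
	defer col.Release()

	fmt.Println(col.Len(), col.NullN())
}
```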
diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/tmpl
new file mode 100644
index 0000000000000..60df161a9967e
Binary files /dev/null and b/vendor/github.com/apache/arrow-go/v18/arrow/tmpl differ
diff --git a/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go
index e99a4f8f97a6e..5f105f619f06f 100644
--- a/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go
+++ b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go
@@ -267,6 +267,11 @@ func (s *Int8MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Int8MemoTable) Exists(val int8) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Int8MemoTable) Get(val interface{}) (int, bool) {
@@ -282,10 +287,13 @@ func (s *Int8MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Int8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(int8))
+}
- h := hashInt(uint64(val.(int8)), 0)
+func (s *Int8MemoTable) InsertOrGet(val int8) (idx int, found bool, err error) {
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v int8) bool {
- return val.(int8) == v
+ return val == v
})
if ok {
@@ -293,7 +301,7 @@ func (s *Int8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err e
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(int8), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -544,6 +552,11 @@ func (s *Uint8MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Uint8MemoTable) Exists(val uint8) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Uint8MemoTable) Get(val interface{}) (int, bool) {
@@ -559,10 +572,13 @@ func (s *Uint8MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Uint8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(uint8))
+}
- h := hashInt(uint64(val.(uint8)), 0)
+func (s *Uint8MemoTable) InsertOrGet(val uint8) (idx int, found bool, err error) {
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v uint8) bool {
- return val.(uint8) == v
+ return val == v
})
if ok {
@@ -570,7 +586,7 @@ func (s *Uint8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(uint8), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -821,6 +837,11 @@ func (s *Int16MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Int16MemoTable) Exists(val int16) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Int16MemoTable) Get(val interface{}) (int, bool) {
@@ -836,10 +857,13 @@ func (s *Int16MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Int16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(int16))
+}
- h := hashInt(uint64(val.(int16)), 0)
+func (s *Int16MemoTable) InsertOrGet(val int16) (idx int, found bool, err error) {
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v int16) bool {
- return val.(int16) == v
+ return val == v
})
if ok {
@@ -847,7 +871,7 @@ func (s *Int16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(int16), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -1098,6 +1122,11 @@ func (s *Uint16MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Uint16MemoTable) Exists(val uint16) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Uint16MemoTable) Get(val interface{}) (int, bool) {
@@ -1113,10 +1142,13 @@ func (s *Uint16MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Uint16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(uint16))
+}
- h := hashInt(uint64(val.(uint16)), 0)
+func (s *Uint16MemoTable) InsertOrGet(val uint16) (idx int, found bool, err error) {
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v uint16) bool {
- return val.(uint16) == v
+ return val == v
})
if ok {
@@ -1124,7 +1156,7 @@ func (s *Uint16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(uint16), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -1375,6 +1407,11 @@ func (s *Int32MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Int32MemoTable) Exists(val int32) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Int32MemoTable) Get(val interface{}) (int, bool) {
@@ -1390,10 +1427,13 @@ func (s *Int32MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Int32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(int32))
+}
- h := hashInt(uint64(val.(int32)), 0)
+func (s *Int32MemoTable) InsertOrGet(val int32) (idx int, found bool, err error) {
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v int32) bool {
- return val.(int32) == v
+ return val == v
})
if ok {
@@ -1401,7 +1441,7 @@ func (s *Int32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(int32), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -1652,6 +1692,11 @@ func (s *Int64MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Int64MemoTable) Exists(val int64) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Int64MemoTable) Get(val interface{}) (int, bool) {
@@ -1667,10 +1712,13 @@ func (s *Int64MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Int64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(int64))
+}
- h := hashInt(uint64(val.(int64)), 0)
+func (s *Int64MemoTable) InsertOrGet(val int64) (idx int, found bool, err error) {
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v int64) bool {
- return val.(int64) == v
+ return val == v
})
if ok {
@@ -1678,7 +1726,7 @@ func (s *Int64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(int64), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -1929,6 +1977,11 @@ func (s *Uint32MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Uint32MemoTable) Exists(val uint32) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Uint32MemoTable) Get(val interface{}) (int, bool) {
@@ -1944,10 +1997,13 @@ func (s *Uint32MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Uint32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(uint32))
+}
- h := hashInt(uint64(val.(uint32)), 0)
+func (s *Uint32MemoTable) InsertOrGet(val uint32) (idx int, found bool, err error) {
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v uint32) bool {
- return val.(uint32) == v
+ return val == v
})
if ok {
@@ -1955,7 +2011,7 @@ func (s *Uint32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(uint32), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -2206,6 +2262,11 @@ func (s *Uint64MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Uint64MemoTable) Exists(val uint64) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Uint64MemoTable) Get(val interface{}) (int, bool) {
@@ -2221,10 +2282,13 @@ func (s *Uint64MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Uint64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(uint64))
+}
- h := hashInt(uint64(val.(uint64)), 0)
+func (s *Uint64MemoTable) InsertOrGet(val uint64) (idx int, found bool, err error) {
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v uint64) bool {
- return val.(uint64) == v
+ return val == v
})
if ok {
@@ -2232,7 +2296,7 @@ func (s *Uint64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(uint64), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -2483,6 +2547,11 @@ func (s *Float32MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Float32MemoTable) Exists(val float32) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Float32MemoTable) Get(val interface{}) (int, bool) {
@@ -2508,19 +2577,23 @@ func (s *Float32MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Float32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(float32))
+}
+
+func (s *Float32MemoTable) InsertOrGet(val float32) (idx int, found bool, err error) {
var cmp func(float32) bool
- if math.IsNaN(float64(val.(float32))) {
+ if math.IsNaN(float64(val)) {
cmp = isNan32Cmp
// use consistent internal bit pattern for NaN regardless of the pattern
// that is passed to us. NaN is NaN is NaN
val = float32(math.NaN())
} else {
- cmp = func(v float32) bool { return val.(float32) == v }
+ cmp = func(v float32) bool { return val == v }
}
- h := hashFloat32(val.(float32), 0)
+ h := hashFloat32(val, 0)
e, ok := s.tbl.Lookup(h, cmp)
if ok {
@@ -2528,7 +2601,7 @@ func (s *Float32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, er
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(float32), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
@@ -2779,6 +2852,11 @@ func (s *Float64MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *Float64MemoTable) Exists(val float64) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Float64MemoTable) Get(val interface{}) (int, bool) {
@@ -2803,18 +2881,22 @@ func (s *Float64MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Float64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return s.InsertOrGet(val.(float64))
+}
+
+func (s *Float64MemoTable) InsertOrGet(val float64) (idx int, found bool, err error) {
var cmp func(float64) bool
- if math.IsNaN(val.(float64)) {
+ if math.IsNaN(val) {
cmp = math.IsNaN
// use consistent internal bit pattern for NaN regardless of the pattern
// that is passed to us. NaN is NaN is NaN
val = math.NaN()
} else {
- cmp = func(v float64) bool { return val.(float64) == v }
+ cmp = func(v float64) bool { return val == v }
}
- h := hashFloat64(val.(float64), 0)
+ h := hashFloat64(val, 0)
e, ok := s.tbl.Lookup(h, cmp)
if ok {
@@ -2822,7 +2904,7 @@ func (s *Float64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, er
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.(float64), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
diff --git a/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go.tmpl
index 9ba35c7291ed7..14a8f21240068 100644
--- a/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go.tmpl
+++ b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go.tmpl
@@ -267,6 +267,11 @@ func (s *{{.Name}}MemoTable) WriteOutSubsetLE(start int, out []byte) {
s.tbl.WriteOutSubset(start, out)
}
+func (s *{{.Name}}MemoTable) Exists(val {{.name}}) bool {
+ _, ok := s.Get(val)
+ return ok
+}
+
// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *{{.Name}}MemoTable) Get(val interface{}) (int, bool) {
@@ -304,31 +309,35 @@ func (s *{{.Name}}MemoTable) Get(val interface{}) (int, bool) {
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *{{.Name}}MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
- {{if and (ne .Name "Float32") (ne .Name "Float64") }}
- h := hashInt(uint64(val.({{.name}})), 0)
+ return s.InsertOrGet(val.({{.name}}))
+}
+
+func (s *{{.Name}}MemoTable) InsertOrGet(val {{.name}}) (idx int, found bool, err error) {
+ {{if and (ne .Name "Float32") (ne .Name "Float64") -}}
+ h := hashInt(uint64(val), 0)
e, ok := s.tbl.Lookup(h, func(v {{.name}}) bool {
- return val.({{.name}}) == v
+ return val == v
})
{{ else }}
var cmp func({{.name}}) bool
{{if eq .Name "Float32"}}
- if math.IsNaN(float64(val.(float32))) {
+ if math.IsNaN(float64(val)) {
cmp = isNan32Cmp
// use consistent internal bit pattern for NaN regardless of the pattern
// that is passed to us. NaN is NaN is NaN
val = float32(math.NaN())
{{ else -}}
- if math.IsNaN(val.(float64)) {
+ if math.IsNaN(val) {
cmp = math.IsNaN
// use consistent internal bit pattern for NaN regardless of the pattern
// that is passed to us. NaN is NaN is NaN
val = math.NaN()
{{end -}}
} else {
- cmp = func(v {{.name}}) bool { return val.({{.name}}) == v }
+ cmp = func(v {{.name}}) bool { return val == v }
}
- h := hash{{.Name}}(val.({{.name}}), 0)
+ h := hash{{.Name}}(val, 0)
e, ok := s.tbl.Lookup(h, cmp)
{{ end }}
if ok {
@@ -336,7 +345,7 @@ func (s *{{.Name}}MemoTable) GetOrInsert(val interface{}) (idx int, found bool,
found = true
} else {
idx = s.Size()
- s.tbl.Insert(e, h, val.({{.name}}), int32(idx))
+ s.tbl.Insert(e, h, val, int32(idx))
}
return
}
diff --git a/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.go b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.go
index fbb8b33531bbd..f10a9b21ffe51 100644
--- a/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.go
+++ b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.go
@@ -74,6 +74,18 @@ type MemoTable interface {
WriteOutSubset(offset int, out []byte)
}
+type MemoTypes interface {
+ int8 | int16 | int32 | int64 |
+ uint8 | uint16 | uint32 | uint64 |
+ float32 | float64 | []byte
+}
+
+type TypedMemoTable[T MemoTypes] interface {
+ MemoTable
+ Exists(T) bool
+ InsertOrGet(val T) (idx int, found bool, err error)
+}
+
type NumericMemoTable interface {
MemoTable
WriteOutLE(out []byte)
@@ -202,25 +214,17 @@ func (BinaryMemoTable) getHash(val interface{}) uint64 {
}
}
-// helper function to append the given value to the builder regardless
-// of the underlying binary type.
-func (b *BinaryMemoTable) appendVal(val interface{}) {
- switch v := val.(type) {
- case string:
- b.builder.AppendString(v)
- case []byte:
- b.builder.Append(v)
- case ByteSlice:
- b.builder.Append(v.Bytes())
- }
-}
-
func (b *BinaryMemoTable) lookup(h uint64, val []byte) (*entryInt32, bool) {
return b.tbl.Lookup(h, func(i int32) bool {
return bytes.Equal(val, b.builder.Value(int(i)))
})
}
+func (b *BinaryMemoTable) Exists(val []byte) bool {
+ _, ok := b.lookup(b.getHash(val), val)
+ return ok
+}
+
// Get returns the index of the specified value in the table or KeyNotFound,
// and a boolean indicating whether it was found in the table.
func (b *BinaryMemoTable) Get(val interface{}) (int, bool) {
@@ -246,17 +250,21 @@ func (b *BinaryMemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err
return
}
+func (b *BinaryMemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ return b.InsertOrGet(b.valAsByteSlice(val))
+}
+
// GetOrInsert returns the index of the given value in the table, if not found
// it is inserted into the table. The return value 'found' indicates whether the value
// was found in the table (true) or inserted (false) along with any possible error.
-func (b *BinaryMemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+func (b *BinaryMemoTable) InsertOrGet(val []byte) (idx int, found bool, err error) {
h := b.getHash(val)
- p, found := b.lookup(h, b.valAsByteSlice(val))
+ p, found := b.lookup(h, val)
if found {
idx = int(p.payload.val)
} else {
idx = b.Size()
- b.appendVal(val)
+ b.builder.Append(val)
b.tbl.Insert(p, h, int32(idx), -1)
}
return
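
The new `Exists`/`InsertOrGet` methods, together with the `MemoTypes` constraint and `TypedMemoTable` interface, give the memo tables a typed API that avoids the `interface{}` boxing `GetOrInsert` required. A sketch of how generic code could build on it; since `internal/hashing` is an internal package, code like this could only live inside the arrow-go module itself:

```go
// Sketch (module-internal only): a generic dictionary-encoding helper built
// on the TypedMemoTable interface added in v18.3.0.
package hashingexample

import "github.com/apache/arrow-go/v18/internal/hashing"

// encode maps each value to a dense dictionary index via the typed
// InsertOrGet, with no interface{} round trip per value.
func encode[T hashing.MemoTypes](memo hashing.TypedMemoTable[T], values []T) ([]int, error) {
	indices := make([]int, len(values))
	for i, v := range values {
		idx, _, err := memo.InsertOrGet(v)
		if err != nil {
			return nil, err
		}
		indices[i] = idx
	}
	return indices, nil
}
```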
diff --git a/vendor/github.com/go-jose/go-jose/v4/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore
new file mode 100644
index 0000000000000..eb29ebaefd85b
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.gitignore
@@ -0,0 +1,2 @@
+jose-util/jose-util
+jose-util.t.err
\ No newline at end of file
diff --git a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml
new file mode 100644
index 0000000000000..2a577a8f95b03
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml
@@ -0,0 +1,53 @@
+# https://github.com/golangci/golangci-lint
+
+run:
+ skip-files:
+ - doc_test.go
+ modules-download-mode: readonly
+
+linters:
+ enable-all: true
+ disable:
+ - gochecknoglobals
+ - goconst
+ - lll
+ - maligned
+ - nakedret
+ - scopelint
+ - unparam
+ - funlen # added in 1.18 (requires go-jose changes before it can be enabled)
+
+linters-settings:
+ gocyclo:
+ min-complexity: 35
+
+issues:
+ exclude-rules:
+ - text: "don't use ALL_CAPS in Go names"
+ linters:
+ - golint
+ - text: "hardcoded credentials"
+ linters:
+ - gosec
+ - text: "weak cryptographic primitive"
+ linters:
+ - gosec
+ - path: json/
+ linters:
+ - dupl
+ - errcheck
+ - gocritic
+ - gocyclo
+ - golint
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - unused
+ - path: _test\.go
+ linters:
+ - scopelint
+ - path: jwk.go
+ linters:
+ - gocyclo
diff --git a/vendor/github.com/go-jose/go-jose/v4/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml
new file mode 100644
index 0000000000000..48de631b003b2
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.travis.yml
@@ -0,0 +1,33 @@
+language: go
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+go:
+ - "1.13.x"
+ - "1.14.x"
+ - tip
+
+before_script:
+ - export PATH=$HOME/.local/bin:$PATH
+
+before_install:
+ - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge
+ - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0
+ - pip install cram --user
+
+script:
+ - go test -v -covermode=count -coverprofile=profile.cov .
+ - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner
+ - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher
+ - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt
+ - go test -v ./json # no coverage for forked encoding/json package
+ - golangci-lint run
+ - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
+ - cd ..
+
+after_success:
+ - gocovmerge *.cov */*.cov > merged.coverprofile
+ - goveralls -coverprofile merged.coverprofile -service=travis-ci
diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
new file mode 100644
index 0000000000000..6f717dbd86e3b
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
@@ -0,0 +1,96 @@
+# v4.0.4
+
+## Fixed
+
+ - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a
+ breaking change. See #136 / #137.
+
+# v4.0.3
+
+## Changed
+
+ - Allow unmarshalling JSONWebKeySets with unsupported key types (#130)
+ - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129)
+ - Dependency updates
+
+# v4.0.2
+
+## Changed
+
+ - Improved documentation of Verify() to note that JSONWebKeySet is a supported
+ argument type (#104)
+ - Defined exported error values for missing x5c header and unsupported elliptic
+ curves error cases (#117)
+
+# v4.0.1
+
+## Fixed
+
+ - An attacker could send a JWE containing compressed data that used large
+ amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`.
+ Those functions now return an error if the decompressed data would exceed
+ 250kB or 10x the compressed size (whichever is larger). Thanks to
+ Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj)
+ for reporting.
+
+# v4.0.0
+
+This release makes some breaking changes in order to more thoroughly
+address the vulnerabilities discussed in [Three New Attacks Against JSON Web
+Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot
+token".
+
+## Changed
+
+ - Limit JWT encryption types (exclude password or public key types) (#78)
+ - Enforce minimum length for HMAC keys (#85)
+ - jwt: match any audience in a list, rather than requiring all audiences (#81)
+ - jwt: accept only Compact Serialization (#75)
+ - jws: Add expected algorithms for signatures (#74)
+ - Require specifying expected algorithms for ParseEncrypted,
+ ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned,
+ jwt.ParseSignedAndEncrypted (#69, #74)
+ - Usually there is a small, known set of appropriate algorithms for a program
+ to use and it's a mistake to allow unexpected algorithms. For instance the
+ "billion hash attack" relies in part on programs accepting the PBES2
+ encryption algorithm and doing the necessary work even if they weren't
+ specifically configured to allow PBES2.
+ - Revert "Strip padding off base64 strings" (#82)
+ - The specs require base64url encoding without padding.
+ - Minimum supported Go version is now 1.21
+
+## Added
+
+ - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON.
+ - These allow parsing a specific serialization, as opposed to ParseSigned and
+ ParseEncrypted, which try to automatically detect which serialization was
+ provided. It's common to require a specific serialization for a specific
+ protocol - for instance JWT requires Compact serialization.
+
+[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
+
+# v3.0.2
+
+## Fixed
+
+ - DecryptMulti: handle decompression error (#19)
+
+## Changed
+
+ - jwe/CompactSerialize: improve performance (#67)
+ - Increase the default number of PBKDF2 iterations to 600k (#48)
+ - Return the proper algorithm for ECDSA keys (#45)
+
+## Added
+
+ - Add Thumbprint support for opaque signers (#38)
+
+# v3.0.1
+
+## Fixed
+
+ - Security issue: an attacker specifying a large "p2c" value can cause
+ JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
+ amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
+ disclosure and to Tom Tervoort for originally publishing the category of attack.
+ https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
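
The v4.0.0 notes above describe the key breaking change: the parse functions now require callers to enumerate the signature algorithms they accept. A hedged sketch of what that looks like on the JWT side (the key handling and claim fields are illustrative):

```go
// Sketch of the v4 API change described above: ParseSigned now requires the
// caller to list the signature algorithms it is willing to accept.
package josev4example

import (
	"fmt"

	"github.com/go-jose/go-jose/v4"
	"github.com/go-jose/go-jose/v4/jwt"
)

func parseToken(raw string, key []byte) error {
	// Tokens signed with any algorithm outside this list are rejected up
	// front, which is the point of the v4.0.0 breaking change.
	tok, err := jwt.ParseSigned(raw, []jose.SignatureAlgorithm{jose.HS256})
	if err != nil {
		return err
	}

	var claims jwt.Claims
	if err := tok.Claims(key, &claims); err != nil {
		return err
	}
	fmt.Println("issuer:", claims.Issuer)
	return nil
}
```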
diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md
new file mode 100644
index 0000000000000..4b4805add65cc
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md
@@ -0,0 +1,9 @@
+# Contributing
+
+If you would like to contribute code to go-jose you can do so through GitHub by
+forking the repository and sending a pull request.
+
+When submitting code, please make every effort to follow existing conventions
+and style in order to keep the code as readable as possible. Please also make
+sure all tests pass by running `go test`, and format your code with `go fmt`.
+We also recommend using `golint` and `errcheck`.
diff --git a/vendor/github.com/go-jose/go-jose/v4/LICENSE b/vendor/github.com/go-jose/go-jose/v4/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md
new file mode 100644
index 0000000000000..02b5749546b28
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/README.md
@@ -0,0 +1,106 @@
+# Go JOSE
+
+[godoc](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
+[godoc - jwt](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
+[license](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
+[build](https://github.com/go-jose/go-jose/actions)
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. This includes support for JSON Web Encryption,
+JSON Web Signature, and JSON Web Token standards.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and JWS/JWE JSON Serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+[Version 4](https://github.com/go-jose/go-jose)
+([branch](https://github.com/go-jose/go-jose/tree/main),
+[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
+
+ import "github.com/go-jose/go-jose/v4"
+
+The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
+are still useable but not actively developed anymore.
+
+Version 3, in this repo, is still receiving security fixes but not functionality
+updates.
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct) | ECDH-ES<sup>1</sup>
+ Direct encryption | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+ Ed25519 | EdDSA<sup>2</sup>
+
+2. Only available in version 2 of the package
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup> | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
+ AES, HMAC | []byte
+
+1. Only available in version 2 or later of the package
+
+## Examples
+
+[godoc](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
+[godoc - jwt](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/main/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example as well.
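
To complement the algorithm tables above, a small sign-and-verify round trip using the compact serialization; the 32-byte key reflects the HMAC minimum-length check mentioned in the changelog, and the payload is arbitrary:

```go
// Minimal HS256 sign/verify round trip with go-jose v4.
package main

import (
	"fmt"

	"github.com/go-jose/go-jose/v4"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // 32-byte HS256 key

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// Verification in v4 requires listing the acceptable algorithms.
	parsed, err := jose.ParseSigned(compact, []jose.SignatureAlgorithm{jose.HS256})
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```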
diff --git a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
new file mode 100644
index 0000000000000..2f18a75a8228c
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
@@ -0,0 +1,13 @@
+# Security Policy
+This document explains how to contact the Let's Encrypt security team to report security vulnerabilities.
+
+## Supported Versions
+| Version | Supported |
+| ------- | ----------|
+| >= v3 | ✓ |
+| v2 | ✗ |
+| v1 | ✗ |
+
+## Reporting a vulnerability
+
+Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email.
diff --git a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
new file mode 100644
index 0000000000000..f8d5774ef56cb
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
@@ -0,0 +1,595 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+
+ josecipher "github.com/go-jose/go-jose/v4/cipher"
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+ publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+ privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+ publicKey *ecdsa.PublicKey
+}
+
+type edEncrypterVerifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+ size int
+ algID string
+ publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+type edDecrypterSigner struct {
+ privateKey ed25519.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that the key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &rsaEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &rsaDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
+ if sigAlg != EdDSA {
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &edDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that the key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+ // so stubbing it is meaningless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+ // only exists for preventing crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+ if keyBytes != len(jek) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, ErrCryptoFailure
+ }
+
+ cek, _, err := generator.genKey()
+ if err != nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
+
+ return cek, nil
+ case RSA_OAEP:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ case RSA_OAEP_256:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ var out []byte
+ var err error
+
+ switch alg {
+ case RS256, RS384, RS512:
+ // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the
+ // random parameter is legacy and ignored, and it can be nil.
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1
+ out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
+ case PS256, PS384, PS512:
+ out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ })
+ }
+
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ switch alg {
+ case RS256, RS384, RS512:
+ return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
+ case PS256, PS384, PS512:
+ return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
+ }
+
+ return ErrUnsupportedAlgorithm
+}
+
+// Encrypt the given payload and update the object.
+func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case ECDH_ES:
+ // ECDH-ES mode doesn't wrap a key; the shared secret is used directly as the key.
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ generator := ecKeyGenerator{
+ algID: string(alg),
+ publicKey: ctx.publicKey,
+ }
+
+ switch alg {
+ case ECDH_ES_A128KW:
+ generator.size = 16
+ case ECDH_ES_A192KW:
+ generator.size = 24
+ case ECDH_ES_A256KW:
+ generator.size = 32
+ }
+
+ kek, header, err := generator.genKey()
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ block, err := aes.NewCipher(kek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &header,
+ }, nil
+}
+
+// Get key size for EC key generator
+func (ctx ecKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Get a content encryption key for ECDH-ES
+func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
+
+ b, err := json.Marshal(&JSONWebKey{
+ Key: &priv.PublicKey,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ headers := rawHeader{
+ headerEPK: makeRawMessage(b),
+ }
+
+ return out, headers, nil
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ epk, err := headers.getEPK()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid epk header")
+ }
+ if epk == nil {
+ return nil, errors.New("go-jose/go-jose: missing epk header")
+ }
+
+ publicKey, ok := epk.Key.(*ecdsa.PublicKey)
+ if publicKey == nil || !ok {
+ return nil, errors.New("go-jose/go-jose: invalid epk header")
+ }
+
+ if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
+ }
+
+ apuData, err := headers.getAPU()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid apu header")
+ }
+ apvData, err := headers.getAPV()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid apv header")
+ }
+
+ deriveKey := func(algID string, size int) []byte {
+ return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
+ }
+
+ var keySize int
+
+ algorithm := headers.getAlgorithm()
+ switch algorithm {
+ case ECDH_ES:
+ // ECDH-ES uses direct key agreement, no key unwrapping necessary.
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
+ case ECDH_ES_A128KW:
+ keySize = 16
+ case ECDH_ES_A192KW:
+ keySize = 24
+ case ECDH_ES_A256KW:
+ keySize = 32
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ key := deriveKey(string(algorithm), keySize)
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return josecipher.KeyUnwrap(block, recipient.encryptedKey)
+}
+
+func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ if alg != EdDSA {
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: sig,
+ protected: &rawHeader{},
+ }, nil
+}
+
+func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ if alg != EdDSA {
+ return ErrUnsupportedAlgorithm
+ }
+ ok := ed25519.Verify(ctx.publicKey, payload, signature)
+ if !ok {
+ return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
+ }
+ return nil
+}
+
+// Sign the given payload
+func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var expectedBitSize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ expectedBitSize = 256
+ hash = crypto.SHA256
+ case ES384:
+ expectedBitSize = 384
+ hash = crypto.SHA384
+ case ES512:
+ expectedBitSize = 521
+ hash = crypto.SHA512
+ }
+
+ curveBits := ctx.privateKey.Curve.Params().BitSize
+ if expectedBitSize != curveBits {
+ return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var keySize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ keySize = 32
+ hash = crypto.SHA256
+ case ES384:
+ keySize = 48
+ hash = crypto.SHA384
+ case ES512:
+ keySize = 66
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ if len(signature) != 2*keySize {
+ return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r := big.NewInt(0).SetBytes(signature[:keySize])
+ s := big.NewInt(0).SetBytes(signature[keySize:])
+
+ match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
+ if !match {
+ return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
+ }
+
+ return nil
+}
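The fixed-width, big-endian `r || s` layout produced by `signPayload` and checked by `verifyPayload` above can be illustrated outside the package. The helper below is a hypothetical standalone sketch that mirrors the padding logic described in the comments; it is not part of the library:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

// rawJOSESignature mirrors the encoding in signPayload: r and s are
// left-padded with zeros to the curve's byte size and concatenated.
func rawJOSESignature(r, s *big.Int, curveBits int) []byte {
	keyBytes := curveBits / 8
	if curveBits%8 > 0 {
		keyBytes++ // e.g. P-521: 521 bits -> 66 bytes per integer
	}
	out := make([]byte, 2*keyBytes)
	r.FillBytes(out[:keyBytes]) // FillBytes left-pads with zeros
	s.FillBytes(out[keyBytes:])
	return out
}

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	digest := sha256.Sum256([]byte("payload"))
	r, s, _ := ecdsa.Sign(rand.Reader, priv, digest[:])

	sig := rawJOSESignature(r, s, priv.Curve.Params().BitSize)
	fmt.Println(len(sig)) // 64 for ES256
}
```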
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go
new file mode 100644
index 0000000000000..af029cec0baaf
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go
@@ -0,0 +1,196 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ nonceBytes = 16
+)
+
+// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
+ keySize := len(key) / 2
+ integrityKey := key[:keySize]
+ encryptionKey := key[keySize:]
+
+ blockCipher, err := newBlockCipher(encryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var hash func() hash.Hash
+ switch keySize {
+ case 16:
+ hash = sha256.New
+ case 24:
+ hash = sha512.New384
+ case 32:
+ hash = sha512.New
+ }
+
+ return &cbcAEAD{
+ hash: hash,
+ blockCipher: blockCipher,
+ authtagBytes: keySize,
+ integrityKey: integrityKey,
+ }, nil
+}
+
+// An AEAD based on CBC+HMAC
+type cbcAEAD struct {
+ hash func() hash.Hash
+ authtagBytes int
+ integrityKey []byte
+ blockCipher cipher.Block
+}
+
+func (ctx *cbcAEAD) NonceSize() int {
+ return nonceBytes
+}
+
+func (ctx *cbcAEAD) Overhead() int {
+ // Maximum overhead is block size (for padding) plus auth tag length, where
+ // the length of the auth tag is equivalent to the key size.
+ return ctx.blockCipher.BlockSize() + ctx.authtagBytes
+}
+
+// Seal encrypts and authenticates the plaintext.
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
+ // Output buffer -- must take care not to mangle plaintext input.
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
+ copy(ciphertext, plaintext)
+ ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+ cbc.CryptBlocks(ciphertext, ciphertext)
+ authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
+ copy(out, ciphertext)
+ copy(out[len(ciphertext):], authtag)
+
+ return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < ctx.authtagBytes {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)")
+ }
+
+ offset := len(ciphertext) - ctx.authtagBytes
+ expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+ match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+ if match != 1 {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)")
+ }
+
+ cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+ // Make copy of ciphertext buffer, don't want to modify in place
+ buffer := append([]byte{}, ciphertext[:offset]...)
+
+ if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)")
+ }
+
+ cbc.CryptBlocks(buffer, buffer)
+
+ // Remove padding
+ plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+ if err != nil {
+ return nil, err
+ }
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
+ copy(out, plaintext)
+
+ return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+ buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
+ n := 0
+ n += copy(buffer, aad)
+ n += copy(buffer[n:], nonce)
+ n += copy(buffer[n:], ciphertext)
+ binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
+
+ // According to documentation, Write() on hash.Hash never fails.
+ hmac := hmac.New(ctx.hash, ctx.integrityKey)
+ _, _ = hmac.Write(buffer)
+
+ return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n uint64) (head, tail []byte) {
+ if uint64(cap(in)) >= n {
+ head = in[:n]
+ } else {
+ head = make([]byte, n)
+ copy(head, in)
+ }
+
+ tail = head[len(in):]
+ return
+}
+
+// Apply padding
+func padBuffer(buffer []byte, blockSize int) []byte {
+ missing := blockSize - (len(buffer) % blockSize)
+ ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
+ padding := bytes.Repeat([]byte{byte(missing)}, missing)
+ copy(out, padding)
+ return ret
+}
+
+// Remove padding
+func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
+ if len(buffer)%blockSize != 0 {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ last := buffer[len(buffer)-1]
+ count := int(last)
+
+ if count == 0 || count > blockSize || count > len(buffer) {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ padding := bytes.Repeat([]byte{last}, count)
+ if !bytes.HasSuffix(buffer, padding) {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ return buffer[:len(buffer)-count], nil
+}
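A rough usage sketch of the AEAD above: the input key is split in half, with the first half keying the HMAC and the second half keying AES-CBC, so a 32-byte key corresponds to A128CBC-HS256. The key, nonce, plaintext, and associated data below are placeholders:

```go
package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v4/cipher"
)

func main() {
	key := make([]byte, 32) // 16-byte HMAC-SHA256 key || 16-byte AES-128 key
	_, _ = rand.Read(key)

	aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
	if err != nil {
		panic(err)
	}

	nonce := make([]byte, aead.NonceSize()) // 16-byte CBC IV
	_, _ = rand.Read(nonce)

	ciphertext := aead.Seal(nil, nonce, []byte("attack at dawn"), []byte("associated data"))
	plaintext, err := aead.Open(nil, nonce, ciphertext, []byte("associated data"))
	fmt.Println(string(plaintext), err) // "attack at dawn" <nil>
}
```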
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go
new file mode 100644
index 0000000000000..f62c3bdba5d0e
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go
@@ -0,0 +1,75 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+type concatKDF struct {
+ z, info []byte
+ i uint32
+ cache []byte
+ hasher hash.Hash
+}
+
+// NewConcatKDF builds a KDF reader based on the given inputs.
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
+ buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
+ n := 0
+ n += copy(buffer, algID)
+ n += copy(buffer[n:], ptyUInfo)
+ n += copy(buffer[n:], ptyVInfo)
+ n += copy(buffer[n:], supPubInfo)
+ copy(buffer[n:], supPrivInfo)
+
+ hasher := hash.New()
+
+ return &concatKDF{
+ z: z,
+ info: buffer,
+ hasher: hasher,
+ cache: []byte{},
+ i: 1,
+ }
+}
+
+func (ctx *concatKDF) Read(out []byte) (int, error) {
+ copied := copy(out, ctx.cache)
+ ctx.cache = ctx.cache[copied:]
+
+ for copied < len(out) {
+ ctx.hasher.Reset()
+
+ // Write on a hash.Hash never fails
+ _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
+ _, _ = ctx.hasher.Write(ctx.z)
+ _, _ = ctx.hasher.Write(ctx.info)
+
+ hash := ctx.hasher.Sum(nil)
+ chunkCopied := copy(out[copied:], hash)
+ copied += chunkCopied
+ ctx.cache = hash[chunkCopied:]
+
+ ctx.i++
+ }
+
+ return copied, nil
+}
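The KDF reader above can also be consumed directly. The sketch below builds the 32-bit length-prefixed inputs that the JWA Concat KDF expects (the `DeriveECDHES` helper in this package constructs these prefixes for you); the shared secret here is a placeholder rather than a real ECDH output:

```go
package main

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() works
	"encoding/binary"
	"fmt"
	"io"

	josecipher "github.com/go-jose/go-jose/v4/cipher"
)

// lengthPrefixed mirrors the 32-bit big-endian length prefix used for KDF inputs.
func lengthPrefixed(data []byte) []byte {
	out := make([]byte, len(data)+4)
	binary.BigEndian.PutUint32(out, uint32(len(data)))
	copy(out[4:], data)
	return out
}

func main() {
	z := make([]byte, 32) // placeholder shared secret (normally the ECDH output)

	algID := lengthPrefixed([]byte("A128GCM"))
	apu := lengthPrefixed(nil) // empty PartyUInfo
	apv := lengthPrefixed(nil) // empty PartyVInfo

	supPubInfo := make([]byte, 4)
	binary.BigEndian.PutUint32(supPubInfo, 128) // desired key length in bits

	kdf := josecipher.NewConcatKDF(crypto.SHA256, z, algID, apu, apv, supPubInfo, nil)

	key := make([]byte, 16)
	_, _ = io.ReadFull(kdf, key)
	fmt.Printf("%x\n", key)
}
```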
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go
new file mode 100644
index 0000000000000..093c646740ba3
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go
@@ -0,0 +1,86 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+ if size > 1<<16 {
+ panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+ }
+
+ // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+ algID := lengthPrefixed([]byte(alg))
+ ptyUInfo := lengthPrefixed(apuData)
+ ptyVInfo := lengthPrefixed(apvData)
+
+ // suppPubInfo is the encoded length of the output size in bits
+ supPubInfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+ if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+ panic("public key not on same curve as private key")
+ }
+
+ z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+ zBytes := z.Bytes()
+
+ // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
+ // the returned byte array. This can lead to a problem where zBytes will be
+ // shorter than expected which breaks the key derivation. Therefore we must pad
+ // to the full length of the expected coordinate here before calling the KDF.
+ octSize := dSize(priv.Curve)
+ if len(zBytes) != octSize {
+ zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
+ }
+
+ reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+ key := make([]byte, size)
+
+ // Read on the KDF will never fail
+ _, _ = reader.Read(key)
+
+ return key
+}
+
+// dSize returns the size in octets for a coordinate on an elliptic curve.
+func dSize(curve elliptic.Curve) int {
+ order := curve.Params().P
+ bitLen := order.BitLen()
+ size := bitLen / 8
+ if bitLen%8 != 0 {
+ size++
+ }
+ return size
+}
+
+func lengthPrefixed(data []byte) []byte {
+ out := make([]byte, len(data)+4)
+ binary.BigEndian.PutUint32(out, uint32(len(data)))
+ copy(out[4:], data)
+ return out
+}
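A minimal sketch of the derivation above, using two freshly generated P-256 key pairs; both sides arrive at the same key because the underlying ECDH shared secret is the same regardless of which side holds the private key:

```go
package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v4/cipher"
)

func main() {
	alice, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	bob, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// Each side combines its own private key with the other side's public key.
	k1 := josecipher.DeriveECDHES("A128GCM", nil, nil, alice, &bob.PublicKey, 16)
	k2 := josecipher.DeriveECDHES("A128GCM", nil, nil, bob, &alice.PublicKey, 16)

	fmt.Println(bytes.Equal(k1, k2)) // true
}
```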
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
new file mode 100644
index 0000000000000..b9effbca8a401
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%8 != 0 {
+ return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := len(cek) / 8
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], cek[i*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer, defaultIV)
+
+ for t := 0; t < 6*n; t++ {
+ copy(buffer[8:], r[t%n])
+
+ block.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] ^= tBytes[i]
+ }
+ copy(r[t%n], buffer[8:])
+ }
+
+ out := make([]byte, (n+1)*8)
+ copy(out, buffer[:8])
+ for i := range r {
+ copy(out[(i+1)*8:], r[i])
+ }
+
+ return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
+ if len(ciphertext)%8 != 0 {
+ return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := (len(ciphertext) / 8) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], ciphertext[(i+1)*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer[:8], ciphertext[:8])
+
+ for t := 6*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] ^= tBytes[i]
+ }
+ copy(buffer[8:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[8:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
+ return nil, errors.New("go-jose/go-jose: failed to unwrap key")
+ }
+
+ out := make([]byte, n*8)
+ for i := range r {
+ copy(out[i*8:], r[i])
+ }
+
+ return out, nil
+}
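A quick round-trip sketch of the wrap/unwrap pair above, using random placeholder keys; as the code shows, the wrapped output is always 8 bytes longer than the input CEK:

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v4/cipher"
)

func main() {
	kek := make([]byte, 16) // key-encryption key
	cek := make([]byte, 16) // content-encryption key to be wrapped
	_, _ = rand.Read(kek)
	_, _ = rand.Read(cek)

	block, _ := aes.NewCipher(kek)

	wrapped, _ := josecipher.KeyWrap(block, cek) // 24 bytes: (n+1)*8
	unwrapped, _ := josecipher.KeyUnwrap(block, wrapped)

	fmt.Println(len(wrapped), bytes.Equal(cek, unwrapped)) // 24 true
}
```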
diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go
new file mode 100644
index 0000000000000..d81b03b447400
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go
@@ -0,0 +1,599 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter interface {
+ Encrypt(plaintext []byte) (*JSONWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
+ Options() EncrypterOptions
+}
+
+// A generic content cipher
+type contentCipher interface {
+ keySize() int
+ encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+ decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+ keySize() int
+ genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+ encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+ decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+ contentAlg ContentEncryption
+ compressionAlg CompressionAlgorithm
+ cipher contentCipher
+ recipients []recipientKeyInfo
+ keyGenerator keyGenerator
+ extraHeaders map[HeaderKey]interface{}
+}
+
+type recipientKeyInfo struct {
+ keyID string
+ keyAlg KeyAlgorithm
+ keyEncrypter keyEncrypter
+}
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions struct {
+ Compression CompressionAlgorithm
+
+ // Optional map of name/value pairs to be inserted into the protected
+ // header of a JWE object. Some specifications which make use of
+ // JWE require additional values here.
+ //
+ // Values will be serialized by [json.Marshal] and must be valid inputs to
+ // that function.
+ //
+ // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary, and returns the updated EncrypterOptions.
+//
+// The v parameter will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
+ if eo.ExtraHeaders == nil {
+ eo.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ eo.ExtraHeaders[k] = v
+ return eo
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// EncrypterOptions.
+func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
+func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderType, typ)
+}
+
+// Recipient represents an algorithm/key to encrypt messages to.
+//
+// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
+// on the password-based encryption algorithms PBES2-HS256+A128KW,
+// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
+// default of 100000 will be used for the count and a 128-bit random salt will
+// be generated.
+type Recipient struct {
+ Algorithm KeyAlgorithm
+ // Key must have one of these types:
+ // - ed25519.PublicKey
+ // - *ecdsa.PublicKey
+ // - *rsa.PublicKey
+ // - *JSONWebKey
+ // - JSONWebKey
+ // - []byte (a symmetric key)
+ // - Any type that satisfies the OpaqueKeyEncrypter interface
+ //
+ // The type of Key must match the value of Algorithm.
+ Key interface{}
+ KeyID string
+ PBES2Count int
+ PBES2Salt []byte
+}
+
+// NewEncrypter creates an appropriate encrypter based on the key type
+func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: getContentCipher(enc),
+ }
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ if encrypter.cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ var keyID string
+ var rawKey interface{}
+ switch encryptionKey := rcpt.Key.(type) {
+ case JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case *JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case OpaqueKeyEncrypter:
+ keyID, rawKey = encryptionKey.KeyID(), encryptionKey
+ default:
+ rawKey = encryptionKey
+ }
+
+ switch rcpt.Algorithm {
+ case DIRECT:
+ // Direct encryption mode must be treated differently
+ keyBytes, ok := rawKey.([]byte)
+ if !ok {
+ return nil, ErrUnsupportedKeyType
+ }
+ if encrypter.cipher.keySize() != len(keyBytes) {
+ return nil, ErrInvalidKeySize
+ }
+ encrypter.keyGenerator = staticKeyGenerator{
+ key: keyBytes,
+ }
+ recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes)
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ case ECDH_ES:
+ // ECDH-ES (w/o key wrapping) is similar to DIRECT mode
+ keyDSA, ok := rawKey.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = ecKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ algID: string(enc),
+ publicKey: keyDSA,
+ }
+ recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA)
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ default:
+ // Can just add a standard recipient
+ encrypter.keyGenerator = randomKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ }
+ err := encrypter.addRecipient(rcpt)
+ return encrypter, err
+ }
+}
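As a rough sketch of how the constructor above is used: build an encrypter for a single RSA-OAEP recipient, encrypt, and decrypt with the matching private key (error handling elided; Decrypt is defined later in this file):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/go-jose/go-jose/v4"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)

	enc, _ := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP_256, Key: &priv.PublicKey, KeyID: "example-key"},
		nil,
	)

	obj, _ := enc.Encrypt([]byte("secret payload"))

	plaintext, _ := obj.Decrypt(priv)
	fmt.Println(string(plaintext)) // secret payload
}
```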
+
+// NewMultiEncrypter creates a multi-encrypter based on the given parameters
+func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ cipher := getContentCipher(enc)
+
+ if cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+ if len(rcpts) == 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty")
+ }
+
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: cipher,
+ keyGenerator: randomKeyGenerator{
+ size: cipher.keySize(),
+ },
+ }
+
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ for _, recipient := range rcpts {
+ err := encrypter.addRecipient(recipient)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return encrypter, nil
+}
+
+func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
+ var recipientInfo recipientKeyInfo
+
+ switch recipient.Algorithm {
+ case DIRECT, ECDH_ES:
+ return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
+ }
+
+ recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
+ if recipient.KeyID != "" {
+ recipientInfo.keyID = recipient.KeyID
+ }
+
+ switch recipient.Algorithm {
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
+ sr.p2c = recipient.PBES2Count
+ sr.p2s = recipient.PBES2Salt
+ }
+ }
+
+ if err == nil {
+ ctx.recipients = append(ctx.recipients, recipientInfo)
+ }
+ return err
+}
+
+func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
+ switch encryptionKey := encryptionKey.(type) {
+ case *rsa.PublicKey:
+ return newRSARecipient(alg, encryptionKey)
+ case *ecdsa.PublicKey:
+ return newECDHRecipient(alg, encryptionKey)
+ case []byte:
+ return newSymmetricRecipient(alg, encryptionKey)
+ case string:
+ return newSymmetricRecipient(alg, []byte(encryptionKey))
+ case *JSONWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ recipient.keyID = encryptionKey.KeyID
+ return recipient, err
+ case OpaqueKeyEncrypter:
+ return newOpaqueKeyEncrypter(alg, encryptionKey)
+ }
+ return recipientKeyInfo{}, ErrUnsupportedKeyType
+}
+
+// newDecrypter creates an appropriate decrypter based on the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+ switch decryptionKey := decryptionKey.(type) {
+ case *rsa.PrivateKey:
+ return &rsaDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case *ecdsa.PrivateKey:
+ return &ecDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case []byte:
+ return &symmetricKeyCipher{
+ key: decryptionKey,
+ }, nil
+ case string:
+ return &symmetricKeyCipher{
+ key: []byte(decryptionKey),
+ }, nil
+ case JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case *JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case OpaqueKeyDecrypter:
+ return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil
+ default:
+ return nil, ErrUnsupportedKeyType
+ }
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
+ return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
+ obj := &JSONWebEncryption{}
+ obj.aad = aad
+
+ obj.protected = &rawHeader{}
+ err := obj.protected.set(headerEncryption, ctx.contentAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+ if len(ctx.recipients) == 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to")
+ }
+
+ cek, headers, err := ctx.keyGenerator.genKey()
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.merge(&headers)
+
+ for i, info := range ctx.recipients {
+ recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ err = recipient.header.set(headerAlgorithm, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ if info.keyID != "" {
+ err = recipient.header.set(headerKeyID, info.keyID)
+ if err != nil {
+ return nil, err
+ }
+ }
+ obj.recipients[i] = recipient
+ }
+
+ if len(ctx.recipients) == 1 {
+ // Move per-recipient headers into main protected header if there's
+ // only a single recipient.
+ obj.protected.merge(obj.recipients[0].header)
+ obj.recipients[0].header = nil
+ }
+
+ if ctx.compressionAlg != NONE {
+ plaintext, err = compress(ctx.compressionAlg, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ err = obj.protected.set(headerCompression, ctx.compressionAlg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for k, v := range ctx.extraHeaders {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ (*obj.protected)[k] = makeRawMessage(b)
+ }
+
+ authData := obj.computeAuthData()
+ parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.iv = parts.iv
+ obj.ciphertext = parts.ciphertext
+ obj.tag = parts.tag
+
+ return obj, nil
+}
+
+func (ctx *genericEncrypter) Options() EncrypterOptions {
+ return EncrypterOptions{
+ Compression: ctx.compressionAlg,
+ ExtraHeaders: ctx.extraHeaders,
+ }
+}
+
+// Decrypt and validate the object and return the plaintext. This
+// function does not support multi-recipient. If you desire multi-recipient
+// decryption use DecryptMulti instead.
+//
+// The decryptionKey argument must contain a private or symmetric key
+// and must have one of these types:
+// - *ecdsa.PrivateKey
+// - *rsa.PrivateKey
+// - *JSONWebKey
+// - JSONWebKey
+// - *JSONWebKeySet
+// - JSONWebKeySet
+// - []byte (a symmetric key)
+// - string (a symmetric key)
+// - Any type that satisfies the OpaqueKeyDecrypter interface.
+//
+// Note that ed25519 is only available for signatures, not encryption, so is
+// not an option here.
+//
+// Automatically decompresses plaintext, but returns an error if the decompressed
+// data would be >250kB or >10x the size of the compressed data, whichever is larger.
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+ headers := obj.mergedHeaders(nil)
+
+ if len(obj.recipients) > 1 {
+ return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
+ }
+
+ critical, err := headers.getCritical()
+ if err != nil {
+ return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ }
+
+ key, err := tryJWKS(decryptionKey, obj.Header)
+ if err != nil {
+ return nil, err
+ }
+ decrypter, err := newDecrypter(key)
+ if err != nil {
+ return nil, err
+ }
+
+ cipher := getContentCipher(headers.getEncryption())
+ if cipher == nil {
+ return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ var plaintext []byte
+ recipient := obj.recipients[0]
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ }
+
+ if plaintext == nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ if err != nil {
+ return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+ }
+ }
+
+ return plaintext, nil
+}
+
+// DecryptMulti decrypts and validates the object and returns the plaintexts,
+// with support for multiple recipients. It returns the index of the recipient
+// for which the decryption was successful, the merged headers for that recipient,
+// and the plaintext.
+//
+// The decryptionKey argument must have one of the types allowed for the
+// decryptionKey argument of Decrypt().
+//
+// Automatically decompresses plaintext, but returns an error if the decompressed
+// data would be >250kB or >10x the size of the compressed data, whichever is larger.
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
+ globalHeaders := obj.mergedHeaders(nil)
+
+ critical, err := globalHeaders.getCritical()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ }
+
+ key, err := tryJWKS(decryptionKey, obj.Header)
+ if err != nil {
+ return -1, Header{}, nil, err
+ }
+ decrypter, err := newDecrypter(key)
+ if err != nil {
+ return -1, Header{}, nil, err
+ }
+
+ encryption := globalHeaders.getEncryption()
+ cipher := getContentCipher(encryption)
+ if cipher == nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ index := -1
+ var plaintext []byte
+ var headers rawHeader
+
+ for i, recipient := range obj.recipients {
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ if err == nil {
+ index = i
+ headers = recipientHeaders
+ break
+ }
+ }
+ }
+
+ if plaintext == nil {
+ return -1, Header{}, nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+ }
+ }
+
+ sanitized, err := headers.sanitized()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err)
+ }
+
+ return index, sanitized, plaintext, err
+}
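And a companion sketch for the multi-recipient path: NewMultiEncrypter wraps one content key for each recipient, and DecryptMulti reports which recipient's key succeeded (key IDs and payloads are illustrative):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/go-jose/go-jose/v4"
)

func main() {
	k1, _ := rsa.GenerateKey(rand.Reader, 2048)
	k2, _ := rsa.GenerateKey(rand.Reader, 2048)

	enc, _ := jose.NewMultiEncrypter(jose.A256GCM, []jose.Recipient{
		{Algorithm: jose.RSA_OAEP_256, Key: &k1.PublicKey, KeyID: "key-1"},
		{Algorithm: jose.RSA_OAEP_256, Key: &k2.PublicKey, KeyID: "key-2"},
	}, nil)

	obj, _ := enc.Encrypt([]byte("shared secret"))

	idx, hdr, plaintext, _ := obj.DecryptMulti(k2)
	fmt.Println(idx, hdr.KeyID, string(plaintext)) // 1 key-2 shared secret
}
```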
diff --git a/vendor/github.com/go-jose/go-jose/v4/doc.go b/vendor/github.com/go-jose/go-jose/v4/doc.go
new file mode 100644
index 0000000000000..0ad40ca085f1b
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/doc.go
@@ -0,0 +1,25 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. It implements encryption and signing based on
+the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
+Token support available in a sub-package. The library supports both the compact
+and JWS/JWE JSON Serialization formats, and has optional support for multiple
+recipients.
+*/
+package jose
diff --git a/vendor/github.com/go-jose/go-jose/v4/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go
new file mode 100644
index 0000000000000..4f6e0d4a5cf65
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/encoding.go
@@ -0,0 +1,228 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+ "strings"
+ "unicode"
+
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// Helper function to serialize known-good objects.
+// Precondition: value is not a nil pointer.
+func mustSerializeJSON(value interface{}) []byte {
+ out, err := json.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+ // We never want to serialize the top-level value "null," since it's not a
+ // valid JOSE message. But if a caller passes in a nil pointer to this method,
+ // MarshalJSON will happily serialize it as the top-level value "null". If
+ // that value is then embedded in another operation, for instance by being
+ // base64-encoded and fed as input to a signing algorithm
+ // (https://github.com/go-jose/go-jose/issues/22), the result will be
+ // incorrect. Because this method is intended for known-good objects, and a nil
+ // pointer is not a known-good object, we are free to panic in this case.
+ // Note: It's not possible to directly check whether the data pointed at by an
+ // interface is a nil pointer, so we do this hacky workaround.
+ // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
+ if string(out) == "null" {
+ panic("Tried to serialize a nil pointer.")
+ }
+ return out
+}
+
+// Strip all newlines and whitespace
+func stripWhitespace(data string) string {
+ buf := strings.Builder{}
+ buf.Grow(len(data))
+ for _, r := range data {
+ if !unicode.IsSpace(r) {
+ buf.WriteRune(r)
+ }
+ }
+ return buf.String()
+}
+
+// Perform compression based on algorithm
+func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return deflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Perform decompression based on algorithm
+func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return inflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// deflate compresses the input.
+func deflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+
+ // Writing to byte buffer, err is always nil
+ writer, _ := flate.NewWriter(output, 1)
+ _, _ = io.Copy(writer, bytes.NewBuffer(input))
+
+ err := writer.Close()
+ return output.Bytes(), err
+}
+
+// inflate decompresses the input.
+//
+// Errors if the decompressed data would be >250kB or >10x the size of the
+// compressed data, whichever is larger.
+func inflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+ reader := flate.NewReader(bytes.NewBuffer(input))
+
+ maxDecompressedSize := max(250_000, 10*int64(len(input)))
+
+ limit := maxDecompressedSize + 1
+ n, err := io.CopyN(output, reader, limit)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+ if n == limit {
+ return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxDecompressedSize)
+ }
+
+ err = reader.Close()
+ return output.Bytes(), err
+}
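A small in-package sketch of the guard above: a few megabytes of zeros compress to a tiny DEFLATE stream, so inflating it would blow past both the 250 kB floor and the 10x ratio and is rejected. Since deflate and inflate are unexported, this only runs inside the package (e.g. from a test); the function name is hypothetical and "fmt" is assumed to be imported:

```go
// demoInflateGuard shows the decompression limit rejecting a "zip bomb".
func demoInflateGuard() {
	// 3 MB of zeros deflates to only a few kilobytes.
	compressed, _ := deflate(make([]byte, 3_000_000))

	_, err := inflate(compressed)
	fmt.Println(err) // uncompressed data would be too large (>250000 bytes)
}
```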
+
+// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
+type byteBuffer struct {
+ data []byte
+}
+
+func newBuffer(data []byte) *byteBuffer {
+ if data == nil {
+ return nil
+ }
+ return &byteBuffer{
+ data: data,
+ }
+}
+
+func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
+ if len(data) > length {
+ panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
+ }
+ pad := make([]byte, length-len(data))
+ return newBuffer(append(pad, data...))
+}
+
+func newBufferFromInt(num uint64) *byteBuffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, num)
+ return newBuffer(bytes.TrimLeft(data, "\x00"))
+}
+
+func (b *byteBuffer) MarshalJSON() ([]byte, error) {
+ return json.Marshal(b.base64())
+}
+
+func (b *byteBuffer) UnmarshalJSON(data []byte) error {
+ var encoded string
+ err := json.Unmarshal(data, &encoded)
+ if err != nil {
+ return err
+ }
+
+ if encoded == "" {
+ return nil
+ }
+
+ decoded, err := base64.RawURLEncoding.DecodeString(encoded)
+ if err != nil {
+ return err
+ }
+
+ *b = *newBuffer(decoded)
+
+ return nil
+}
+
+func (b *byteBuffer) base64() string {
+ return base64.RawURLEncoding.EncodeToString(b.data)
+}
+
+func (b *byteBuffer) bytes() []byte {
+ // Handling nil here allows us to transparently handle nil slices when serializing.
+ if b == nil {
+ return nil
+ }
+ return b.data
+}
+
+func (b byteBuffer) bigInt() *big.Int {
+ return new(big.Int).SetBytes(b.data)
+}
+
+func (b byteBuffer) toInt() int {
+ return int(b.bigInt().Int64())
+}
+
+func base64EncodeLen(sl []byte) int {
+ return base64.RawURLEncoding.EncodedLen(len(sl))
+}
+
+func base64JoinWithDots(inputs ...[]byte) string {
+ if len(inputs) == 0 {
+ return ""
+ }
+
+ // Count of dots.
+ totalCount := len(inputs) - 1
+
+ for _, input := range inputs {
+ totalCount += base64EncodeLen(input)
+ }
+
+ out := make([]byte, totalCount)
+ startEncode := 0
+ for i, input := range inputs {
+ base64.RawURLEncoding.Encode(out[startEncode:], input)
+
+ if i == len(inputs)-1 {
+ continue
+ }
+
+ startEncode += base64EncodeLen(input)
+ out[startEncode] = '.'
+ startEncode++
+ }
+
+ return string(out)
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE
new file mode 100644
index 0000000000000..74487567632c8
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md
new file mode 100644
index 0000000000000..86de5e5581f58
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+ [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+ This is to avoid differences in the interpretation of JOSE messages between
+ go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+ input whenever we detect a duplicate. Rather than trying to work with malformed
+ data, we prefer to reject it right away.
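+
+As an illustrative sketch (assuming this package is imported as `json`), an
+object with a repeated member name is rejected instead of silently keeping the
+last value:
+
+```go
+var out map[string]interface{}
+err := json.Unmarshal([]byte(`{"kid": "a", "kid": "b"}`), &out)
+// err: json: duplicate key 'kid' in object
+```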
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go
new file mode 100644
index 0000000000000..50634dd84781a
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/decode.go
@@ -0,0 +1,1216 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag).
+// Unlike the standard library, this fork requires an exact (case-sensitive) match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
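+//
+// A minimal, illustrative example:
+//
+//	var claims map[string]interface{}
+//	if err := Unmarshal([]byte(`{"iss":"joe","exp":1300819380}`), &claims); err != nil {
+//		// handle malformed input (e.g. duplicate keys)
+//	}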
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
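+// NumberUnmarshalType selects how a JSON number is decoded when the target is
+// an interface{} value.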
+type NumberUnmarshalType int
+
+const (
+ // unmarshal a JSON number into an interface{} as a float64
+ UnmarshalFloat NumberUnmarshalType = iota
+ // unmarshal a JSON number into an interface{} as a `json.Number`
+ UnmarshalJSONNumber
+ // unmarshal a JSON number into an interface{} as an int64
+ // if the value is an integer, otherwise as a float64
+ UnmarshalIntOrFloat
+)
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ numberType NumberUnmarshalType
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, []byte(key)) {
+ f = ff
+ break
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64, an int64, or a Number
+// depending on d.numberType.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ switch d.numberType {
+
+ case UnmarshalJSONNumber:
+ return Number(s), nil
+ case UnmarshalIntOrFloat:
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err == nil {
+ return v, nil
+ }
+
+ // fall back to float parsing; this also accepts integers written in scientific notation
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+
+ // if the value has no fractional part, return it as an int64
+ if fi, fd := math.Modf(f); fd == 0.0 {
+ return int64(fi), nil
+ }
+ return f, nil
+ default:
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+ }
+
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ // Empty string given.
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules differ from Go string literals, so we cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go
new file mode 100644
index 0000000000000..98de68ce1e9b6
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+//
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
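+//
+// A minimal, illustrative example using field tags:
+//
+//	type header struct {
+//		Algorithm string `json:"alg"`
+//		KeyID     string `json:"kid,omitempty"`
+//	}
+//	b, _ := Marshal(header{Algorithm: "RS256"}) // {"alg":"RS256"}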
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML