// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

// Contains code shared by both encode and decode.

// Some shared ideas around encoding/decoding
// ------------------------------------------
//
// If an interface{} is passed, we first do a type assertion to see if it is
// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
//
// If we start with a reflect.Value, we are already in reflect.Value land and
// will try to grab the function for the underlying Type and directly call that function.
// This is more performant than calling reflect.Value.Interface().
//
// This still helps us bypass many layers of reflection, and gives the best performance.
//
// Containers
// ------------
// Containers in the stream are either associative arrays (key-value pairs) or
// regular arrays (indexed by incrementing integers).
//
// Some streams support indefinite-length containers, and use a breaking
// byte-sequence to denote that the container has come to an end.
//
// Some streams also are text-based, and use explicit separators to denote the
// end/beginning of different values.
//
// Philosophy
// ------------
// On decode, this codec will update containers appropriately:
//    - If struct, update fields from stream into fields of struct.
//      If a field in the stream is not found in the struct, handle appropriately (based on option).
//      If a struct field has no corresponding value in the stream, leave it AS IS.
//      If nil in stream, set value to nil/zero value.
//    - If map, update map from stream.
//      If the stream value is NIL, set the map to nil.
//    - If slice, try to update up to length of array in stream.
//      If container len is less than stream array length,
//      and container cannot be expanded, it is handled (based on option).
//      This means you can decode a 4-element stream array into a 1-element array.
//
// ------------------------------------
// On encode, the user can specify omitEmpty. This means that the value will be omitted
// if it is the zero value. The problem may occur during decode, where omitted values do not affect
// the value being decoded into. This means that if decoding into a struct with an
// int field with current value=5, and the field is omitted in the stream, then after
// decoding, the value will still be 5 (not 0).
// omitEmpty only works if you guarantee that you always decode into zero-values.
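//
// For example, a minimal sketch of that caveat (the type and tag are
// illustrative, not from this package):
//
//	type T struct {
//		A int `codec:"a,omitempty"`
//	}
//	// Encoding T{} omits "a" from the stream entirely.
//	v := T{A: 5}
//	// Decoding that stream into &v leaves v.A == 5, not 0.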
//
// ------------------------------------
// We could have truncated a map to remove keys not available in the stream,
// or set values in the struct which are not in the stream to their zero values.
// We decided against it because there is no efficient way to do it.
// We may introduce it as an option later.
// However, that will require enabling it for both runtime and code generation modes.
//
// To support truncate, we need to do 2 passes over the container:
//   map:
//   - first collect all keys (e.g. in k1)
//   - for each key in stream, mark k1 that the key should not be removed
//   - after updating map, do second pass and call delete for all keys in k1 which are not marked
//   struct:
//   - for each field, track the *typeInfo s1
//   - iterate through all s1, and for each one not marked, set value to zero
//   - this involves checking the possible anonymous fields which are nil ptrs.
//     too much work.
//
// ------------------------------------------
// Error Handling is done within the library using panic.
//
// This way, the code doesn't have to keep checking if an error has happened,
// and we don't have to keep sending the error value along with each call
// or storing it in the En|Decoder and checking it constantly along the way.
//
// We considered storing the error in the En|Decoder:
//   - once it has its err field set, it cannot be used again.
//   - panicking will be optional, controlled by a const flag.
//   - code should always check the error first and return early.
//
// We eventually decided against it as it makes the code clumsier to always
// check for these error conditions.
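//
// A minimal sketch of the resulting pattern at the library boundary
// (mustEncode here is an illustrative stand-in for the internal panicking path):
//
//	func encode(v interface{}) (err error) {
//		defer func() {
//			if r := recover(); r != nil {
//				err = fmt.Errorf("codec: %v", r) // convert the panic back into an error
//			}
//		}()
//		mustEncode(v) // internal code panics on any failure
//		return
//	}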
//
// ------------------------------------------
// We use sync.Pool only to aid long-lived objects shared across multiple goroutines.
// Encoder, Decoder, enc|decDriver, reader|writer, etc do not fall into this bucket.
//
// Also, GC is much better now, eliminating some of the reasons to use a shared pool structure.
// Instead, the short-lived objects use free-lists that live as long as the object exists.
//
// ------------------------------------------
// Performance is affected by the following:
//    - Bounds Checking
//    - Inlining
//    - Pointer chasing
// This package tries hard to manage the performance impact of these.
//
// ------------------------------------------
// To alleviate the performance impact of pointer-chasing:
//    - Prefer non-pointer values in a struct field
//    - Refer to these directly within helper classes
//      e.g. json.go refers directly to d.d.decRd
//
// We made the changes to embed En/Decoder in en/decDriver,
// but we had to explicitly reference the fields as opposed to using a function
// to get the better performance that we were looking for.
// For example, we explicitly call d.d.decRd.fn() instead of d.d.r().fn().
//
// ------------------------------------------
// Bounds Checking
//    - Allow bytesDecReader to incur "bounds check error", and recover that as an io error.
//      This allows the bounds check branch to always be taken by the branch predictor,
//      giving better performance (in theory), while ensuring that the code is shorter.
//
// ------------------------------------------
// Escape Analysis
//    - Prefer to return non-pointers if the value is used right away.
//      Newly allocated values returned as pointers will be heap-allocated as they escape.
//
// Prefer functions and methods that
//    - take no parameters and
//    - return no results and
//    - do not allocate.
// These are optimized by the runtime.
// For example, in json, we have dedicated functions for ReadMapElemKey, etc
// which do not delegate to readDelim, as readDelim takes a parameter.
// The difference in runtime was as much as 5%.
//
// ------------------------------------------
// Handling Nil
//   - In dynamic (reflection) mode, decodeValue and encodeValue handle nil at the top
//   - Consequently, methods used with them as a parent in the chain e.g. kXXX
//     methods do not handle nil.
//   - Fastpath methods also do not handle nil.
//     The switch called in (en|de)code(...) handles it so the dependent calls don't have to.
//   - codecgen will handle nil before calling into the library for further work also.
//
// ------------------------------------------
// Passing reflect.Kind to functions that take a reflect.Value
//   - Note that reflect.Value.Kind() is very cheap, as it's fundamentally a binary AND of 2 numbers
//
// ------------------------------------------
// Transient values during decoding
//
// With reflection, the stack is not used. Consequently, values which may be stack-allocated in
// normal use will cause a heap allocation when using reflection.
//
// There are cases where we know that a value is transient, and we just need to decode into it
// temporarily so we can right away use its value for something else.
//
// In these situations, we can elide the heap allocation by being deliberate with use of a pre-cached
// scratch memory or scratch value.
//
// We use this for the following situations:
// - decode into a temp value x, and then set x into an interface
// - decode into a temp value, for use as a map key, to look up a map value
// - decode into a temp value, for use as a map value, to set into a map
// - decode into a temp value, for sending into a channel
//
// By definition, transient values are NEVER pointer-shaped values,
// like pointer, func, map, chan. Using transient for pointer-shaped values
// can lead to data corruption when GC tries to follow what it saw as a pointer at one point.
//
// In general, transient values are values which can be decoded as an atomic value
// using a single call to the decDriver. This naturally includes bool or numeric types.
//
// Note that some values which "contain" pointers, specifically string and slice,
// can also be transient. In the case of string, it is decoded as an atomic value.
// In the case of a slice, decoding into its elements always uses an addressable
// value in memory ie we grow the slice, and then decode directly into the memory
// address corresponding to that index in the slice.
//
// To handle these string and slice values, we have to use a scratch value
// which has the same shape as a string or slice.
//
// Consequently, the full range of types which can be transient is:
// - numbers
// - bool
// - string
// - slice
//
// but for string and slice, we MUST use a scratch space with that element
// being defined as an unsafe.Pointer to start with.
//
// We have to be careful with maps. Because we iterate map keys and values during a range,
// we must have 2 variants of the scratch space/value, for map keys and map values separately.
//
// These are the TransientAddrK and TransientAddr2K methods of decPerType.
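//
// A sketch of the scratch-space shape implied above (illustrative only; the
// real decPerType fields differ):
//
//	type transientScratch struct {
//		p unsafe.Pointer // first word MUST be pointer-shaped, so GC scans it correctly
//		n, c int         // trailing words, enough to cover a string/slice header
//	}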

import (
	"encoding"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unicode/utf8"
)

// if debugging is true, then
//   - within Encode/Decode, do not recover from panics
//   - etc
//
// Note: Negative tests that check for errors will fail, so only use this
// when debugging, and preferably run only one test at a time.
//
// Note: RPC tests depend on getting the error from an Encode/Decode call.
// Consequently, they will always fail if debugging = true.
const debugging = false

const (
	// containerLenUnknown is the length returned from Read(Map|Array)Len
	// when a format doesn't know the length a priori.
	// For example, json doesn't pre-determine the length of a container (sequence/map).
	containerLenUnknown = -1

	// containerLenNil is the length returned from Read(Map|Array)Len
	// when a 'nil' was encountered in the stream.
	containerLenNil = math.MinInt32

	// [N]byte is handled by converting to []byte first,
	// and sending to the dedicated fast-path function for []byte.
	//
	// Code exists in case our understanding is wrong.
	// Keep the defensive code behind this flag, so we can remove/hide it if needed.
	// For now, we enable the defensive code (ie set it to true).
	handleBytesWithinKArray = true

	// Support encoding.(Binary|Text)(Unm|M)arshaler.
	// This constant flag will enable or disable it.
	supportMarshalInterfaces = true

	// bytesFreeListNoCache is used for debugging, when we want to skip using a cache of []byte.
	bytesFreeListNoCache = false

	// size of the cacheline: defaulting to value for archs: amd64, arm64, 386
	// should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
	cacheLineSize = 64

	wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
	wordSize     = wordSizeBits / 8

	// MARKER: determines whether to skip calling fastpath(En|De)codeTypeSwitch.
	// Calling the fastpath switch in encode() or decode() could be redundant,
	// as we still have to introspect it again within fnLoad
	// to determine the function to use for values of that type.
	skipFastpathTypeSwitchInDirectCall = false
)

const cpu32Bit = ^uint(0)>>32 == 0

type rkind byte

const (
	rkindPtr    = rkind(reflect.Ptr)
	rkindString = rkind(reflect.String)
	rkindChan   = rkind(reflect.Chan)
)

type mapKeyFastKind uint8

const (
	mapKeyFastKind32 = iota + 1
	mapKeyFastKind32ptr
	mapKeyFastKind64
	mapKeyFastKind64ptr
	mapKeyFastKindStr
)

var (
	// use a global mutex to ensure each Handle is initialized.
	// We do this, so we don't have to store the basicHandle mutex
	// directly in BasicHandle, so it can be shallow-copied.
	handleInitMu sync.Mutex

	must mustHdl
	halt panicHdl

	digitCharBitset      bitset256
	numCharBitset        bitset256
	whitespaceCharBitset bitset256
	asciiAlphaNumBitset  bitset256

	// numCharWithExpBitset64 bitset64
	// numCharNoExpBitset64   bitset64
	// whitespaceCharBitset64 bitset64
	//
	// // hasptrBitset sets bit for all kinds which always have internal pointers
	// hasptrBitset bitset32

	// refBitset sets bit for all kinds which are direct internal references
	refBitset bitset32

	// isnilBitset sets bit for all kinds which can be compared to nil
	isnilBitset bitset32

	// numBoolBitset sets bit for all number and bool kinds
	numBoolBitset bitset32

	// numBoolStrSliceBitset sets bits for all kinds which are numbers, bool, strings and slices
	numBoolStrSliceBitset bitset32

	// scalarBitset sets bit for all kinds which are scalars/primitives and thus immutable
	scalarBitset bitset32

	mapKeyFastKindVals [32]mapKeyFastKind

	// codecgen is set to true by codecgen, so that tests, etc can use this information as needed.
	codecgen bool

	oneByteArr    [1]byte
	zeroByteSlice = oneByteArr[:0:0]

	eofReader devNullReader
)

var (
	errMapTypeNotMapKind     = errors.New("MapType MUST be of Map Kind")
	errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")

	errExtFnWriteExtUnsupported   = errors.New("BytesExt.WriteExt is not supported")
	errExtFnReadExtUnsupported    = errors.New("BytesExt.ReadExt is not supported")
	errExtFnConvertExtUnsupported = errors.New("InterfaceExt.ConvertExt is not supported")
	errExtFnUpdateExtUnsupported  = errors.New("InterfaceExt.UpdateExt is not supported")

	errPanicUndefined = errors.New("panic: undefined error")

	errHandleInited = errors.New("cannot modify initialized Handle")

	errNoFormatHandle = errors.New("no handle (cannot identify format)")
)

var pool4tiload = sync.Pool{
	New: func() interface{} {
		return &typeInfoLoad{
			etypes:   make([]uintptr, 0, 4),
			sfis:     make([]structFieldInfo, 0, 4),
			sfiNames: make(map[string]uint16, 4),
		}
	},
}
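
// A typical Get/Put cycle for pool4tiload (a sketch of standard sync.Pool
// usage; actual call sites in this package may differ):
//
//	v := pool4tiload.Get().(*typeInfoLoad)
//	v.reset()          // clear any state left by a previous user
//	// ... use v while loading up a typeInfo ...
//	pool4tiload.Put(v) // return it to the pool for reuse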

func init() {
	xx := func(f mapKeyFastKind, k ...reflect.Kind) {
		for _, v := range k {
			mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' is equal to 'v & 31'
		}
	}

	var f mapKeyFastKind

	f = mapKeyFastKind64
	if wordSizeBits == 32 {
		f = mapKeyFastKind32
	}
	xx(f, reflect.Int, reflect.Uint, reflect.Uintptr)

	f = mapKeyFastKind64ptr
	if wordSizeBits == 32 {
		f = mapKeyFastKind32ptr
	}
	xx(f, reflect.Ptr)

	xx(mapKeyFastKindStr, reflect.String)
	xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32)
	xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64)

	numBoolBitset.
		set(byte(reflect.Bool)).
		set(byte(reflect.Int)).
		set(byte(reflect.Int8)).
		set(byte(reflect.Int16)).
		set(byte(reflect.Int32)).
		set(byte(reflect.Int64)).
		set(byte(reflect.Uint)).
		set(byte(reflect.Uint8)).
		set(byte(reflect.Uint16)).
		set(byte(reflect.Uint32)).
		set(byte(reflect.Uint64)).
		set(byte(reflect.Uintptr)).
		set(byte(reflect.Float32)).
		set(byte(reflect.Float64)).
		set(byte(reflect.Complex64)).
		set(byte(reflect.Complex128))

	numBoolStrSliceBitset = numBoolBitset

	numBoolStrSliceBitset.
		set(byte(reflect.String)).
		set(byte(reflect.Slice))

	scalarBitset = numBoolBitset

	scalarBitset.
		set(byte(reflect.String))

	// MARKER: reflect.Array is not a scalar, as its contents can be modified.

	refBitset.
		set(byte(reflect.Map)).
		set(byte(reflect.Ptr)).
		set(byte(reflect.Func)).
		set(byte(reflect.Chan)).
		set(byte(reflect.UnsafePointer))

	isnilBitset = refBitset

	isnilBitset.
		set(byte(reflect.Interface)).
		set(byte(reflect.Slice))

	// hasptrBitset = isnilBitset
	//
	// hasptrBitset.
	// 	set(byte(reflect.String))

	for i := byte(0); i <= utf8.RuneSelf; i++ {
		if (i >= '0' && i <= '9') || (i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z') {
			asciiAlphaNumBitset.set(i)
		}
		switch i {
		case ' ', '\t', '\r', '\n':
			whitespaceCharBitset.set(i)
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			digitCharBitset.set(i)
			numCharBitset.set(i)
		case '.', '+', '-':
			numCharBitset.set(i)
		case 'e', 'E':
			numCharBitset.set(i)
		}
	}
}

// driverStateManager supports the runtime state of an (enc|dec)Driver.
//
// During a side(En|De)code call, we can capture the state, reset it,
// and then restore it later to continue the primary encoding/decoding.
type driverStateManager interface {
	resetState()
	captureState() interface{}
	restoreState(state interface{})
}
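
// A sketch of capturing a driver's state around a nested ("side") decode and
// restoring it to continue the primary decode (drv is any driver implementing
// the interface above):
//
//	st := drv.captureState() // save the current state
//	drv.resetState()         // start the side decode from a clean slate
//	// ... perform the side decode ...
//	drv.restoreState(st)     // resume the primary decode where it left off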

type bdAndBdread struct {
	bdRead bool
	bd     byte
}

func (x bdAndBdread) captureState() interface{}   { return x }
func (x *bdAndBdread) resetState()                { x.bd, x.bdRead = 0, false }
func (x *bdAndBdread) reset()                     { x.resetState() }
func (x *bdAndBdread) restoreState(v interface{}) { *x = v.(bdAndBdread) }

type clsErr struct {
	err    error // error on closing
	closed bool  // is it closed?
}

type charEncoding uint8

const (
	_ charEncoding = iota // make 0 unset
	cUTF8
	cUTF16LE
	cUTF16BE
	cUTF32LE
	cUTF32BE
	// Deprecated: not a true char encoding value
	cRAW charEncoding = 255
)

// valueType is the stream type
type valueType uint8

const (
	valueTypeUnset valueType = iota
	valueTypeNil
	valueTypeInt
	valueTypeUint
	valueTypeFloat
	valueTypeBool
	valueTypeString
	valueTypeSymbol
	valueTypeBytes
	valueTypeMap
	valueTypeArray
	valueTypeTime
	valueTypeExt

	// valueTypeInvalid = 0xff
)

var valueTypeStrings = [...]string{
	"Unset",
	"Nil",
	"Int",
	"Uint",
	"Float",
	"Bool",
	"String",
	"Symbol",
	"Bytes",
	"Map",
	"Array",
	"Timestamp",
	"Ext",
}

func (x valueType) String() string {
	if int(x) < len(valueTypeStrings) {
		return valueTypeStrings[x]
	}
	return strconv.FormatInt(int64(x), 10)
}

// note that containerMapStart and containerArrayStart are not sent.
// This is because the ReadXXXStart and EncodeXXXStart already do these.
type containerState uint8

const (
	_ containerState = iota

	containerMapStart
	containerMapKey
	containerMapValue
	containerMapEnd
	containerArrayStart
	containerArrayElem
	containerArrayEnd
)

// do not recurse if a containing type refers to an embedded type
// which refers back to its containing type (via a pointer).
// The second time this back-reference happens, break out,
// so as not to cause an infinite loop.
const rgetMaxRecursion = 2

// fauxUnion is used to keep track of the primitives decoded.
//
// Without it, we would have to decode each primitive and wrap it
// in an interface{}, causing an allocation.
// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
// so we can rest assured that no other decoding happens while these
// primitives are being decoded.
//
// maps and arrays are not handled by this mechanism.
type fauxUnion struct {
	// r RawExt // used for RawExt, uint, []byte.

	// primitives below
	u uint64
	i int64
	f float64
	l []byte
	s string

	// ---- cpu cache line boundary?
	t time.Time
	b bool

	// state
	v valueType
}

// typeInfoLoad is a transient object used while loading up a typeInfo.
type typeInfoLoad struct {
	etypes   []uintptr
	sfis     []structFieldInfo
	sfiNames map[string]uint16
}

func (x *typeInfoLoad) reset() {
	x.etypes = x.etypes[:0]
	x.sfis = x.sfis[:0]
	for k := range x.sfiNames { // optimized to zero the map
		delete(x.sfiNames, k)
	}
}

// mirror json.Marshaler and json.Unmarshaler here,
// so we don't import the encoding/json package

type jsonMarshaler interface {
	MarshalJSON() ([]byte, error)
}
type jsonUnmarshaler interface {
	UnmarshalJSON([]byte) error
}

type isZeroer interface {
	IsZero() bool
}

type isCodecEmptyer interface {
	IsCodecEmpty() bool
}

type codecError struct {
	err    error
	name   string
	pos    int
	encode bool
}

func (e *codecError) Cause() error {
	return e.err
}

func (e *codecError) Unwrap() error {
	return e.err
}

func (e *codecError) Error() string {
	if e.encode {
		return fmt.Sprintf("%s encode error: %v", e.name, e.err)
	}
	return fmt.Sprintf("%s decode error [pos %d]: %v", e.name, e.pos, e.err)
}

func wrapCodecErr(in error, name string, numbytesread int, encode bool) (out error) {
	x, ok := in.(*codecError)
	if ok && x.pos == numbytesread && x.name == name && x.encode == encode {
		return in
	}
	return &codecError{in, name, numbytesread, encode}
}
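
// Since codecError implements Unwrap, callers can reach the underlying error
// with the standard errors helpers (io.ErrUnexpectedEOF is just an example
// sentinel):
//
//	err := wrapCodecErr(io.ErrUnexpectedEOF, "json", 12, false)
//	errors.Is(err, io.ErrUnexpectedEOF) // true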

var (
	bigen bigenHelper

	bigenstd = binary.BigEndian

	structInfoFieldName = "_struct"

	mapStrIntfTyp  = reflect.TypeOf(map[string]interface{}(nil))
	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
	intfSliceTyp   = reflect.TypeOf([]interface{}(nil))
	intfTyp        = intfSliceTyp.Elem()

	reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()

	stringTyp     = reflect.TypeOf("")
	timeTyp       = reflect.TypeOf(time.Time{})
	rawExtTyp     = reflect.TypeOf(RawExt{})
	rawTyp        = reflect.TypeOf(Raw{})
	uintptrTyp    = reflect.TypeOf(uintptr(0))
	uint8Typ      = reflect.TypeOf(uint8(0))
	uint8SliceTyp = reflect.TypeOf([]uint8(nil))
	uintTyp       = reflect.TypeOf(uint(0))
	intTyp        = reflect.TypeOf(int(0))

	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()

	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()

	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()

	jsonMarshalerTyp   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
	jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()

	selferTyp                = reflect.TypeOf((*Selfer)(nil)).Elem()
	missingFielderTyp        = reflect.TypeOf((*MissingFielder)(nil)).Elem()
	iszeroTyp                = reflect.TypeOf((*isZeroer)(nil)).Elem()
	isCodecEmptyerTyp        = reflect.TypeOf((*isCodecEmptyer)(nil)).Elem()
	isSelferViaCodecgenerTyp = reflect.TypeOf((*isSelferViaCodecgener)(nil)).Elem()

	uint8TypId      = rt2id(uint8Typ)
	uint8SliceTypId = rt2id(uint8SliceTyp)
	rawExtTypId     = rt2id(rawExtTyp)
	rawTypId        = rt2id(rawTyp)
	intfTypId       = rt2id(intfTyp)
	timeTypId       = rt2id(timeTyp)
	stringTypId     = rt2id(stringTyp)

	mapStrIntfTypId  = rt2id(mapStrIntfTyp)
	mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
	intfSliceTypId   = rt2id(intfSliceTyp)
	// mapBySliceTypId  = rt2id(mapBySliceTyp)

	intBitsize  = uint8(intTyp.Bits())
	uintBitsize = uint8(uintTyp.Bits())

	// bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}

	chkOvf checkOverflow
)

var defTypeInfos = NewTypeInfos([]string{"codec", "json"})

// SelfExt is a sentinel extension signifying that types
// registered with it SHOULD be encoded and decoded
// based on the native mode of the format.
//
// This allows users to define a tag for an extension,
// but signify that the types should be encoded/decoded as the native encoding.
// This way, users need not also define how to encode or decode the extension.
var SelfExt = &extFailWrapper{}

// Selfer defines methods by which a value can encode or decode itself.
//
// Any type which implements Selfer will be able to encode or decode itself.
// Consequently, during (en|de)code, this takes precedence over
// (text|binary)(M|Unm)arshal or extension support.
//
// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
// For example, the snippet below will cause such an error.
//
//	type testSelferRecur struct{}
//	func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
//	func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
//
// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
// This is because, during each decode, we first check if the next set of bytes
// represents nil, and if so, we just set the value to nil.
type Selfer interface {
	CodecEncodeSelf(*Encoder)
	CodecDecodeSelf(*Decoder)
}
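
// A sketch of a conforming implementation: it encodes its fields (never the
// value itself), so there is no recursion (the Point type is illustrative):
//
//	type Point struct{ X, Y int }
//
//	func (p *Point) CodecEncodeSelf(e *Encoder) { e.MustEncode([2]int{p.X, p.Y}) }
//	func (p *Point) CodecDecodeSelf(d *Decoder) {
//		var a [2]int
//		d.MustDecode(&a)
//		p.X, p.Y = a[0], a[1]
//	}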

type isSelferViaCodecgener interface {
	codecSelferViaCodecgen()
}

// MissingFielder defines the interface allowing structs to internally decode or encode
// values which do not map to struct fields.
//
// We expect that this interface is bound to a pointer type (so the mutation function works).
//
// A use-case is if a version of a type unexports a field, but you want compatibility between
// both versions during encoding and decoding.
//
// Note that the interface is completely ignored during codecgen.
type MissingFielder interface {
	// CodecMissingField is called to set a missing field and value pair.
	//
	// It returns true if the missing field was set on the struct.
	CodecMissingField(field []byte, value interface{}) bool

	// CodecMissingFields returns the set of fields which are not struct fields.
	//
	// Note that the returned map may be mutated by the caller.
	CodecMissingFields() map[string]interface{}
}
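
// A sketch of an implementing type which stashes unknown stream fields in a
// map (the type and field names are illustrative):
//
//	type Record struct {
//		Name  string
//		extra map[string]interface{}
//	}
//
//	func (r *Record) CodecMissingField(field []byte, value interface{}) bool {
//		if r.extra == nil {
//			r.extra = make(map[string]interface{})
//		}
//		r.extra[string(field)] = value
//		return true // the missing field was handled
//	}
//
//	func (r *Record) CodecMissingFields() map[string]interface{} { return r.extra }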

// MapBySlice is a tag interface that denotes the slice or array value should encode as a map
// in the stream, and can be decoded from a map in the stream.
//
// The slice or array must contain a sequence of key-value pairs.
// The length of the slice or array must be even (fully divisible by 2).
//
// This affords storing a map in a specific sequence in the stream.
//
// Example usage:
//
//	type T1 []string          // or []int or []Point or any other "slice" type
//	func (T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
//	type T2 struct { KeyValues T1 }
//
//	var kvs = []string{"one", "1", "two", "2", "three", "3"}
//	var v2 = T2{ KeyValues: T1(kvs) }
//	// v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
//
// The support of MapBySlice affords the following:
//   - A slice or array type which implements MapBySlice will be encoded as a map
//   - A slice can be decoded from a map in the stream
type MapBySlice interface {
	MapBySlice()
}

// basicHandleRuntimeState holds onto all BasicHandle runtime and cached config information.
//
// Storing this outside BasicHandle allows us to create shallow copies of a Handle,
// which can be used e.g. when we need to modify config fields temporarily.
// Shallow copies are used within tests, so we can modify some config fields for a test
// temporarily when running tests in parallel, without the risk that a test executing
// in parallel with other tests sees transient modified values not meant for it.
type basicHandleRuntimeState struct {
	// these are used during runtime.
	// At init time, they should have nothing in them.
	rtidFns      atomicRtidFnSlice
	rtidFnsNoExt atomicRtidFnSlice

	// Note: basicHandleRuntimeState is not comparable, due to these slices here (extHandle, intf2impls).
	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
	// These slices are used all the time, so keep as slices (not pointers).

	extHandle

	intf2impls

	mu sync.Mutex

	jsonHandle   bool
	binaryHandle bool

	// timeBuiltin is initialized from TimeNotBuiltin, and used internally.
	// once initialized, it cannot be changed, as the function for encoding/decoding time.Time
	// will have been cached and the TimeNotBuiltin value will not be consulted thereafter.
	timeBuiltin bool
	_           bool // padding
}

// BasicHandle encapsulates the common options and extension functions.
//
// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
type BasicHandle struct {
	// BasicHandle is always a part of a different type.
	// It doesn't have to fit into its own cache lines.

	// TypeInfos is used to get the type info for any type.
	//
	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
	TypeInfos *TypeInfos

	*basicHandleRuntimeState

	// ---- cache line

	DecodeOptions

	// ---- cache line

	EncodeOptions

	RPCOptions

	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
	//
	// All Handlers should know how to encode/decode time.Time as part of the core
	// format specification, or as a standard extension defined by the format.
	//
	// However, users can elect to handle time.Time as a custom extension, or via the
	// standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
	// To elect this behavior, users can set TimeNotBuiltin=true.
	//
	// Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
	// (for Cbor and Msgpack), where time.Time was not a builtin supported type.
	//
	// Note: DO NOT CHANGE AFTER FIRST USE.
	//
	// Once a Handle has been initialized (used), do not modify this option. It will be ignored.
	TimeNotBuiltin bool

	// ExplicitRelease is ignored and has no effect.
	//
	// Deprecated: Pools are only used for long-lived objects shared across goroutines.
	// It is maintained for backward compatibility.
	ExplicitRelease bool

	// ---- cache line
	inited uint32 // holds if inited, and also handle flags (binary encoding, json handler, etc)

}

// initHandle does a one-time initialization of the handle.
// After this is run, do not modify the Handle, as some modifications are ignored
// e.g. extensions, registered interfaces, TimeNotBuiltIn, etc
func initHandle(hh Handle) {
	x := hh.getBasicHandle()

	// MARKER: We need to simulate once.Do, to ensure no data race within the block.
	// Consequently, below would not work.
	//
	// if atomic.CompareAndSwapUint32(&x.inited, 0, 1) {
	// 	x.be = hh.isBinary()
	// 	x.js = hh.isJson
	// 	x.n = hh.Name()[0]
	// }

	// simulate once.Do using our own stored flag and mutex, as a CompareAndSwap
	// is not sufficient, since a race condition can occur within the init(Handle) function.
	// init is made noinline, so that this function can be inlined by its caller.
	if atomic.LoadUint32(&x.inited) == 0 {
		x.initHandle(hh)
	}
}

func (x *BasicHandle) basicInit() {
	x.rtidFns.store(nil)
	x.rtidFnsNoExt.store(nil)
	x.timeBuiltin = !x.TimeNotBuiltin
}

func (x *BasicHandle) init() {}

func (x *BasicHandle) isInited() bool {
	return atomic.LoadUint32(&x.inited) != 0
}

// clearInited: DANGEROUS - only use in testing, etc
func (x *BasicHandle) clearInited() {
	atomic.StoreUint32(&x.inited, 0)
}

// TimeBuiltin returns whether time.Time OOTB support is used,
// based on the initial configuration of TimeNotBuiltin
func (x *basicHandleRuntimeState) TimeBuiltin() bool {
	return x.timeBuiltin
}

func (x *basicHandleRuntimeState) isJs() bool {
	return x.jsonHandle
}

func (x *basicHandleRuntimeState) isBe() bool {
	return x.binaryHandle
}

func (x *basicHandleRuntimeState) setExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
	rk := rt.Kind()
	for rk == reflect.Ptr {
		rt = rt.Elem()
		rk = rt.Kind()
	}

	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
	}

	rtid := rt2id(rt)
	// handle all natively supported types appropriately, so they cannot have an extension.
	// However, we do not return an error for these, as we do not document that.
	// Instead, we silently treat as a no-op, and return.
	switch rtid {
	case rawTypId, rawExtTypId:
		return
	case timeTypId:
		if x.timeBuiltin {
			return
		}
	}

	for i := range x.extHandle {
		v := &x.extHandle[i]
		if v.rtid == rtid {
			v.tag, v.ext = tag, ext
			return
		}
	}
	rtidptr := rt2id(reflect.PtrTo(rt))
	x.extHandle = append(x.extHandle, extTypeTagFn{rtid, rtidptr, rt, tag, ext})
	return
}

// initHandle should be called only from codec.initHandle global function.
// make it uninlineable, as it is called at most once for each handle.
//
//go:noinline
func (x *BasicHandle) initHandle(hh Handle) {
	handleInitMu.Lock()
	defer handleInitMu.Unlock() // use defer, as halt may panic below
	if x.inited == 0 {
		if x.basicHandleRuntimeState == nil {
			x.basicHandleRuntimeState = new(basicHandleRuntimeState)
		}
		x.jsonHandle = hh.isJson()
		x.binaryHandle = hh.isBinary()
		// ensure MapType and SliceType are of correct type
		if x.MapType != nil && x.MapType.Kind() != reflect.Map {
			halt.onerror(errMapTypeNotMapKind)
		}
		if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice {
			halt.onerror(errSliceTypeNotSliceKind)
		}
		x.basicInit()
		hh.init()
		atomic.StoreUint32(&x.inited, 1)
	}
}

func (x *BasicHandle) getBasicHandle() *BasicHandle {
	return x
}

func (x *BasicHandle) typeInfos() *TypeInfos {
	if x.TypeInfos != nil {
		return x.TypeInfos
	}
	return defTypeInfos
}

func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
	return x.typeInfos().get(rtid, rt)
}

func findRtidFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) {
	// binary search. adapted from sort/search.go.
	// Note: we use goto (instead of for loop) so this can be inlined.

	// h, i, j := 0, 0, len(s)
	var h uint // var h, i uint
	var j = uint(len(s))
LOOP:
	if i < j {
		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
		if s[h].rtid < rtid {
			i = h + 1
		} else {
			j = h
		}
		goto LOOP
	}
	if i < uint(len(s)) && s[i].rtid == rtid {
		fn = s[i].fn
	}
	return
}

func (x *BasicHandle) fn(rt reflect.Type) (fn *codecFn) {
	return x.fnVia(rt, x.typeInfos(), &x.rtidFns, x.CheckCircularRef, true)
}

func (x *BasicHandle) fnNoExt(rt reflect.Type) (fn *codecFn) {
	return x.fnVia(rt, x.typeInfos(), &x.rtidFnsNoExt, x.CheckCircularRef, false)
}

func (x *basicHandleRuntimeState) fnVia(rt reflect.Type, tinfos *TypeInfos, fs *atomicRtidFnSlice, checkCircularRef, checkExt bool) (fn *codecFn) {
	rtid := rt2id(rt)
	sp := fs.load()
	if sp != nil {
		if _, fn = findRtidFn(sp, rtid); fn != nil {
			return
		}
	}

	fn = x.fnLoad(rt, rtid, tinfos, checkCircularRef, checkExt)
	x.mu.Lock()
	sp = fs.load()
	// since this is an atomic load/store, we MUST use a different array each time,
	// else we have a data race when a store is happening simultaneously with a findRtidFn call.
	if sp == nil {
		sp = []codecRtidFn{{rtid, fn}}
		fs.store(sp)
	} else {
		idx, fn2 := findRtidFn(sp, rtid)
		if fn2 == nil {
			sp2 := make([]codecRtidFn, len(sp)+1)
			copy(sp2[idx+1:], sp[idx:])
			copy(sp2, sp[:idx])
			sp2[idx] = codecRtidFn{rtid, fn}
			fs.store(sp2)
		}
	}
	x.mu.Unlock()
	return
}
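
// The slice insert in fnVia above is copy-on-write: readers call fs.load()
// without a lock, so the writer must never mutate a slice a concurrent reader
// may hold. The same steps in isolation:
//
//	sp2 := make([]codecRtidFn, len(sp)+1)
//	copy(sp2, sp[:idx])         // elements before the insertion point
//	sp2[idx] = codecRtidFn{rtid, fn}
//	copy(sp2[idx+1:], sp[idx:]) // shift the remaining elements right by one
//	fs.store(sp2)               // atomically publish the new slice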

func fnloadFastpathUnderlying(ti *typeInfo) (f *fastpathE, u reflect.Type) {
	var rtid uintptr
	var idx int
	rtid = rt2id(ti.fastpathUnderlying)
	idx = fastpathAvIndex(rtid)
	if idx == -1 {
		return
	}
	f = &fastpathAv[idx]
	if uint8(reflect.Array) == ti.kind {
		u = reflectArrayOf(ti.rt.Len(), ti.elem)
	} else {
		u = f.rt
	}
	return
}

func (x *basicHandleRuntimeState) fnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, checkCircularRef, checkExt bool) (fn *codecFn) {
	fn = new(codecFn)
	fi := &(fn.i)
	ti := tinfos.get(rtid, rt)
	fi.ti = ti
	rk := reflect.Kind(ti.kind)

	// anything can be an extension except the built-in ones: time, raw and rawext.
	// ensure we check for these types, then if extension, before checking if
	// it implements one of the pre-declared interfaces.

	fi.addrDf = true
	// fi.addrEf = true

	if rtid == timeTypId && x.timeBuiltin {
		fn.fe = (*Encoder).kTime
		fn.fd = (*Decoder).kTime
	} else if rtid == rawTypId {
		fn.fe = (*Encoder).raw
		fn.fd = (*Decoder).raw
	} else if rtid == rawExtTypId {
		fn.fe = (*Encoder).rawExt
		fn.fd = (*Decoder).rawExt
		fi.addrD = true
		fi.addrE = true
	} else if xfFn := x.getExt(rtid, checkExt); xfFn != nil {
		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
		fn.fe = (*Encoder).ext
		fn.fd = (*Decoder).ext
		fi.addrD = true
		if rk == reflect.Struct || rk == reflect.Array {
			fi.addrE = true
		}
	} else if (ti.flagSelfer || ti.flagSelferPtr) &&
		!(checkCircularRef && ti.flagSelferViaCodecgen && ti.kind == byte(reflect.Struct)) {
		// do not use Selfer generated by codecgen if it is a struct and CheckCircularRef=true
		fn.fe = (*Encoder).selferMarshal
		fn.fd = (*Decoder).selferUnmarshal
		fi.addrD = ti.flagSelferPtr
		fi.addrE = ti.flagSelferPtr
	} else if supportMarshalInterfaces && x.isBe() &&
		(ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) &&
		(ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) {
		fn.fe = (*Encoder).binaryMarshal
		fn.fd = (*Decoder).binaryUnmarshal
		fi.addrD = ti.flagBinaryUnmarshalerPtr
		fi.addrE = ti.flagBinaryMarshalerPtr
	} else if supportMarshalInterfaces && !x.isBe() && x.isJs() &&
		(ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) &&
		(ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) {
		// If JSON, we should check JSONMarshal before textMarshal
		fn.fe = (*Encoder).jsonMarshal
		fn.fd = (*Decoder).jsonUnmarshal
		fi.addrD = ti.flagJsonUnmarshalerPtr
		fi.addrE = ti.flagJsonMarshalerPtr
	} else if supportMarshalInterfaces && !x.isBe() &&
		(ti.flagTextMarshaler || ti.flagTextMarshalerPtr) &&
		(ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) {
		fn.fe = (*Encoder).textMarshal
		fn.fd = (*Decoder).textUnmarshal
		fi.addrD = ti.flagTextUnmarshalerPtr
		fi.addrE = ti.flagTextMarshalerPtr
	} else {
		if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) {
			// by default (without using unsafe),
			// if an array is not addressable, converting from an array to a slice
			// requires an allocation (see helper_not_unsafe.go: func rvGetSlice4Array).
			//
			// (Non-addressable arrays mostly occur as keys/values from a map).
			//
			// However, fastpath functions are mostly for slices of numbers or strings,
			// which are small by definition and thus allocation should be fast/cheap in time.
			//
			// Consequently, the value of doing this quick allocation to elide the overhead cost of
			// non-optimized (not-unsafe) reflection is a fair price.
			var rtid2 uintptr
			if !ti.flagHasPkgPath { // un-named type (slice or map or array)
				rtid2 = rtid
				if rk == reflect.Array {
					rtid2 = rt2id(ti.key) // ti.key for arrays = reflect.SliceOf(ti.elem)
				}
				if idx := fastpathAvIndex(rtid2); idx != -1 {
					fn.fe = fastpathAv[idx].encfn
					fn.fd = fastpathAv[idx].decfn
					fi.addrD = true
					fi.addrDf = false
					if rk == reflect.Array {
						fi.addrD = false // decode directly into array value (slice made from it)
					}
				}
			} else { // named type (with underlying type of map or slice or array)
				// try to use mapping for underlying type
				xfe, xrt := fnloadFastpathUnderlying(ti)
				if xfe != nil {
					xfnf := xfe.encfn
					xfnf2 := xfe.decfn
					if rk == reflect.Array {
						fi.addrD = false // decode directly into array value (slice made from it)
						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
							xfnf2(d, xf, rvConvert(xrv, xrt))
						}
					} else {
						fi.addrD = true
						fi.addrDf = false // meaning it can be an address(ptr) or a value
						xptr2rt := reflect.PtrTo(xrt)
						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
							if xrv.Kind() == reflect.Ptr {
								xfnf2(d, xf, rvConvert(xrv, xptr2rt))
							} else {
								xfnf2(d, xf, rvConvert(xrv, xrt))
							}
						}
					}
					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
						xfnf(e, xf, rvConvert(xrv, xrt))
					}
				}
			}
		}
		if fn.fe == nil && fn.fd == nil {
			switch rk {
			case reflect.Bool:
				fn.fe = (*Encoder).kBool
				fn.fd = (*Decoder).kBool
			case reflect.String:
				// Do not use different functions based on StringToRaw option, as that will statically
				// set the function for a string type, and if the Handle is modified thereafter,
				// behaviour is non-deterministic
				// i.e. DO NOT DO:
				//   if x.StringToRaw {
				//   	fn.fe = (*Encoder).kStringToRaw
				//   } else {
				//   	fn.fe = (*Encoder).kStringEnc
				//   }

				fn.fe = (*Encoder).kString
				fn.fd = (*Decoder).kString
			case reflect.Int:
				fn.fd = (*Decoder).kInt
				fn.fe = (*Encoder).kInt
			case reflect.Int8:
				fn.fe = (*Encoder).kInt8
				fn.fd = (*Decoder).kInt8
			case reflect.Int16:
				fn.fe = (*Encoder).kInt16
				fn.fd = (*Decoder).kInt16
			case reflect.Int32:
				fn.fe = (*Encoder).kInt32
				fn.fd = (*Decoder).kInt32
			case reflect.Int64:
				fn.fe = (*Encoder).kInt64
				fn.fd = (*Decoder).kInt64
			case reflect.Uint:
				fn.fd = (*Decoder).kUint
				fn.fe = (*Encoder).kUint
			case reflect.Uint8:
				fn.fe = (*Encoder).kUint8
				fn.fd = (*Decoder).kUint8
			case reflect.Uint16:
				fn.fe = (*Encoder).kUint16
				fn.fd = (*Decoder).kUint16
			case reflect.Uint32:
				fn.fe = (*Encoder).kUint32
				fn.fd = (*Decoder).kUint32
			case reflect.Uint64:
				fn.fe = (*Encoder).kUint64
				fn.fd = (*Decoder).kUint64
			case reflect.Uintptr:
				fn.fe = (*Encoder).kUintptr
				fn.fd = (*Decoder).kUintptr
			case reflect.Float32:
				fn.fe = (*Encoder).kFloat32
				fn.fd = (*Decoder).kFloat32
			case reflect.Float64:
				fn.fe = (*Encoder).kFloat64
				fn.fd = (*Decoder).kFloat64
			case reflect.Complex64:
				fn.fe = (*Encoder).kComplex64
				fn.fd = (*Decoder).kComplex64
			case reflect.Complex128:
				fn.fe = (*Encoder).kComplex128
				fn.fd = (*Decoder).kComplex128
			case reflect.Chan:
				fn.fe = (*Encoder).kChan
				fn.fd = (*Decoder).kChan
			case reflect.Slice:
				fn.fe = (*Encoder).kSlice
				fn.fd = (*Decoder).kSlice
			case reflect.Array:
				fi.addrD = false // decode directly into array value (slice made from it)
				fn.fe = (*Encoder).kArray
				fn.fd = (*Decoder).kArray
			case reflect.Struct:
				if ti.anyOmitEmpty ||
					ti.flagMissingFielder ||
					ti.flagMissingFielderPtr {
					fn.fe = (*Encoder).kStruct
				} else {
					fn.fe = (*Encoder).kStructNoOmitempty
				}
				fn.fd = (*Decoder).kStruct
			case reflect.Map:
				fn.fe = (*Encoder).kMap
				fn.fd = (*Decoder).kMap
			case reflect.Interface:
				// encode: reflect.Interface are handled already by preEncodeValue
				fn.fd = (*Decoder).kInterface
				fn.fe = (*Encoder).kErr
			default:
				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
				fn.fe = (*Encoder).kErr
				fn.fd = (*Decoder).kErr
			}
		}
	}
	return
}

// Handle defines a specific encoding format. It also stores any runtime state
// used during an Encoding or Decoding session e.g. stored state about Types, etc.
//
// Once a handle is configured, it can be shared across multiple Encoders and Decoders.
//
// Note that a Handle is NOT safe for concurrent modification.
//
// A Handle also should not be modified after it is configured and has
// been used at least once. This is because stored state may be out of sync with the
// new configuration, and a data race can occur when multiple goroutines access it.
// i.e. multiple Encoders or Decoders in different goroutines.
//
// Consequently, the typical usage model is that a Handle is pre-configured
// before first time use, and not modified while in use.
// Such a pre-configured Handle is safe for concurrent access.
type Handle interface {
	Name() string
	getBasicHandle() *BasicHandle
	newEncDriver() encDriver
	newDecDriver() decDriver
	isBinary() bool
	isJson() bool // json is special for now, so track it
	// desc describes the current byte descriptor, or returns "unknown[XXX]" if not understood.
	desc(bd byte) string
	// init initializes the handle based on handle-specific info (beyond what is in BasicHandle)
	init()
}
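
// A typical pre-configure-then-share flow (a sketch using the JsonHandle in
// this package; w and r are some io.Writer and io.Reader):
//
//	var jh JsonHandle
//	jh.MapType = reflect.TypeOf(map[string]interface{}(nil)) // configure before first use
//	// after first use, jh must not be modified, but is safe to share:
//	enc := NewEncoder(w, &jh)
//	dec := NewDecoder(r, &jh)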

// Raw represents raw formatted bytes.
// We "blindly" store it during encode and retrieve the raw bytes during decode.
// Note: it is dangerous during encode, so we may gate the behaviour
// behind an Encode flag which must be explicitly set.
type Raw []byte

// RawExt represents raw unprocessed extension data.
// Some codecs will decode extension data as a *RawExt
// if there is no registered extension for the tag.
//
// Only one of Data or Value is nil.
// If Data is nil, then the content of the RawExt is in the Value.
type RawExt struct {
	Tag uint64
	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
	Data []byte
	// Value represents the extension, if Data is nil.
	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
	// custom serialization of the types.
	Value interface{}
}

func (re *RawExt) setData(xbs []byte, zerocopy bool) {
	if zerocopy {
		re.Data = xbs
	} else {
		re.Data = append(re.Data[:0], xbs...)
	}
}

// BytesExt handles custom (de)serialization of types to/from []byte.
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
type BytesExt interface {
	// WriteExt converts a value to a []byte.
	//
	// Note: v is a pointer iff the registered extension type is a struct or array kind.
	WriteExt(v interface{}) []byte

	// ReadExt updates a value from a []byte.
	//
	// Note: dst is always a pointer kind to the registered extension type.
	ReadExt(dst interface{}, src []byte)
}

// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
// The Encoder or Decoder will then handle the further (de)serialization of that known type.
//
// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
type InterfaceExt interface {
	// ConvertExt converts a value into a simpler interface for easy encoding
	// e.g. convert time.Time to int64.
	//
	// Note: v is a pointer iff the registered extension type is a struct or array kind.
	ConvertExt(v interface{}) interface{}

	// UpdateExt updates a value from a simpler interface for easy decoding
	// e.g. convert int64 to time.Time.
	//
	// Note: dst is always a pointer kind to the registered extension type.
	UpdateExt(dst interface{}, src interface{})
}
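
// A sketch of registering an InterfaceExt (the wrapper type, tag value, and
// handle are illustrative; the decoded src may arrive as a different numeric
// type depending on the format, so a real UpdateExt should assert accordingly):
//
//	type Celsius struct{ Deg int64 }
//
//	type celsiusExt struct{}
//
//	func (celsiusExt) ConvertExt(v interface{}) interface{} { return v.(*Celsius).Deg }
//	func (celsiusExt) UpdateExt(dst interface{}, src interface{}) {
//		dst.(*Celsius).Deg = src.(int64)
//	}
//
//	var ch CborHandle
//	err := ch.SetInterfaceExt(reflect.TypeOf(Celsius{}), 121, celsiusExt{})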

// Ext handles custom (de)serialization of custom types / extensions.
type Ext interface {
	BytesExt
	InterfaceExt
}

// addExtWrapper is a wrapper implementation to support former AddExt exported method.
type addExtWrapper struct {
	encFn func(reflect.Value) ([]byte, error)
	decFn func(reflect.Value, []byte) error
}

func (x addExtWrapper) WriteExt(v interface{}) []byte {
	bs, err := x.encFn(reflect.ValueOf(v))
	halt.onerror(err)
	return bs
}

func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
	halt.onerror(x.decFn(reflect.ValueOf(v), bs))
}

func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
	return x.WriteExt(v)
}

func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
	x.ReadExt(dest, v.([]byte))
}

type bytesExtFailer struct{}

func (bytesExtFailer) WriteExt(v interface{}) []byte {
	halt.onerror(errExtFnWriteExtUnsupported)
	return nil
}
func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
	halt.onerror(errExtFnReadExtUnsupported)
}

type interfaceExtFailer struct{}

func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
	halt.onerror(errExtFnConvertExtUnsupported)
	return nil
}
func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
	halt.onerror(errExtFnUpdateExtUnsupported)
}

type bytesExtWrapper struct {
	interfaceExtFailer
	BytesExt
}

type interfaceExtWrapper struct {
	bytesExtFailer
	InterfaceExt
}

type extFailWrapper struct {
	bytesExtFailer
	interfaceExtFailer
}

type binaryEncodingType struct{}

func (binaryEncodingType) isBinary() bool { return true }
func (binaryEncodingType) isJson() bool   { return false }

type textEncodingType struct{}

func (textEncodingType) isBinary() bool { return false }
func (textEncodingType) isJson() bool   { return false }

type notJsonType struct{}

func (notJsonType) isJson() bool { return false }

// noBuiltInTypes is embedded into many types which do not support builtins
// e.g. msgpack, simple, cbor.

type noBuiltInTypes struct{}

func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}

// bigenHelper handles ByteOrder operations directly using
// arrays of bytes (not slice of bytes).
//
// Since byteorder operations are very common for encoding and decoding
// numbers, lengths, etc - it is imperative that this operation is as
// fast as possible. Removing indirection (pointer chasing) to look
// at up to 8 bytes helps a lot here.
//
// For times where it is expedient to use a slice, delegate to the
// bigenstd (equal to the binary.BigEndian value).
//
// retrofitted from stdlib: encoding/binary/BigEndian (ByteOrder)
type bigenHelper struct{}

func (z bigenHelper) PutUint16(v uint16) (b [2]byte) {
	return [...]byte{
		byte(v >> 8),
		byte(v),
	}
}

func (z bigenHelper) PutUint32(v uint32) (b [4]byte) {
	return [...]byte{
		byte(v >> 24),
		byte(v >> 16),
		byte(v >> 8),
		byte(v),
	}
}

func (z bigenHelper) PutUint64(v uint64) (b [8]byte) {
	return [...]byte{
		byte(v >> 56),
		byte(v >> 48),
		byte(v >> 40),
		byte(v >> 32),
		byte(v >> 24),
		byte(v >> 16),
		byte(v >> 8),
		byte(v),
	}
}

func (z bigenHelper) Uint16(b [2]byte) (v uint16) {
	return uint16(b[1]) |
		uint16(b[0])<<8
}

func (z bigenHelper) Uint32(b [4]byte) (v uint32) {
	return uint32(b[3]) |
		uint32(b[2])<<8 |
		uint32(b[1])<<16 |
		uint32(b[0])<<24
}

func (z bigenHelper) Uint64(b [8]byte) (v uint64) {
	return uint64(b[7]) |
		uint64(b[6])<<8 |
		uint64(b[5])<<16 |
		uint64(b[4])<<24 |
		uint64(b[3])<<32 |
		uint64(b[2])<<40 |
		uint64(b[1])<<48 |
		uint64(b[0])<<56
}

func (z bigenHelper) writeUint16(w *encWr, v uint16) {
	x := z.PutUint16(v)
	w.writen2(x[0], x[1])
}

func (z bigenHelper) writeUint32(w *encWr, v uint32) {
	// w.writeb((z.PutUint32(v))[:])
	// x := z.PutUint32(v)
	// w.writeb(x[:])
	// w.writen4(x[0], x[1], x[2], x[3])
	w.writen4(z.PutUint32(v))
}

func (z bigenHelper) writeUint64(w *encWr, v uint64) {
	w.writen8(z.PutUint64(v))
}
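
// Round-tripping through bigenHelper, using the package-level bigen instance:
//
//	b := bigen.PutUint32(0x01020304) // b == [4]byte{0x01, 0x02, 0x03, 0x04}
//	v := bigen.Uint32(b)             // v == 0x01020304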

type extTypeTagFn struct {
	rtid    uintptr
	rtidptr uintptr
	rt      reflect.Type
	tag     uint64
	ext     Ext
}

type extHandle []extTypeTagFn

// AddExt registers an encode and decode function for a reflect.Type.
// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
//
// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
func (x *BasicHandle) AddExt(rt reflect.Type, tag byte,
	encfn func(reflect.Value) ([]byte, error),
	decfn func(reflect.Value, []byte) error) (err error) {
	if encfn == nil || decfn == nil {
		return x.SetExt(rt, uint64(tag), nil)
	}
	return x.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
}
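
// exampleAddExt is an illustrative sketch (not part of the original library):
// registering an extension, with tag 78, for a hypothetical named string type
// via the deprecated AddExt API. The type and funcs here are illustrative only.
func exampleAddExt(h *BasicHandle) error {
	type wrapped string
	encfn := func(rv reflect.Value) ([]byte, error) { return []byte(rv.String()), nil }
	decfn := func(rv reflect.Value, bs []byte) error { rv.SetString(string(bs)); return nil }
	return h.AddExt(reflect.TypeOf(wrapped("")), 78, encfn, decfn)
}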

// SetExt will set the extension for a tag and reflect.Type.
// Note that the type must be a named type, and specifically not a pointer or Interface.
// An error is returned if that is not honored.
// To deregister an ext, call SetExt with nil Ext.
//
// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
func (x *BasicHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
	if x.isInited() {
		return errHandleInited
	}
	if x.basicHandleRuntimeState == nil {
		x.basicHandleRuntimeState = new(basicHandleRuntimeState)
	}
	return x.basicHandleRuntimeState.setExt(rt, tag, ext)
}

func (o extHandle) getExtForI(x interface{}) (v *extTypeTagFn) {
	if len(o) > 0 {
		v = o.getExt(i2rtid(x), true)
	}
	return
}

func (o extHandle) getExt(rtid uintptr, check bool) (v *extTypeTagFn) {
	if !check {
		return
	}
	for i := range o {
		v = &o[i]
		if v.rtid == rtid || v.rtidptr == rtid {
			return
		}
	}
	return nil
}

func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
	for i := range o {
		v = &o[i]
		if v.tag == tag {
			return
		}
	}
	return nil
}

type intf2impl struct {
	rtid uintptr // for intf
	impl reflect.Type
}

type intf2impls []intf2impl

// Intf2Impl maps an interface to an implementing type.
// This allows us to support inferring the concrete type
// and populating it when passed an interface.
// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
//
// Passing a nil impl will clear the mapping.
func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
	if impl != nil && !impl.Implements(intf) {
		return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
	}
	rtid := rt2id(intf)
	o2 := *o
	for i := range o2 {
		v := &o2[i]
		if v.rtid == rtid {
			v.impl = impl
			return
		}
	}
	*o = append(o2, intf2impl{rtid, impl})
	return
}

func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
	for i := range o {
		v := &o[i]
		if v.rtid == rtid {
			if v.impl == nil {
				return
			}
			vkind := v.impl.Kind()
			if vkind == reflect.Ptr {
				return reflect.New(v.impl.Elem())
			}
			return rvZeroAddrK(v.impl, vkind)
		}
	}
	return
}
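
// exampleIntf2Impl is an illustrative sketch (not part of the original
// library): it maps the io.Reader interface to *strings.Reader, so a decode
// into a nil io.Reader could allocate a concrete *strings.Reader to fill.
func exampleIntf2Impl() error {
	var o intf2impls
	rdr := reflect.TypeOf((*io.Reader)(nil)).Elem() // the interface type itself
	return o.Intf2Impl(rdr, reflect.TypeOf((*strings.Reader)(nil)))
}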

// structFieldInfoPathNode is a node in a tree, which allows us to easily
// walk the anonymous path.
//
// In the typical case, the node is not embedded/anonymous, and thus the parent
// will be nil and this information becomes a value (not needing any indirection).
type structFieldInfoPathNode struct {
	parent *structFieldInfoPathNode

	offset   uint16
	index    uint16
	kind     uint8
	numderef uint8

	// encNameAsciiAlphaNum and omitEmpty should be in structFieldInfo,
	// but are kept here for tighter packaging.

	encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers
	omitEmpty            bool

	typ reflect.Type
}

// depth returns the number of valid nodes in the hierarchy
func (path *structFieldInfoPathNode) depth() (d int) {
TOP:
	if path != nil {
		d++
		path = path.parent
		goto TOP
	}
	return
}

// field returns the field of the struct.
func (path *structFieldInfoPathNode) field(v reflect.Value) (rv2 reflect.Value) {
	if parent := path.parent; parent != nil {
		v = parent.field(v)
		for j, k := uint8(0), parent.numderef; j < k; j++ {
			if rvIsNil(v) {
				return
			}
			v = v.Elem()
		}
	}
	return path.rvField(v)
}

// fieldAlloc returns the field of the struct.
// It allocates if a nil value was seen while searching.
func (path *structFieldInfoPathNode) fieldAlloc(v reflect.Value) (rv2 reflect.Value) {
	if parent := path.parent; parent != nil {
		v = parent.fieldAlloc(v)
		for j, k := uint8(0), parent.numderef; j < k; j++ {
			if rvIsNil(v) {
				rvSetDirect(v, reflect.New(v.Type().Elem()))
			}
			v = v.Elem()
		}
	}
	return path.rvField(v)
}

type structFieldInfo struct {
	encName string // encode name

	// encNameHash uintptr

	// fieldName string // currently unused

	// encNameAsciiAlphaNum and omitEmpty should be here,
	// but are stored in structFieldInfoPathNode for tighter packaging.

	path structFieldInfoPathNode
}

func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
	keytype = valueTypeString // default
	if stag == "" {
		return
	}
	ss := strings.Split(stag, ",")
	if len(ss) < 2 {
		return
	}
	for _, s := range ss[1:] {
		switch s {
		case "omitempty":
			omitEmpty = true
		case "toarray":
			toArray = true
		case "int":
			keytype = valueTypeInt
		case "uint":
			keytype = valueTypeUint
		case "float":
			keytype = valueTypeFloat
			// case "bool":
			// 	keytype = valueTypeBool
		case "string":
			keytype = valueTypeString
		}
	}
	return
}

func (si *structFieldInfo) parseTag(stag string) {
	if stag == "" {
		return
	}
	for i, s := range strings.Split(stag, ",") {
		if i == 0 {
			if s != "" {
				si.encName = s
			}
		} else {
			switch s {
			case "omitempty":
				si.path.omitEmpty = true
			}
		}
	}
}
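
// exampleParseTags is an illustrative sketch (not part of the original
// library) of what the two tag parsers above produce for typical tags.
func exampleParseTags() (toArray bool, si structFieldInfo) {
	// options on the special _struct field: toArray=true, omitEmpty=true, keytype=valueTypeString
	toArray, _, _ = parseStructInfo(",toarray,omitempty")
	// a regular field tag: encName="name", path.omitEmpty=true
	si.parseTag("name,omitempty")
	return
}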

type sfiSortedByEncName []*structFieldInfo

func (p sfiSortedByEncName) Len() int           { return len(p) }
func (p sfiSortedByEncName) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName }

// typeInfo4Container holds information that is only available for
// containers like map, array, chan, slice.
type typeInfo4Container struct {
	elem reflect.Type
	// key is:
	//   - if map kind: map key
	//   - if array kind: sliceOf(elem)
	//   - if chan kind: sliceOf(elem)
	key reflect.Type

	// fastpathUnderlying is the underlying type of a named slice/map/array, as defined by the go spec,
	// and is used by fastpath where we defined fastpath functions for the underlying type.
	//
	// for a map, it's a map; for a slice or array, it's a slice; else it's nil.
	fastpathUnderlying reflect.Type

	tikey  *typeInfo
	tielem *typeInfo
}

// typeInfo keeps static (non-changing, readonly) information
// about each (non-ptr) type referenced in the encode/decode sequence.
//
// During an encode/decode sequence, we work as below:
//   - If base is a built-in type, en/decode base value
//   - If base is registered as an extension, en/decode base value
//   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
//   - Else decode appropriately based on the reflect.Kind
type typeInfo struct {
	rt  reflect.Type
	ptr reflect.Type

	// pkgpath string

	rtid uintptr

	numMeth uint16 // number of methods
	kind    uint8
	chandir uint8

	anyOmitEmpty bool      // true if a struct, and any of the fields are tagged "omitempty"
	toArray      bool      // whether this (struct) type should be encoded as an array
	keyType      valueType // if struct, how is the field name stored in a stream? default is string
	mbs          bool      // base type (T or *T) is a MapBySlice

	sfi4Name map[string]*structFieldInfo // map. used for finding sfi given a name

	*typeInfo4Container

	// ---- cpu cache line boundary?

	size, keysize, elemsize uint32

	keykind, elemkind uint8

	flagHasPkgPath   bool // Type.PkgPath != ""
	flagComparable   bool
	flagCanTransient bool

	flagMarshalInterface  bool // does this have custom (un)marshal implementation?
	flagSelferViaCodecgen bool

	// custom implementation flags
	flagIsZeroer    bool
	flagIsZeroerPtr bool

	flagIsCodecEmptyer    bool
	flagIsCodecEmptyerPtr bool

	flagBinaryMarshaler    bool
	flagBinaryMarshalerPtr bool

	flagBinaryUnmarshaler    bool
	flagBinaryUnmarshalerPtr bool

	flagTextMarshaler    bool
	flagTextMarshalerPtr bool

	flagTextUnmarshaler    bool
	flagTextUnmarshalerPtr bool

	flagJsonMarshaler    bool
	flagJsonMarshalerPtr bool

	flagJsonUnmarshaler    bool
	flagJsonUnmarshalerPtr bool

	flagSelfer    bool
	flagSelferPtr bool

	flagMissingFielder    bool
	flagMissingFielderPtr bool

	infoFieldOmitempty bool

	sfi structFieldInfos
}

func (ti *typeInfo) siForEncName(name []byte) (si *structFieldInfo) {
	return ti.sfi4Name[string(name)]
}

func (ti *typeInfo) resolve(x []structFieldInfo, ss map[string]uint16) (n int) {
	n = len(x)

	for i := range x {
		ui := uint16(i)
		xn := x[i].encName
		j, ok := ss[xn]
		if ok {
			i2clear := ui                              // index to be cleared
			if x[i].path.depth() < x[j].path.depth() { // this one is shallower
				ss[xn] = ui
				i2clear = j
			}
			if x[i2clear].encName != "" {
				x[i2clear].encName = ""
				n--
			}
		} else {
			ss[xn] = ui
		}
	}

	return
}

func (ti *typeInfo) init(x []structFieldInfo, n int) {
	var anyOmitEmpty bool

	// remove all the nils (non-ready)
	m := make(map[string]*structFieldInfo, n)
	w := make([]structFieldInfo, n)
	y := make([]*structFieldInfo, n+n)
	z := y[n:]
	y = y[:n]
	n = 0
	for i := range x {
		if x[i].encName == "" {
			continue
		}
		if !anyOmitEmpty && x[i].path.omitEmpty {
			anyOmitEmpty = true
		}
		w[n] = x[i]
		y[n] = &w[n]
		m[x[i].encName] = &w[n]
		n++
	}
	if n != len(y) {
		halt.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", ti.rt, len(y), len(x), n)
	}

	copy(z, y)
	sort.Sort(sfiSortedByEncName(z))

	ti.anyOmitEmpty = anyOmitEmpty
	ti.sfi.load(y, z)
	ti.sfi4Name = m
}

// Handling flagCanTransient
//
// We support the transient optimization if the kind of the type is
// a number, bool, string, or slice (of number/bool).
// In addition, we also support it if the kind is struct or array,
// and the type does not contain any pointers, recursively.
//
// Note that all reference types (string, slice, func, map, ptr, interface, etc) contain pointers.
//
// If using transient for a type with a pointer, there is the potential for data corruption
// when GC tries to follow a "transient" pointer which may become a non-pointer soon after.
//

func transientBitsetFlags() *bitset32 {
	if transientValueHasStringSlice {
		return &numBoolStrSliceBitset
	}
	return &numBoolBitset
}

func isCanTransient(t reflect.Type, k reflect.Kind) (v bool) {
	var bs = transientBitsetFlags()
	if bs.isset(byte(k)) {
		v = true
	} else if k == reflect.Slice {
		elem := t.Elem()
		v = numBoolBitset.isset(byte(elem.Kind()))
	} else if k == reflect.Array {
		elem := t.Elem()
		v = isCanTransient(elem, elem.Kind())
	} else if k == reflect.Struct {
		v = true
		for j, jlen := 0, t.NumField(); j < jlen; j++ {
			f := t.Field(j)
			if !isCanTransient(f.Type, f.Type.Kind()) {
				v = false
				return
			}
		}
	} else {
		v = false
	}
	return
}
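
// exampleIsCanTransient is an illustrative sketch (not part of the original
// library) of which kinds qualify for the transient optimization.
func exampleIsCanTransient() (num, arr, mp bool) {
	num = isCanTransient(reflect.TypeOf(uint64(0)), reflect.Uint64) // true: plain number
	arr = isCanTransient(reflect.TypeOf([4]int{}), reflect.Array)   // true: array with no pointers
	mp = isCanTransient(reflect.TypeOf(map[int]int{}), reflect.Map) // false: maps contain pointers
	return
}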

func (ti *typeInfo) doSetFlagCanTransient() {
	if transientSizeMax > 0 {
		ti.flagCanTransient = ti.size <= transientSizeMax
	} else {
		ti.flagCanTransient = true
	}
	if ti.flagCanTransient {
		if !transientBitsetFlags().isset(ti.kind) {
			ti.flagCanTransient = isCanTransient(ti.rt, reflect.Kind(ti.kind))
		}
	}
}

type rtid2ti struct {
	rtid uintptr
	ti   *typeInfo
}

// TypeInfos caches typeInfo for each type on first inspection.
//
// It is configured with a set of tag keys, which are used to get
// configuration for the type.
type TypeInfos struct {
	infos atomicTypeInfoSlice
	mu    sync.Mutex
	_     uint64 // padding (cache-aligned)
	tags  []string
	_     uint64 // padding (cache-aligned)
}

// NewTypeInfos creates a TypeInfos given a set of struct tag keys.
//
// This allows users to customize the struct tag keys which contain configuration
// of their types.
func NewTypeInfos(tags []string) *TypeInfos {
	return &TypeInfos{tags: tags}
}

func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
	// check for tags: codec, json, in that order.
	// this allows seamless support for many configured structs.
	for _, x := range x.tags {
		s = t.Get(x)
		if s != "" {
			return s
		}
	}
	return
}

func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
	// binary search. adapted from sort/search.go.
	// Note: we use goto (instead of a for loop) so this can be inlined.

	var h uint
	var j = uint(len(s))
LOOP:
	if i < j {
		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
		if s[h].rtid < rtid {
			i = h + 1
		} else {
			j = h
		}
		goto LOOP
	}
	if i < uint(len(s)) && s[i].rtid == rtid {
		ti = s[i].ti
	}
	return
}

func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
	if pti = x.find(rtid); pti == nil {
		pti = x.load(rt)
	}
	return
}

func (x *TypeInfos) find(rtid uintptr) (pti *typeInfo) {
	sp := x.infos.load()
	if sp != nil {
		_, pti = findTypeInfo(sp, rtid)
	}
	return
}

func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) {
	rk := rt.Kind()

	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
		halt.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
	}

	rtid := rt2id(rt)

	// do not hold lock while computing this.
	// it may lead to duplication, but that's ok.
	ti := typeInfo{
		rt:      rt,
		ptr:     reflect.PtrTo(rt),
		rtid:    rtid,
		kind:    uint8(rk),
		size:    uint32(rt.Size()),
		numMeth: uint16(rt.NumMethod()),
		keyType: valueTypeString, // default it - so it's never 0

		// pkgpath: rt.PkgPath(),
		flagHasPkgPath: rt.PkgPath() != "",
	}

	// bset sets custom implementation flags
	bset := func(when bool, b *bool) {
		if when {
			*b = true
		}
	}

	var b1, b2 bool

	b1, b2 = implIntf(rt, binaryMarshalerTyp)
	bset(b1, &ti.flagBinaryMarshaler)
	bset(b2, &ti.flagBinaryMarshalerPtr)
	b1, b2 = implIntf(rt, binaryUnmarshalerTyp)
	bset(b1, &ti.flagBinaryUnmarshaler)
	bset(b2, &ti.flagBinaryUnmarshalerPtr)
	b1, b2 = implIntf(rt, textMarshalerTyp)
	bset(b1, &ti.flagTextMarshaler)
	bset(b2, &ti.flagTextMarshalerPtr)
	b1, b2 = implIntf(rt, textUnmarshalerTyp)
	bset(b1, &ti.flagTextUnmarshaler)
	bset(b2, &ti.flagTextUnmarshalerPtr)
	b1, b2 = implIntf(rt, jsonMarshalerTyp)
	bset(b1, &ti.flagJsonMarshaler)
	bset(b2, &ti.flagJsonMarshalerPtr)
	b1, b2 = implIntf(rt, jsonUnmarshalerTyp)
	bset(b1, &ti.flagJsonUnmarshaler)
	bset(b2, &ti.flagJsonUnmarshalerPtr)
	b1, b2 = implIntf(rt, selferTyp)
	bset(b1, &ti.flagSelfer)
	bset(b2, &ti.flagSelferPtr)
	b1, b2 = implIntf(rt, missingFielderTyp)
	bset(b1, &ti.flagMissingFielder)
	bset(b2, &ti.flagMissingFielderPtr)
	b1, b2 = implIntf(rt, iszeroTyp)
	bset(b1, &ti.flagIsZeroer)
	bset(b2, &ti.flagIsZeroerPtr)
	b1, b2 = implIntf(rt, isCodecEmptyerTyp)
	bset(b1, &ti.flagIsCodecEmptyer)
	bset(b2, &ti.flagIsCodecEmptyerPtr)

	b1, b2 = implIntf(rt, isSelferViaCodecgenerTyp)
	ti.flagSelferViaCodecgen = b1 || b2

	ti.flagMarshalInterface = ti.flagSelfer || ti.flagSelferPtr ||
		ti.flagSelferViaCodecgen ||
		ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr ||
		ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr ||
		ti.flagTextMarshaler || ti.flagTextMarshalerPtr ||
		ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr ||
		ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr ||
		ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr

	b1 = rt.Comparable()
	// bset(b1, &ti.flagComparable)
	ti.flagComparable = b1

	ti.doSetFlagCanTransient()

	var tt reflect.Type
	switch rk {
	case reflect.Struct:
		var omitEmpty bool
		if f, ok := rt.FieldByName(structInfoFieldName); ok {
			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
			ti.infoFieldOmitempty = omitEmpty
		} else {
			ti.keyType = valueTypeString
		}
		pp, pi := &pool4tiload, pool4tiload.Get()
		pv := pi.(*typeInfoLoad)
		pv.reset()
		pv.etypes = append(pv.etypes, ti.rtid)
		x.rget(rt, rtid, nil, pv, omitEmpty)
		n := ti.resolve(pv.sfis, pv.sfiNames)
		ti.init(pv.sfis, n)
		pp.Put(pi)
	case reflect.Map:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.elem = rt.Elem()
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		ti.key = rt.Key()
		for tt = ti.key; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tikey = x.get(rt2id(tt), tt)
		ti.keykind = uint8(ti.key.Kind())
		ti.keysize = uint32(ti.key.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = reflect.MapOf(ti.key, ti.elem)
		}
	case reflect.Slice:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
		if !ti.mbs && b2 {
			ti.mbs = b2
		}
		ti.elem = rt.Elem()
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = reflect.SliceOf(ti.elem)
		}
	case reflect.Chan:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.elem = rt.Elem()
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		ti.chandir = uint8(rt.ChanDir())
		ti.key = reflect.SliceOf(ti.elem)
		ti.keykind = uint8(reflect.Slice)
	case reflect.Array:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
		if !ti.mbs && b2 {
			ti.mbs = b2
		}
		ti.elem = rt.Elem()
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.key = reflect.SliceOf(ti.elem)
		ti.keykind = uint8(reflect.Slice)
		ti.keysize = uint32(ti.key.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = ti.key
		}

		// MARKER: reflect.Ptr cannot happen here, as we halt early if reflect.Ptr passed in
		// case reflect.Ptr:
		// 	ti.elem = rt.Elem()
		// 	ti.elemkind = uint8(ti.elem.Kind())
		// 	ti.elemsize = uint32(ti.elem.Size())
	}

	x.mu.Lock()
	sp := x.infos.load()
	// since this is an atomic load/store, we MUST use a different array each time,
	// else we have a data race when a store happens simultaneously with a findTypeInfo call.
	if sp == nil {
		pti = &ti
		sp = []rtid2ti{{rtid, pti}}
		x.infos.store(sp)
	} else {
		var idx uint
		idx, pti = findTypeInfo(sp, rtid)
		if pti == nil {
			pti = &ti
			sp2 := make([]rtid2ti, len(sp)+1)
			copy(sp2[idx+1:], sp[idx:])
			copy(sp2, sp[:idx])
			sp2[idx] = rtid2ti{rtid, pti}
			x.infos.store(sp2)
		}
	}
	x.mu.Unlock()
	return
}

func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
	path *structFieldInfoPathNode, pv *typeInfoLoad, omitEmpty bool) {
	// Read up fields and store how to access the value.
	//
	// It uses go's rules for (embedded) field selectors,
	// which say that the field with the shallowest depth is selected.
	//
	// Note: we consciously use slices, not a map, to simulate a set.
	//       Typically, types have < 16 fields,
	//       and iteration using equals is faster than maps there
	flen := rt.NumField()
LOOP:
	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
		f := rt.Field(int(j))
		fkind := f.Type.Kind()

		// skip if a func type, or is unexported, or structTag value == "-"
		switch fkind {
		case reflect.Func, reflect.UnsafePointer:
			continue LOOP
		}

		isUnexported := f.PkgPath != ""
		if isUnexported && !f.Anonymous {
			continue
		}
		stag := x.structTag(f.Tag)
		if stag == "-" {
			continue
		}
		var si structFieldInfo

		var numderef uint8 = 0
		for xft := f.Type; xft.Kind() == reflect.Ptr; xft = xft.Elem() {
			numderef++
		}

		var parsed bool
		// if anonymous and no struct tag (or it's blank),
		// and a struct (or pointer to struct), inline it.
		if f.Anonymous && fkind != reflect.Interface {
			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
			ft := f.Type
			isPtr := ft.Kind() == reflect.Ptr
			for ft.Kind() == reflect.Ptr {
				ft = ft.Elem()
			}
			isStruct := ft.Kind() == reflect.Struct

			// Ignore embedded fields of unexported non-struct types.
			// Also, from go1.10, ignore pointers to unexported struct types
			// because unmarshal cannot assign a new struct to an unexported field.
			// See https://golang.org/issue/21357
			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
				continue
			}
			doInline := stag == ""
			if !doInline {
				si.parseTag(stag)
				parsed = true
				doInline = si.encName == "" // si.isZero()
			}
			if doInline && isStruct {
				// if etypes contains this, don't call rget again (as fields are already seen here)
				ftid := rt2id(ft)
				// We cannot recurse forever, but we need to track other field depths.
				// So - we break if we see a type twice (not the first time).
				// This should be sufficient to handle an embedded type that refers to its
				// owning type, which then refers to its embedded type.
				processIt := true
				numk := 0
				for _, k := range pv.etypes {
					if k == ftid {
						numk++
						if numk == rgetMaxRecursion {
							processIt = false
							break
						}
					}
				}
				if processIt {
					pv.etypes = append(pv.etypes, ftid)
					path2 := &structFieldInfoPathNode{
						parent:   path,
						typ:      f.Type,
						offset:   uint16(f.Offset),
						index:    j,
						kind:     uint8(fkind),
						numderef: numderef,
					}
					x.rget(ft, ftid, path2, pv, omitEmpty)
				}
				continue
			}
		}

		// after the anonymous dance: if an unexported field, skip
		if isUnexported || f.Name == "" { // f.Name cannot be "", but defensively handle it
			continue
		}

		si.path = structFieldInfoPathNode{
			parent:   path,
			typ:      f.Type,
			offset:   uint16(f.Offset),
			index:    j,
			kind:     uint8(fkind),
			numderef: numderef,
			// set asciiAlphaNum to true (default); checked and may be set to false below
			encNameAsciiAlphaNum: true,
			// note: omitEmpty might have been set in an earlier parseTag call, etc - so carry it forward
			omitEmpty: si.path.omitEmpty,
		}

		if !parsed {
			si.encName = f.Name
			si.parseTag(stag)
			parsed = true
		} else if si.encName == "" {
			si.encName = f.Name
		}

		// si.encNameHash = maxUintptr() // hashShortString(bytesView(si.encName))

		if omitEmpty {
			si.path.omitEmpty = true
		}

		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
			if !asciiAlphaNumBitset.isset(si.encName[i]) {
				si.path.encNameAsciiAlphaNum = false
				break
			}
		}

		pv.sfis = append(pv.sfis, si)
	}
}

func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
	// return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)

	// if I's method is defined on T (ie T implements I), then *T implements I.
	// The converse is not true.

	// Type.Implements can be expensive, as it does a simultaneous linear search across 2 lists
	// with alphanumeric string comparisons.
	// If we can avoid running one of these 2 calls, we should.

	base = rt.Implements(iTyp)
	if base {
		indir = true
	} else {
		indir = reflect.PtrTo(rt).Implements(iTyp)
	}
	return
}
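
// exampleTextM and exampleImplIntf are an illustrative sketch (not part of
// the original library): a method on the pointer receiver means only *T
// implements the interface, which implIntf reports as base=false, indir=true.
type exampleTextM struct{}

func (*exampleTextM) MarshalText() ([]byte, error) { return nil, nil }

func exampleImplIntf() (base, indir bool) {
	return implIntf(reflect.TypeOf(exampleTextM{}), textMarshalerTyp)
}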

func bool2int(b bool) (v uint8) {
	// MARKER: optimized to be a single instruction
	if b {
		v = 1
	}
	return
}

func isSliceBoundsError(s string) bool {
	return strings.Contains(s, "index out of range") ||
		strings.Contains(s, "slice bounds out of range")
}

func sprintf(format string, v ...interface{}) string {
	return fmt.Sprintf(format, v...)
}

func panicValToErr(h errDecorator, v interface{}, err *error) {
	if v == *err {
		return
	}
	switch xerr := v.(type) {
	case nil:
	case runtime.Error:
		d, dok := h.(*Decoder)
		if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
			*err = io.ErrUnexpectedEOF
		} else {
			h.wrapErr(xerr, err)
		}
	case error:
		switch xerr {
		case nil:
		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
			// treat as special (bubble up)
			*err = xerr
		default:
			h.wrapErr(xerr, err)
		}
	default:
		// we don't expect this to happen (as this library always panics with an error)
		h.wrapErr(fmt.Errorf("%v", v), err)
	}
}

func usableByteSlice(bs []byte, slen int) (out []byte, changed bool) {
	const maxCap = 1024 * 1024 * 64 // 64MB
	const skipMaxCap = false        // allow to test
	if slen <= 0 {
		return []byte{}, true
	}
	if slen <= cap(bs) {
		return bs[:slen], false
	}
	// slen > cap(bs) ... handle memory overload appropriately
	if skipMaxCap || slen <= maxCap {
		return make([]byte, slen), true
	}
	return make([]byte, maxCap), true
}

func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind {
	return mapKeyFastKindVals[k&31]
}

// ----

type codecFnInfo struct {
	ti     *typeInfo
	xfFn   Ext
	xfTag  uint64
	addrD  bool
	addrDf bool // force: if addrD, then decode function MUST take a ptr
	addrE  bool
	// addrEf bool // force: if addrE, then encode function MUST take a ptr
}

// codecFn encapsulates the captured variables and the encode function.
// This way, we only do some calculations once, and then pass to the
// code block that should be called (encapsulated in a function)
// instead of executing the checks every time.
type codecFn struct {
	i  codecFnInfo
	fe func(*Encoder, *codecFnInfo, reflect.Value)
	fd func(*Decoder, *codecFnInfo, reflect.Value)
	// _  [1]uint64 // padding (cache-aligned)
}

type codecRtidFn struct {
	rtid uintptr
	fn   *codecFn
}

func makeExt(ext interface{}) Ext {
	switch t := ext.(type) {
	case Ext:
		return t
	case BytesExt:
		return &bytesExtWrapper{BytesExt: t}
	case InterfaceExt:
		return &interfaceExtWrapper{InterfaceExt: t}
	}
	return &extFailWrapper{}
}

func baseRV(v interface{}) (rv reflect.Value) {
	// use reflect.ValueOf, not rv4i; as of go 1.16beta, rv4i was not inlineable
	for rv = reflect.ValueOf(v); rv.Kind() == reflect.Ptr; rv = rv.Elem() {
	}
	return
}
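
// exampleBaseRV is an illustrative sketch (not part of the original library):
// baseRV dereferences through every pointer level to the base value.
func exampleBaseRV() reflect.Kind {
	v := 7
	p := &v
	return baseRV(&p).Kind() // reflect.Int: **int is dereferenced twice
}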

// ----

// these "checkOverflow" functions must be inlinable, and must not call other functions.
// Overflow means that the value cannot be represented without wrapping/overflow.
// Overflow=false does not mean that the value can be represented without losing precision
// (especially for floating point).

type checkOverflow struct{}

func (checkOverflow) Float32(v float64) (overflow bool) {
	if v < 0 {
		v = -v
	}
	return math.MaxFloat32 < v && v <= math.MaxFloat64
}
func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
	if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
		overflow = true
	}
	return
}
func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
	if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
		overflow = true
	}
	return
}
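
// exampleCheckOverflow is an illustrative sketch (not part of the original
// library): the shift round-trip (v<<(64-bitsize))>>(64-bitsize) keeps only
// the low bitsize bits, so it differs from v exactly when v does not fit.
func exampleCheckOverflow() (fits, overflows bool) {
	var c checkOverflow
	fits = !c.Uint(255, 8)     // 255 fits in 8 bits
	overflows = c.Uint(256, 8) // 256 needs 9 bits
	return
}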

func (checkOverflow) Uint2Int(v uint64, neg bool) (overflow bool) {
	return (neg && v > 1<<63) || (!neg && v >= 1<<63)
}

func (checkOverflow) SignedInt(v uint64) (overflow bool) {
	//e.g. -127 to 128 for int8
	// pos := (v >> 63) == 0
	// ui2 := v & 0x7fffffffffffffff
	// if pos {
	// 	if ui2 > math.MaxInt64 {
	// 		overflow = true
	// 	}
	// } else {
	// 	if ui2 > math.MaxInt64-1 {
	// 		overflow = true
	// 	}
	// }

	// a signed integer overflows if the sign (first) bit is 1 (negative)
	// and the bits after the sign bit represent a number > math.MaxInt64 - 1
	overflow = (v>>63) != 0 && v&0x7fffffffffffffff > math.MaxInt64-1

	return
}

func (x checkOverflow) Float32V(v float64) float64 {
	if x.Float32(v) {
		halt.errorf("float32 overflow: %v", v)
	}
	return v
}
func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
	if x.Uint(v, bitsize) {
		halt.errorf("uint64 overflow: %v", v)
	}
	return v
}
func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
	if x.Int(v, bitsize) {
		halt.errorf("int64 overflow: %v", v)
	}
	return v
}
func (x checkOverflow) SignedIntV(v uint64) int64 {
	if x.SignedInt(v) {
		halt.errorf("uint64 to int64 overflow: %v", v)
	}
	return int64(v)
}

// ------------------ FLOATING POINT -----------------

func isNaN64(f float64) bool { return f != f }

func isWhitespaceChar(v byte) bool {
	// these are in order of speed below ...

	return v < 33
	// return v < 33 && whitespaceCharBitset64.isset(v)
	// return v < 33 && (v == ' ' || v == '\n' || v == '\t' || v == '\r')
	// return v == ' ' || v == '\n' || v == '\t' || v == '\r'
	// return whitespaceCharBitset.isset(v)
}

func isNumberChar(v byte) bool {
	// these are in order of speed below ...

	return numCharBitset.isset(v)
	// return v < 64 && numCharNoExpBitset64.isset(v) || v == 'e' || v == 'E'
	// return v > 42 && v < 102 && numCharWithExpBitset64.isset(v-42)
}

// -----------------------

type ioFlusher interface {
	Flush() error
}

type ioBuffered interface {
	Buffered() int
}

// -----------------------

type sfiRv struct {
	v *structFieldInfo
	r reflect.Value
}

// ------

// bitset types are better than [256]bool, because they permit the whole
// bitset array to be on a single cache line and use less memory.
//
// Also, since pos is a byte (0-255), there are no bounds checks on indexing (cheap).
//
// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduced
// bounds checking, so we discarded them, and everyone uses bitset256.
//
// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
//
// Note that using >> or & is faster than using / or %, as division is quite expensive if not optimized.

// MARKER:
// We noticed a little performance degradation when using bitset256 as [32]byte (or bitset32 as uint32).
// For example, json encoding dropped from 188K ns/op to 168K ns/op (~ 10% reduction) with the bool form.
// Consequently, we are using a [NNN]bool for bitsetNNN.
// To eliminate bounds-checking, we use x % v as that is guaranteed to be within bounds.

// ----
type bitset32 [32]bool

func (x *bitset32) set(pos byte) *bitset32 {
	x[pos&31] = true // x[pos%32] = true
	return x
}
func (x *bitset32) isset(pos byte) bool {
	return x[pos&31] // x[pos%32]
}

type bitset256 [256]bool

func (x *bitset256) set(pos byte) *bitset256 {
	x[pos] = true
	return x
}
func (x *bitset256) isset(pos byte) bool {
	return x[pos]
}
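
// exampleDigitBitset is an illustrative sketch (not part of the original
// library): building a set of digit characters once, then querying it with
// a single, bounds-check-free array access.
var exampleDigitBitset = func() (x bitset256) {
	for c := byte('0'); c <= '9'; c++ {
		x.set(c)
	}
	return
}()

func exampleIsDigit(c byte) bool { return exampleDigitBitset.isset(c) }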

// ------------

type panicHdl struct{}

// onerror will panic if err is defined (not nil)
func (panicHdl) onerror(err error) {
	if err != nil {
		panic(err)
	}
}

// errorf will always panic, using the parameters passed.
//
// Note: it is ok to pass in a stringView, as it will just pass it directly
// to a fmt.Sprintf call and not hold onto it.
//
//go:noinline
func (panicHdl) errorf(format string, params ...interface{}) {
	if format == "" {
		panic(errPanicUndefined)
	}
	if len(params) == 0 {
		panic(errors.New(format))
	}
	panic(fmt.Errorf(format, params...))
}

// ----------------------------------------------------

type errDecorator interface {
	wrapErr(in error, out *error)
}

type errDecoratorDef struct{}

func (errDecoratorDef) wrapErr(v error, e *error) { *e = v }

// ----------------------------------------------------

type mustHdl struct{}

func (mustHdl) String(s string, err error) string {
	halt.onerror(err)
	return s
}
func (mustHdl) Int(s int64, err error) int64 {
	halt.onerror(err)
	return s
}
func (mustHdl) Uint(s uint64, err error) uint64 {
	halt.onerror(err)
	return s
}
func (mustHdl) Float(s float64, err error) float64 {
	halt.onerror(err)
	return s
}

// -------------------

func freelistCapacity(length int) (capacity int) {
	for capacity = 8; capacity <= length; capacity *= 2 {
	}
	return
}
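
// exampleFreelistCapacity is an illustrative sketch (not part of the original
// library): capacities are powers of two, strictly greater than the request.
func exampleFreelistCapacity() [3]int {
	return [3]int{
		freelistCapacity(0),  // 8 (the minimum)
		freelistCapacity(10), // 16
		freelistCapacity(64), // 128 (strictly greater, so 64 rounds up)
	}
}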

// bytesFreelist is a list of byte buffers, sorted by cap.
//
// In anecdotal testing (running go test -tsd 1..6), we couldn't get
// the length of the list > 4 at any time. So we believe a linear search
// without bounds checking is sufficient.
//
// Typical usage model:
//
//	peek may go together with put, iff pop=true. peek gets largest byte slice temporarily.
//	check is used to switch a []byte if necessary
//	get/put go together
//
// Given that callers may get a []byte and then append to it enough to re-allocate
// a new []byte, we should try to return both (the one received from blist and the
// newly allocated one).
//
// Typical usage model for get/put, when we don't know whether we may need more than requested
//
//	v0 := blist.get()
//	v1 := v0
//	... use v1 ...
//	blist.put(v1)
//	if !byteSliceSameData(v0, v1) {
//	  blist.put(v0)
//	}
type bytesFreelist [][]byte

// peek returns a slice of possibly non-zero'ed bytes, with len=0,
// and with the largest capacity from the list.
func (x *bytesFreelist) peek(length int, pop bool) (out []byte) {
	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}
	y := *x
	if len(y) > 0 {
		out = y[len(y)-1]
	}
	// start buf with a minimum of 64 bytes
	const minLenBytes = 64
	if length < minLenBytes {
		length = minLenBytes
	}
	if cap(out) < length {
		out = make([]byte, 0, freelistCapacity(length))
		y = append(y, out)
		*x = y
	}
	if pop && len(y) > 0 {
		y = y[:len(y)-1]
		*x = y
	}
	return
}

// get returns a slice of possibly non-zero'ed bytes, with len=0,
// and with cap >= length requested.
func (x *bytesFreelist) get(length int) (out []byte) {
	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}
	y := *x
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, v := range y {
	for i := 0; i < len(y); i++ {
		v := y[i]
		if cap(v) >= length {
			// *x = append(y[:i], y[i+1:]...)
			copy(y[i:], y[i+1:])
			*x = y[:len(y)-1]
			return v
		}
	}
	return make([]byte, 0, freelistCapacity(length))
}

func (x *bytesFreelist) put(v []byte) {
	if bytesFreeListNoCache || cap(v) == 0 {
		return
	}
	if len(v) != 0 {
		v = v[:0]
	}
	// append the new value, then try to put it in a better position
	y := append(*x, v)
	*x = y
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, z := range y[:len(y)-1] {
	for i := 0; i < len(y)-1; i++ {
		z := y[i]
		if cap(z) > cap(v) {
			copy(y[i+1:], y[i:])
			y[i] = v
			return
		}
	}
}

func (x *bytesFreelist) check(v []byte, length int) (out []byte) {
	// ensure inlineable, by moving slow-path out to its own function
	if cap(v) >= length {
		return v[:0]
	}
	return x.checkPutGet(v, length)
}

func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte {
	// checkPutGet broken out into its own function, so check is inlineable in general case
	const useSeparateCalls = false

	if useSeparateCalls {
		x.put(v)
		return x.get(length)
	}

	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}

	// assume cap(v) < length, so put must happen before get
	y := *x
	var put = cap(v) == 0 // if empty, consider it already put
	if !put {
		y = append(y, v)
		*x = y
	}
	for i := 0; i < len(y); i++ {
		z := y[i]
		if put {
			if cap(z) >= length {
				copy(y[i:], y[i+1:])
				y = y[:len(y)-1]
				*x = y
				return z
			}
		} else {
			if cap(z) > cap(v) {
				copy(y[i+1:], y[i:])
				y[i] = v
				put = true
			}
		}
	}
	return make([]byte, 0, freelistCapacity(length))
}

// -------------------------

// sfiRvFreelist is used by Encoder for encoding structs,
// where we have to gather the fields first and then
// analyze them for omitEmpty, before knowing the length of the array/map to encode.
//
// Typically, the length here will depend on the number of cycles e.g.
// if type T1 has a reference to T1, or T1 has a reference to type T2 which has a reference to T1.
//
// In the general case, the length of this list at most times is 1,
// so linear search is fine.
type sfiRvFreelist [][]sfiRv

func (x *sfiRvFreelist) get(length int) (out []sfiRv) {
	y := *x

	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, v := range y {
	for i := 0; i < len(y); i++ {
		v := y[i]
		if cap(v) >= length {
			// *x = append(y[:i], y[i+1:]...)
			copy(y[i:], y[i+1:])
			*x = y[:len(y)-1]
			return v
		}
	}
	return make([]sfiRv, 0, freelistCapacity(length))
}

func (x *sfiRvFreelist) put(v []sfiRv) {
	if len(v) != 0 {
		v = v[:0]
	}
	// append the new value, then try to put it in a better position
	y := append(*x, v)
	*x = y
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, z := range y[:len(y)-1] {
	for i := 0; i < len(y)-1; i++ {
		z := y[i]
		if cap(z) > cap(v) {
			copy(y[i+1:], y[i:])
			y[i] = v
			return
		}
	}
}

// ---- multiple interner implementations ----

// Hard to tell which is most performant:
//   - use a map[string]string - worst perf, no collisions, and unlimited entries
//   - use a linear search with move-to-front heuristics - no collisions, and maxed at 64 entries
//   - use a computationally-intensive hash - best performance, some collisions, maxed at 64 entries

const (
	internMaxStrLen = 16     // if more than 16 bytes, faster to copy than compare bytes
	internCap       = 64 * 2 // 64 uses 1K bytes RAM, so 128 (anecdotal sweet spot) uses 2K bytes
)

type internerMap map[string]string

func (x *internerMap) init() {
	*x = make(map[string]string, internCap)
}

func (x internerMap) string(v []byte) (s string) {
	s, ok := x[string(v)] // no allocation here, per go implementation
	if !ok {
		s = string(v) // new allocation here
		x[s] = s
	}
	return
}
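
// exampleIntern is an illustrative sketch (not part of the original library):
// repeated []byte keys resolve to the same cached string, so only the first
// occurrence pays the []byte-to-string allocation.
func exampleIntern() bool {
	var m internerMap
	m.init()
	s1 := m.string([]byte("id")) // miss: allocates and caches "id"
	s2 := m.string([]byte("id")) // hit: no allocation
	return s1 == s2
}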
3005

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.