cubefs

Форк
0
1684 строки · 47.9 Кб
1
/**
2
 * Reed-Solomon Coding over 8-bit values.
3
 *
4
 * Copyright 2015, Klaus Post
5
 * Copyright 2015, Backblaze, Inc.
6
 */
7

8
// Package reedsolomon enables Erasure Coding in Go
9
//
10
// For usage and examples, see https://github.com/klauspost/reedsolomon
11
package reedsolomon
12

13
import (
14
	"bytes"
15
	"errors"
16
	"fmt"
17
	"io"
18
	"runtime"
19
	"sync"
20

21
	"github.com/klauspost/cpuid/v2"
22
)
23

24
// Encoder is an interface to encode Reed-Solomon parity sets for your data.
type Encoder interface {
	// Encode parity for a set of data shards.
	// Input is 'shards' containing data shards followed by parity shards.
	// The number of shards must match the number given to New().
	// Each shard is a byte array, and they must all be the same size.
	// The parity shards will always be overwritten and the data shards
	// will remain the same, so it is safe for you to read from the
	// data shards while this is running.
	Encode(shards [][]byte) error

	// EncodeIdx will add parity for a single data shard.
	// Parity shards should start out as 0. The caller must zero them.
	// Data shards must be delivered exactly once. There is no check for this.
	// The parity shards will always be updated and the data shards will remain the same.
	EncodeIdx(dataShard []byte, idx int, parity [][]byte) error

	// Verify returns true if the parity shards contain correct data.
	// The data is the same format as Encode. No data is modified, so
	// you are allowed to read from data while this is running.
	Verify(shards [][]byte) (bool, error)

	// Reconstruct will recreate the missing shards if possible.
	//
	// Given a list of shards, some of which contain data, fills in the
	// ones that don't have data.
	//
	// The length of the array must be equal to the total number of shards.
	// You indicate that a shard is missing by setting it to nil or zero-length.
	// If a shard is zero-length but has sufficient capacity, that memory will
	// be used, otherwise a new []byte will be allocated.
	//
	// If there are too few shards to reconstruct the missing
	// ones, ErrTooFewShards will be returned.
	//
	// The reconstructed shard set is complete, but integrity is not verified.
	// Use the Verify function to check if the data set is ok.
	Reconstruct(shards [][]byte) error

	// ReconstructData will recreate any missing data shards, if possible.
	//
	// Given a list of shards, some of which contain data, fills in the
	// data shards that don't have data.
	//
	// The length of the array must be equal to Shards.
	// You indicate that a shard is missing by setting it to nil or zero-length.
	// If a shard is zero-length but has sufficient capacity, that memory will
	// be used, otherwise a new []byte will be allocated.
	//
	// If there are too few shards to reconstruct the missing
	// ones, ErrTooFewShards will be returned.
	//
	// As the reconstructed shard set may contain missing parity shards,
	// calling the Verify function is likely to fail.
	ReconstructData(shards [][]byte) error

	// ReconstructSome will recreate only requested data shards, if possible.
	//
	// Given a list of shards, some of which contain data, fills in the
	// data shards indicated by true values in the "required" parameter.
	// The length of the "required" array must be equal to DataShards.
	//
	// The length of the "shards" array must be equal to Shards.
	// You indicate that a shard is missing by setting it to nil or zero-length.
	// If a shard is zero-length but has sufficient capacity, that memory will
	// be used, otherwise a new []byte will be allocated.
	//
	// If there are too few shards to reconstruct the missing
	// ones, ErrTooFewShards will be returned.
	//
	// As the reconstructed shard set may contain missing parity shards,
	// calling the Verify function is likely to fail.
	ReconstructSome(shards [][]byte, required []bool) error

	// Update is used to change a few data shards and update their parity.
	// Input 'newDatashards' contains the data shards that changed.
	// Input 'shards' contains the old data shards (if a data shard did not
	// change, it can be nil) and the old parity shards.
	// The new parity shards will be placed in shards[DataShards:].
	// Update is very useful if DataShards is much larger than ParityShards and
	// few data shards changed. It is faster than Encode and does not need to
	// read all data shards to encode.
	Update(shards [][]byte, newDatashards [][]byte) error

	// Split a data slice into the number of shards given to the encoder,
	// and create empty parity shards if necessary.
	//
	// The data will be split into equally sized shards.
	// If the data size isn't divisible by the number of shards,
	// the last shard will contain extra zeros.
	//
	// If there is extra capacity on the provided data slice
	// it will be used instead of allocating parity shards.
	// It will be zeroed out.
	//
	// There must be at least 1 byte otherwise ErrShortData will be
	// returned.
	//
	// The data will not be copied, except for the last shard, so you
	// should not modify the data of the input slice afterwards.
	Split(data []byte) ([][]byte, error)

	// Join the shards and write the data segment to dst.
	//
	// Only the data shards are considered.
	// You must supply the exact output size you want.
	// If there are too few shards given, ErrTooFewShards will be returned.
	// If the total data size is less than outSize, ErrShortData will be returned.
	Join(dst io.Writer, shards [][]byte, outSize int) error
}
132

133
// Extensions is an optional interface.
// All returned instances will support this interface.
type Extensions interface {
	// ShardSizeMultiple will return the size the shard sizes must be a multiple of.
	ShardSizeMultiple() int

	// DataShards will return the number of data shards.
	DataShards() int

	// ParityShards will return the number of parity shards.
	ParityShards() int

	// TotalShards will return the total number of shards.
	TotalShards() int

	// AllocAligned will allocate TotalShards number of slices,
	// aligned to reasonable memory sizes.
	// Provide the size of each shard.
	AllocAligned(each int) [][]byte
}
153

154
const (
	// Thresholds and limits for the generated AVX2/GFNI code paths.
	avx2CodeGenMinSize       = 64 // minimum byte count before codegen kernels pay off
	avx2CodeGenMinShards     = 3  // minimum inputs+outputs for codegen kernels
	avx2CodeGenMaxGoroutines = 8  // goroutine cap when AVX2 codegen is in use
	gfniCodeGenMaxGoroutines = 4  // goroutine cap when GFNI codegen is in use

	intSize = 32 << (^uint(0) >> 63) // 32 or 64, matching the platform's int width
	maxInt  = 1<<(intSize-1) - 1     // largest representable int
)
163

164
// reedSolomon contains a matrix for a specific
// distribution of datashards and parity shards.
// Construct it using New().
type reedSolomon struct {
	dataShards   int // Number of data shards, should not be modified.
	parityShards int // Number of parity shards, should not be modified.
	totalShards  int // Total number of shards. Calculated, and should not be modified.
	m            matrix         // Encoding matrix: identity on top, parity rows below.
	tree         *inversionTree // Cache of inverted matrices for reconstruction; nil when caching is disabled.
	parity       [][]byte       // The parity rows of m (m[dataShards:]), set up by New().
	o            options        // Effective options, resolved in New().
	mPoolSz      int            // Size of the scratch buffers handed out by mPool.
	mPool        sync.Pool      // Pool for temp matrices, etc.
}
178

179
// Compile-time assertion that *reedSolomon implements Extensions.
// The typed-nil form avoids allocating an instance at package init.
var _ Extensions = (*reedSolomon)(nil)
180

181
// ShardSizeMultiple implements Extensions.
// Plain GF(2^8) coding has no alignment requirement, so any (equal) shard size works.
func (r *reedSolomon) ShardSizeMultiple() int {
	return 1
}
184

185
// DataShards returns the number of data shards this encoder was created with.
func (r *reedSolomon) DataShards() int {
	return r.dataShards
}
188

189
// ParityShards returns the number of parity shards this encoder was created with.
func (r *reedSolomon) ParityShards() int {
	return r.parityShards
}
192

193
// TotalShards returns the total number of shards (data + parity).
func (r *reedSolomon) TotalShards() int {
	return r.totalShards
}
196

197
// AllocAligned allocates totalShards slices of 'each' bytes,
// aligned to reasonable memory sizes (delegates to the package-level helper).
func (r *reedSolomon) AllocAligned(each int) [][]byte {
	return AllocAligned(r.totalShards, each)
}
200

201
// ErrInvShardNum will be returned by New, if you attempt to create
// an Encoder with less than one data shard or less than zero parity
// shards.
var ErrInvShardNum = errors.New("cannot create Encoder with less than one data shard or less than zero parity shards")

// ErrMaxShardNum will be returned by New, if you attempt to create an
// Encoder where data and parity shards are bigger than the order of
// GF(2^8). (Larger totals are handled by the Leopard code paths.)
var ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards")

// ErrNotSupported is returned when an operation is not supported.
var ErrNotSupported = errors.New("operation not supported")
213

214
// buildMatrix creates the matrix to use for encoding, given the
215
// number of data shards and the number of total shards.
216
//
217
// The top square of the matrix is guaranteed to be an identity
218
// matrix, which means that the data shards are unchanged after
219
// encoding.
220
func buildMatrix(dataShards, totalShards int) (matrix, error) {
221
	// Start with a Vandermonde matrix.  This matrix would work,
222
	// in theory, but doesn't have the property that the data
223
	// shards are unchanged after encoding.
224
	vm, err := vandermonde(totalShards, dataShards)
225
	if err != nil {
226
		return nil, err
227
	}
228

229
	// Multiply by the inverse of the top square of the matrix.
230
	// This will make the top square be the identity matrix, but
231
	// preserve the property that any square subset of rows is
232
	// invertible.
233
	top, err := vm.SubMatrix(0, 0, dataShards, dataShards)
234
	if err != nil {
235
		return nil, err
236
	}
237

238
	topInv, err := top.Invert()
239
	if err != nil {
240
		return nil, err
241
	}
242

243
	return vm.Multiply(topInv)
244
}
245

246
// buildMatrixJerasure creates the same encoding matrix as the Jerasure library,
// by performing column-wise Gaussian elimination on a Vandermonde matrix and
// then normalizing rows/columns the way Jerasure does.
//
// The top square of the matrix is guaranteed to be an identity
// matrix, which means that the data shards are unchanged after
// encoding.
func buildMatrixJerasure(dataShards, totalShards int) (matrix, error) {
	// Start with a Vandermonde matrix.  This matrix would work,
	// in theory, but doesn't have the property that the data
	// shards are unchanged after encoding.
	vm, err := vandermonde(totalShards, dataShards)
	if err != nil {
		return nil, err
	}

	// Jerasure does this:
	// first row is always 100..00
	vm[0][0] = 1
	for i := 1; i < dataShards; i++ {
		vm[0][i] = 0
	}
	// last row is always 000..01
	for i := 0; i < dataShards-1; i++ {
		vm[totalShards-1][i] = 0
	}
	vm[totalShards-1][dataShards-1] = 1

	// Column-wise elimination: make the top dataShards x dataShards square
	// the identity, applying the same operations to the full column so the
	// parity rows stay consistent.
	for i := 0; i < dataShards; i++ {
		// Find the row where the i'th col is not 0
		r := i
		for ; r < totalShards && vm[r][i] == 0; r++ {
		}
		if r != i {
			// Swap it with i'th row if not already
			t := vm[r]
			vm[r] = vm[i]
			vm[i] = t
		}
		// Multiply by the inverted matrix (same as vm.Multiply(vm[0:dataShards].Invert()))
		if vm[i][i] != 1 {
			// Make vm[i][i] = 1 by dividing the column by vm[i][i]
			tmp := galDivide(1, vm[i][i])
			for j := 0; j < totalShards; j++ {
				vm[j][i] = galMultiply(vm[j][i], tmp)
			}
		}
		for j := 0; j < dataShards; j++ {
			// Make vm[i][j] = 0 where j != i by adding vm[i][j]*vm[.][i] to each column
			tmp := vm[i][j]
			if j != i && tmp != 0 {
				for r := 0; r < totalShards; r++ {
					vm[r][j] = galAdd(vm[r][j], galMultiply(tmp, vm[r][i]))
				}
			}
		}
	}

	// Make vm[dataShards] row all ones - divide each column j by vm[dataShards][j]
	for j := 0; j < dataShards; j++ {
		tmp := vm[dataShards][j]
		if tmp != 1 {
			tmp = galDivide(1, tmp)
			for i := dataShards; i < totalShards; i++ {
				vm[i][j] = galMultiply(vm[i][j], tmp)
			}
		}
	}

	// Make vm[dataShards...totalShards-1][0] column all ones - divide each row
	for i := dataShards + 1; i < totalShards; i++ {
		tmp := vm[i][0]
		if tmp != 1 {
			tmp = galDivide(1, tmp)
			for j := 0; j < dataShards; j++ {
				vm[i][j] = galMultiply(vm[i][j], tmp)
			}
		}
	}

	return vm, nil
}
326

327
// buildMatrixPAR1 creates the matrix to use for encoding according to
328
// the PARv1 spec, given the number of data shards and the number of
329
// total shards. Note that the method they use is buggy, and may lead
330
// to cases where recovery is impossible, even if there are enough
331
// parity shards.
332
//
333
// The top square of the matrix is guaranteed to be an identity
334
// matrix, which means that the data shards are unchanged after
335
// encoding.
336
func buildMatrixPAR1(dataShards, totalShards int) (matrix, error) {
337
	result, err := newMatrix(totalShards, dataShards)
338
	if err != nil {
339
		return nil, err
340
	}
341

342
	for r, row := range result {
343
		// The top portion of the matrix is the identity
344
		// matrix, and the bottom is a transposed Vandermonde
345
		// matrix starting at 1 instead of 0.
346
		if r < dataShards {
347
			result[r][r] = 1
348
		} else {
349
			for c := range row {
350
				result[r][c] = galExp(byte(c+1), r-dataShards)
351
			}
352
		}
353
	}
354
	return result, nil
355
}
356

357
// buildMatrixCauchy builds an encoding matrix whose top is the identity
// and whose bottom is a transposed Cauchy matrix over GF(2^8).
func buildMatrixCauchy(dataShards, totalShards int) (matrix, error) {
	out, err := newMatrix(totalShards, dataShards)
	if err != nil {
		return nil, err
	}

	for row := range out {
		if row < dataShards {
			// Identity portion: data shards pass through unchanged.
			out[row][row] = 1
			continue
		}
		// Cauchy portion: entry is the GF inverse of (row XOR col).
		for col := 0; col < dataShards; col++ {
			out[row][col] = invTable[byte(row^col)]
		}
	}
	return out, nil
}
376

377
// buildXorMatrix can be used to build a matrix with pure XOR
378
// operations if there is only one parity shard.
379
func buildXorMatrix(dataShards, totalShards int) (matrix, error) {
380
	if dataShards+1 != totalShards {
381
		return nil, errors.New("internal error")
382
	}
383
	result, err := newMatrix(totalShards, dataShards)
384
	if err != nil {
385
		return nil, err
386
	}
387

388
	for r, row := range result {
389
		// The top portion of the matrix is the identity
390
		// matrix.
391
		if r < dataShards {
392
			result[r][r] = 1
393
		} else {
394
			// Set all values to 1 (XOR)
395
			for c := range row {
396
				result[r][c] = 1
397
			}
398
		}
399
	}
400
	return result, nil
401
}
402

403
// New creates a new encoder and initializes it to
// the number of data shards and parity shards that
// you want to use. You can reuse this encoder.
// Note that the maximum number of total shards is 65536, with some
// restrictions for a total larger than 256:
//
//   - Shard sizes must be multiple of 64
//   - The methods Join/Split/Update/EncodeIdx are not supported
//
// If no options are supplied, default options are used.
func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
	// Apply caller options on top of the defaults.
	o := defaultOptions
	for _, opt := range opts {
		opt(&o)
	}

	totShards := dataShards + parityShards
	// Explicit Leopard requests, and any total above 256 shards, are
	// delegated to the Leopard-GF implementations.
	switch {
	case o.withLeopard == leopardGF16 && parityShards > 0 || totShards > 256:
		return newFF16(dataShards, parityShards, o)
	case o.withLeopard == leopardAlways && parityShards > 0:
		return newFF8(dataShards, parityShards, o)
	}
	if totShards > 256 {
		return nil, ErrMaxShardNum
	}

	r := reedSolomon{
		dataShards:   dataShards,
		parityShards: parityShards,
		totalShards:  dataShards + parityShards,
		o:            o,
	}

	if dataShards <= 0 || parityShards < 0 {
		return nil, ErrInvShardNum
	}

	// With no parity there is no matrix to build; encoder is a no-op shell.
	if parityShards == 0 {
		return &r, nil
	}

	// Select the encoding matrix construction based on options.
	var err error
	switch {
	case r.o.customMatrix != nil:
		if len(r.o.customMatrix) < parityShards {
			return nil, errors.New("coding matrix must contain at least parityShards rows")
		}
		// Build identity on top, then copy the caller's parity rows below.
		r.m = make([][]byte, r.totalShards)
		for i := 0; i < dataShards; i++ {
			r.m[i] = make([]byte, dataShards)
			r.m[i][i] = 1
		}
		for k, row := range r.o.customMatrix {
			if len(row) < dataShards {
				return nil, errors.New("coding matrix must contain at least dataShards columns")
			}
			r.m[dataShards+k] = make([]byte, dataShards)
			copy(r.m[dataShards+k], row)
		}
	case r.o.fastOneParity && parityShards == 1:
		r.m, err = buildXorMatrix(dataShards, r.totalShards)
	case r.o.useCauchy:
		r.m, err = buildMatrixCauchy(dataShards, r.totalShards)
	case r.o.usePAR1Matrix:
		r.m, err = buildMatrixPAR1(dataShards, r.totalShards)
	case r.o.useJerasureMatrix:
		r.m, err = buildMatrixJerasure(dataShards, r.totalShards)
	default:
		r.m, err = buildMatrix(dataShards, r.totalShards)
	}
	if err != nil {
		return nil, err
	}

	// Calculate what we want per round
	r.o.perRound = cpuid.CPU.Cache.L2

	divide := parityShards + 1
	if avx2CodeGen && r.o.useAVX2 && (dataShards > maxAvx2Inputs || parityShards > maxAvx2Outputs) {
		// Base on L1 cache if we have many inputs.
		r.o.perRound = cpuid.CPU.Cache.L1D
		divide = 0
		if dataShards > maxAvx2Inputs {
			divide += maxAvx2Inputs
		} else {
			divide += dataShards
		}
		// NOTE(review): the condition compares parityShards against
		// maxAvx2Inputs but adds maxAvx2Outputs; it likely should test
		// maxAvx2Outputs. This only affects the perRound sizing
		// heuristic — confirm against upstream before changing.
		if parityShards > maxAvx2Inputs {
			divide += maxAvx2Outputs
		} else {
			divide += parityShards
		}
	}

	if r.o.perRound <= 0 {
		// Set to 128K if undetectable.
		r.o.perRound = 128 << 10
	}

	if cpuid.CPU.ThreadsPerCore > 1 && r.o.maxGoroutines > cpuid.CPU.PhysicalCores {
		// If multiple threads per core, make sure they don't contend for cache.
		r.o.perRound /= cpuid.CPU.ThreadsPerCore
	}

	// 1 input + parity must fit in cache, and we add one more to be safer.
	r.o.perRound = r.o.perRound / divide
	// Align to 64 bytes.
	r.o.perRound = ((r.o.perRound + 63) / 64) * 64

	if r.o.minSplitSize <= 0 {
		// Set minsplit as high as we can, but still have parity in L1.
		cacheSize := cpuid.CPU.Cache.L1D
		if cacheSize <= 0 {
			cacheSize = 32 << 10
		}

		r.o.minSplitSize = cacheSize / (parityShards + 1)
		// Min 1K
		if r.o.minSplitSize < 1024 {
			r.o.minSplitSize = 1024
		}
	}

	// If the shard size is known, pick a goroutine count up front.
	if r.o.shardSize > 0 {
		p := runtime.GOMAXPROCS(0)
		if p == 1 || r.o.shardSize <= r.o.minSplitSize*2 {
			// Not worth it.
			r.o.maxGoroutines = 1
		} else {
			g := r.o.shardSize / r.o.perRound

			// Overprovision by a factor of 2.
			if g < p*2 && r.o.perRound > r.o.minSplitSize*2 {
				g = p * 2
				r.o.perRound /= 2
			}

			// Have g be multiple of p
			g += p - 1
			g -= g % p

			r.o.maxGoroutines = g
		}
	}

	// Generated AVX2 does not need data to stay in L1 cache between runs.
	// We will be purely limited by RAM speed.
	if r.canAVX2C(avx2CodeGenMinSize, maxAvx2Inputs, maxAvx2Outputs) && r.o.maxGoroutines > avx2CodeGenMaxGoroutines {
		r.o.maxGoroutines = avx2CodeGenMaxGoroutines
	}

	if r.canGFNI(avx2CodeGenMinSize, maxAvx2Inputs, maxAvx2Outputs) && r.o.maxGoroutines > gfniCodeGenMaxGoroutines {
		r.o.maxGoroutines = gfniCodeGenMaxGoroutines
	}

	// Inverted matrices are cached in a tree keyed by the indices
	// of the invalid rows of the data to reconstruct.
	// The inversion root node will have the identity matrix as
	// its inversion matrix because it implies there are no errors
	// with the original data.
	if r.o.inversionCache {
		r.tree = newInversionTree(dataShards, parityShards)
	}

	// Keep a direct view of the parity rows for the encode hot path.
	r.parity = make([][]byte, parityShards)
	for i := range r.parity {
		r.parity[i] = r.m[dataShards+i]
	}

	// Pre-size the scratch-buffer pool for the generated AVX2 matrices.
	if avx2CodeGen && r.o.useAVX2 {
		sz := r.dataShards * r.parityShards * 2 * 32
		r.mPool.New = func() interface{} {
			return AllocAligned(1, sz)[0]
		}
		r.mPoolSz = sz
	}
	return &r, err
}
582

583
// getTmpSlice returns a scratch buffer from the pool, sized r.mPoolSz
// (the pool's New func is configured in New when AVX2 codegen is enabled).
func (r *reedSolomon) getTmpSlice() []byte {
	return r.mPool.Get().([]byte)
}
586

587
// putTmpSlice returns a scratch buffer to the pool.
// Buffers with insufficient capacity are silently dropped, so the pool
// only ever hands out full-size (mPoolSz) slices.
func (r *reedSolomon) putTmpSlice(b []byte) {
	if b != nil && cap(b) >= r.mPoolSz {
		r.mPool.Put(b[:r.mPoolSz])
		return
	}
	// Deliberately disabled sanity check; flip to true while debugging to
	// catch callers returning undersized buffers.
	if false {
		// Sanity check
		panic(fmt.Sprintf("got short tmp returned, want %d, got %d", r.mPoolSz, cap(b)))
	}
}
597

598
// ErrTooFewShards is returned if too few shards were given to
// Encode/Verify/Reconstruct/Update. It will also be returned from Reconstruct
// if there were too few shards to reconstruct the missing data.
var ErrTooFewShards = errors.New("too few shards given")
602

603
// Encode parity for a set of data shards.
604
// An array 'shards' containing data shards followed by parity shards.
605
// The number of shards must match the number given to New.
606
// Each shard is a byte array, and they must all be the same size.
607
// The parity shards will always be overwritten and the data shards
608
// will remain the same.
609
func (r *reedSolomon) Encode(shards [][]byte) error {
610
	if len(shards) != r.totalShards {
611
		return ErrTooFewShards
612
	}
613

614
	err := checkShards(shards, false)
615
	if err != nil {
616
		return err
617
	}
618

619
	// Get the slice of output buffers.
620
	output := shards[r.dataShards:]
621

622
	// Do the coding.
623
	r.codeSomeShards(r.parity, shards[0:r.dataShards], output[:r.parityShards], len(shards[0]))
624
	return nil
625
}
626

627
// EncodeIdx will add parity for a single data shard.
// Parity shards should start out zeroed. The caller must zero them before the first call.
// Data shards should only be delivered once. There is no check for this.
// The parity shards will always be updated and the data shards will remain unchanged.
func (r *reedSolomon) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error {
	if len(parity) != r.parityShards {
		return ErrTooFewShards
	}
	// Only reachable when the encoder has zero parity shards: nothing to do.
	if len(parity) == 0 {
		return nil
	}
	if idx < 0 || idx >= r.dataShards {
		return ErrInvShardNum
	}
	err := checkShards(parity, false)
	if err != nil {
		return err
	}
	if len(parity[0]) != len(dataShard) {
		return ErrShardSize
	}

	// Process in cache-friendly windows of perRound bytes, single-threaded.
	start, end := 0, r.o.perRound
	if end > len(dataShard) {
		end = len(dataShard)
	}

	for start < len(dataShard) {
		in := dataShard[start:end]
		// XOR-accumulate this data shard's contribution into every parity row.
		for iRow := 0; iRow < r.parityShards; iRow++ {
			galMulSliceXor(r.parity[iRow][idx], in, parity[iRow][start:end], &r.o)
		}
		// Slide the window forward, clamping the tail to the shard length.
		start = end
		end += r.o.perRound
		if end > len(dataShard) {
			end = len(dataShard)
		}
	}
	return nil
}
668

669
// ErrInvalidInput is returned by Update when an input parameter is invalid
// (a changed data shard without its old contents, or a missing parity shard).
var ErrInvalidInput = errors.New("invalid input")
671

672
// Update recomputes parity after a subset of data shards changed.
// 'shards' holds the old data shards (nil where unchanged) followed by the
// old parity shards; 'newDatashards' holds the changed data shards.
// The refreshed parity is written to shards[dataShards:].
func (r *reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error {
	switch {
	case len(shards) != r.totalShards:
		return ErrTooFewShards
	case len(newDatashards) != r.dataShards:
		return ErrTooFewShards
	}

	if err := checkShards(shards, true); err != nil {
		return err
	}
	if err := checkShards(newDatashards, true); err != nil {
		return err
	}

	// Every changed data shard needs its old contents present.
	for i, nd := range newDatashards {
		if nd != nil && shards[i] == nil {
			return ErrInvalidInput
		}
	}
	// All old parity shards must be present.
	for _, p := range shards[r.dataShards:] {
		if p == nil {
			return ErrInvalidInput
		}
	}

	sz := shardSize(shards)
	parityOut := shards[r.dataShards:]
	r.updateParityShards(r.parity, shards[:r.dataShards], newDatashards[:r.dataShards], parityOut, r.parityShards, sz)
	return nil
}
711

712
// updateParityShards folds the delta between old and new data shards into
// the existing parity, avoiding a full re-encode.
// Note: oldinputs is mutated — each changed shard's old contents are
// overwritten with the XOR delta before being multiplied into the parity rows.
func (r *reedSolomon) updateParityShards(matrixRows, oldinputs, newinputs, outputs [][]byte, outputCount, byteCount int) {
	if len(outputs) == 0 {
		return
	}

	// Large payloads are split across goroutines.
	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
		r.updateParityShardsP(matrixRows, oldinputs, newinputs, outputs, outputCount, byteCount)
		return
	}

	for c := 0; c < r.dataShards; c++ {
		in := newinputs[c]
		if in == nil {
			// Shard unchanged: contributes nothing to the delta.
			continue
		}
		oldin := oldinputs[c]
		// oldinputs data will be changed: oldin now holds the delta.
		sliceXor(in, oldin, &r.o)
		for iRow := 0; iRow < outputCount; iRow++ {
			galMulSliceXor(matrixRows[iRow][c], oldin, outputs[iRow], &r.o)
		}
	}
}
735

736
// updateParityShardsP is the parallel variant of updateParityShards:
// the byte range is split into chunks of at least minSplitSize, each
// processed by its own goroutine over a disjoint [start:stop) slice.
func (r *reedSolomon) updateParityShardsP(matrixRows, oldinputs, newinputs, outputs [][]byte, outputCount, byteCount int) {
	var wg sync.WaitGroup
	do := byteCount / r.o.maxGoroutines
	if do < r.o.minSplitSize {
		do = r.o.minSplitSize
	}
	start := 0
	for start < byteCount {
		// Clamp the final chunk to the remaining bytes.
		if start+do > byteCount {
			do = byteCount - start
		}
		wg.Add(1)
		go func(start, stop int) {
			for c := 0; c < r.dataShards; c++ {
				in := newinputs[c]
				if in == nil {
					continue
				}
				oldin := oldinputs[c]
				// oldinputs data will be changed (becomes the XOR delta).
				sliceXor(in[start:stop], oldin[start:stop], &r.o)
				for iRow := 0; iRow < outputCount; iRow++ {
					galMulSliceXor(matrixRows[iRow][c], oldin[start:stop], outputs[iRow][start:stop], &r.o)
				}
			}
			wg.Done()
		}(start, start+do)
		start += do
	}
	wg.Wait()
}
767

768
// Verify returns true if the parity shards contain the right data.
769
// The data is the same format as Encode. No data is modified.
770
func (r *reedSolomon) Verify(shards [][]byte) (bool, error) {
771
	if len(shards) != r.totalShards {
772
		return false, ErrTooFewShards
773
	}
774
	err := checkShards(shards, false)
775
	if err != nil {
776
		return false, err
777
	}
778

779
	// Slice of buffers being checked.
780
	toCheck := shards[r.dataShards:]
781

782
	// Do the checking.
783
	return r.checkSomeShards(r.parity, shards[:r.dataShards], toCheck[:r.parityShards], len(shards[0])), nil
784
}
785

786
// canAVX2C reports whether the generated AVX2 kernels can be used for
// the given byte count and shard configuration.
func (r *reedSolomon) canAVX2C(byteCount int, inputs, outputs int) bool {
	if !avx2CodeGen || !r.o.useAVX2 {
		return false
	}
	if byteCount < avx2CodeGenMinSize || inputs+outputs < avx2CodeGenMinShards {
		return false
	}
	return inputs <= maxAvx2Inputs && outputs <= maxAvx2Outputs
}
791

792
// canGFNI reports whether the generated GFNI kernels can be used for
// the given byte count and shard configuration.
func (r *reedSolomon) canGFNI(byteCount int, inputs, outputs int) bool {
	if !avx2CodeGen || !r.o.useGFNI {
		return false
	}
	if byteCount < avx2CodeGenMinSize || inputs+outputs < avx2CodeGenMinShards {
		return false
	}
	return inputs <= maxAvx2Inputs && outputs <= maxAvx2Outputs
}
797

798
// Multiplies a subset of rows from a coding matrix by a full set of
// input totalShards to produce some output totalShards.
// 'matrixRows' is the rows from the matrix to use.
// 'inputs' An array of byte arrays, each of which is one input shard.
// The number of inputs used is determined by the length of each matrix row.
// outputs Byte arrays where the computed totalShards are stored.
// The number of outputs computed, and the
// number of matrix rows used, is determined by
// outputCount, which is the number of outputs to compute.
//
// The SIMD fast paths below advance 'start' past whatever portion they
// processed; the scalar loop at the bottom finishes any remaining tail.
func (r *reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, byteCount int) {
	if len(outputs) == 0 {
		return
	}
	// Large payloads go to the parallel variant.
	if byteCount > r.o.minSplitSize {
		r.codeSomeShardsP(matrixRows, inputs, outputs, byteCount)
		return
	}

	// Process using no goroutines
	start, end := 0, r.o.perRound
	if end > len(inputs[0]) {
		end = len(inputs[0])
	}
	if r.canGFNI(byteCount, len(inputs), len(outputs)) {
		// GFNI fast path: one generated kernel covers all inputs/outputs.
		var gfni [maxAvx2Inputs * maxAvx2Outputs]uint64
		m := genGFNIMatrix(matrixRows, len(inputs), 0, len(outputs), gfni[:])
		start += galMulSlicesGFNI(m, inputs, outputs, 0, byteCount)
		end = len(inputs[0])
	} else if r.canAVX2C(byteCount, len(inputs), len(outputs)) {
		// AVX2 fast path: generated matrix kernel, scratch from the pool.
		m := genAvx2Matrix(matrixRows, len(inputs), 0, len(outputs), r.getTmpSlice())
		start += galMulSlicesAvx2(m, inputs, outputs, 0, byteCount)
		r.putTmpSlice(m)
		end = len(inputs[0])
	} else if len(inputs)+len(outputs) > avx2CodeGenMinShards && r.canAVX2C(byteCount, maxAvx2Inputs, maxAvx2Outputs) {
		// Too many shards for a single kernel: tile the work into
		// maxAvx2Inputs x maxAvx2Outputs sub-blocks. The first input tile
		// overwrites outputs; later tiles XOR-accumulate.
		var gfni [maxAvx2Inputs * maxAvx2Outputs]uint64
		end = len(inputs[0])
		inIdx := 0
		m := r.getTmpSlice()
		defer r.putTmpSlice(m)
		ins := inputs
		for len(ins) > 0 {
			inPer := ins
			if len(inPer) > maxAvx2Inputs {
				inPer = inPer[:maxAvx2Inputs]
			}
			outs := outputs
			outIdx := 0
			for len(outs) > 0 {
				outPer := outs
				if len(outPer) > maxAvx2Outputs {
					outPer = outPer[:maxAvx2Outputs]
				}
				if r.o.useGFNI {
					m := genGFNIMatrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), gfni[:])
					if inIdx == 0 {
						galMulSlicesGFNI(m, inPer, outPer, 0, byteCount)
					} else {
						galMulSlicesGFNIXor(m, inPer, outPer, 0, byteCount)
					}
				} else {
					m = genAvx2Matrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), m)
					if inIdx == 0 {
						galMulSlicesAvx2(m, inPer, outPer, 0, byteCount)
					} else {
						galMulSlicesAvx2Xor(m, inPer, outPer, 0, byteCount)
					}
				}
				// Kernels process full AVX-size units; the masked-off tail
				// is handled by the scalar loop below.
				start = byteCount & avxSizeMask
				outIdx += len(outPer)
				outs = outs[len(outPer):]
			}
			inIdx += len(inPer)
			ins = ins[len(inPer):]
		}
		if start >= end {
			return
		}
	}
	// Scalar fallback / tail: per-round windows, first input overwrites,
	// subsequent inputs XOR-accumulate.
	for start < len(inputs[0]) {
		for c := 0; c < len(inputs); c++ {
			in := inputs[c][start:end]
			for iRow := 0; iRow < len(outputs); iRow++ {
				if c == 0 {
					galMulSlice(matrixRows[iRow][c], in, outputs[iRow][start:end], &r.o)
				} else {
					galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow][start:end], &r.o)
				}
			}
		}
		start = end
		end += r.o.perRound
		if end > len(inputs[0]) {
			end = len(inputs[0])
		}
	}
}
894

895
// Perform the same as codeSomeShards, but split the workload into
896
// several goroutines.
897
func (r *reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, byteCount int) {
898
	var wg sync.WaitGroup
899
	gor := r.o.maxGoroutines
900

901
	var avx2Matrix []byte
902
	var gfniMatrix []uint64
903
	useAvx2 := r.canAVX2C(byteCount, len(inputs), len(outputs))
904
	useGFNI := r.canGFNI(byteCount, len(inputs), len(outputs))
905
	if useGFNI {
906
		var tmp [maxAvx2Inputs * maxAvx2Outputs]uint64
907
		gfniMatrix = genGFNIMatrix(matrixRows, len(inputs), 0, len(outputs), tmp[:])
908
	} else if useAvx2 {
909
		avx2Matrix = genAvx2Matrix(matrixRows, len(inputs), 0, len(outputs), r.getTmpSlice())
910
		defer r.putTmpSlice(avx2Matrix)
911
	} else if r.o.useGFNI && byteCount < 10<<20 && len(inputs)+len(outputs) > avx2CodeGenMinShards &&
912
		r.canAVX2C(byteCount/4, maxAvx2Inputs, maxAvx2Outputs) {
913
		// It appears there is a switchover point at around 10MB where
914
		// Regular processing is faster...
915
		r.codeSomeShardsAVXP(matrixRows, inputs, outputs, byteCount)
916
		return
917
	} else if r.o.useAVX2 && byteCount < 10<<20 && len(inputs)+len(outputs) > avx2CodeGenMinShards &&
918
		r.canAVX2C(byteCount/4, maxAvx2Inputs, maxAvx2Outputs) {
919
		// It appears there is a switchover point at around 10MB where
920
		// Regular processing is faster...
921
		r.codeSomeShardsAVXP(matrixRows, inputs, outputs, byteCount)
922
		return
923
	}
924

925
	do := byteCount / gor
926
	if do < r.o.minSplitSize {
927
		do = r.o.minSplitSize
928
	}
929

930
	exec := func(start, stop int) {
931
		if stop-start >= 64 {
932
			if useGFNI {
933
				start += galMulSlicesGFNI(gfniMatrix, inputs, outputs, start, stop)
934
			} else if useAvx2 {
935
				start += galMulSlicesAvx2(avx2Matrix, inputs, outputs, start, stop)
936
			}
937
		}
938

939
		lstart, lstop := start, start+r.o.perRound
940
		if lstop > stop {
941
			lstop = stop
942
		}
943
		for lstart < stop {
944
			for c := 0; c < len(inputs); c++ {
945
				in := inputs[c][lstart:lstop]
946
				for iRow := 0; iRow < len(outputs); iRow++ {
947
					if c == 0 {
948
						galMulSlice(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
949
					} else {
950
						galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
951
					}
952
				}
953
			}
954
			lstart = lstop
955
			lstop += r.o.perRound
956
			if lstop > stop {
957
				lstop = stop
958
			}
959
		}
960
		wg.Done()
961
	}
962
	if gor <= 1 {
963
		wg.Add(1)
964
		exec(0, byteCount)
965
		return
966
	}
967

968
	// Make sizes divisible by 64
969
	do = (do + 63) & (^63)
970
	start := 0
971
	for start < byteCount {
972
		if start+do > byteCount {
973
			do = byteCount - start
974
		}
975

976
		wg.Add(1)
977
		go exec(start, start+do)
978
		start += do
979
	}
980
	wg.Wait()
981
}
982

983
// codeSomeShardsAVXP performs the same work as codeSomeShards, but splits
// the workload into several goroutines, each executing a precomputed plan
// of AVX2 code-gen sub-matrix multiplications over its own byte range.
func (r *reedSolomon) codeSomeShardsAVXP(matrixRows, inputs, outputs [][]byte, byteCount int) {
	var wg sync.WaitGroup
	gor := r.o.maxGoroutines

	// state is one plan entry: a sub-matrix `m` applied to an
	// input-slice/output-slice tile. `first` marks tiles that overwrite
	// their outputs (inIdx == 0); all later tiles XOR into them.
	type state struct {
		input  [][]byte
		output [][]byte
		m      []byte
		first  bool
	}
	// Make a plan...
	// Capacity is the number of input tiles times output tiles.
	plan := make([]state, 0, ((len(inputs)+maxAvx2Inputs-1)/maxAvx2Inputs)*((len(outputs)+maxAvx2Outputs-1)/maxAvx2Outputs))

	tmp := r.getTmpSlice()
	defer r.putTmpSlice(tmp)

	// Flips between input first to output first.
	// We put the smallest data load in the inner loop.
	if len(inputs) > len(outputs) {
		// Outer loop over input tiles, inner over output tiles.
		inIdx := 0
		ins := inputs
		for len(ins) > 0 {
			inPer := ins
			if len(inPer) > maxAvx2Inputs {
				inPer = inPer[:maxAvx2Inputs]
			}
			outs := outputs
			outIdx := 0
			for len(outs) > 0 {
				outPer := outs
				if len(outPer) > maxAvx2Outputs {
					outPer = outPer[:maxAvx2Outputs]
				}
				// Generate local matrix
				m := genAvx2Matrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), tmp)
				// Each entry consumes len(m) bytes of the pooled scratch.
				// NOTE(review): assumes getTmpSlice returns enough capacity
				// for every sub-matrix in the plan — confirm pool sizing.
				tmp = tmp[len(m):]
				plan = append(plan, state{
					input:  inPer,
					output: outPer,
					m:      m,
					first:  inIdx == 0,
				})
				outIdx += len(outPer)
				outs = outs[len(outPer):]
			}
			inIdx += len(inPer)
			ins = ins[len(inPer):]
		}
	} else {
		// Outer loop over output tiles, inner over input tiles.
		outs := outputs
		outIdx := 0
		for len(outs) > 0 {
			outPer := outs
			if len(outPer) > maxAvx2Outputs {
				outPer = outPer[:maxAvx2Outputs]
			}

			inIdx := 0
			ins := inputs
			for len(ins) > 0 {
				inPer := ins
				if len(inPer) > maxAvx2Inputs {
					inPer = inPer[:maxAvx2Inputs]
				}
				// Generate local matrix
				m := genAvx2Matrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), tmp)
				tmp = tmp[len(m):]
				//fmt.Println("bytes:", len(inPer)*r.o.perRound, "out:", len(outPer)*r.o.perRound)
				plan = append(plan, state{
					input:  inPer,
					output: outPer,
					m:      m,
					first:  inIdx == 0,
				})
				inIdx += len(inPer)
				ins = ins[len(inPer):]
			}
			outIdx += len(outPer)
			outs = outs[len(outPer):]
		}
	}

	// Bytes handled per goroutine.
	do := byteCount / gor
	if do < r.o.minSplitSize {
		do = r.o.minSplitSize
	}

	exec := func(start, stop int) {
		lstart, lstop := start, start+r.o.perRound
		if lstop > stop {
			lstop = stop
		}
		for lstart < stop {
			if lstop-lstart >= minAvx2Size {
				// Execute plan...
				for _, p := range plan {
					if p.first {
						galMulSlicesAvx2(p.m, p.input, p.output, lstart, lstop)
					} else {
						galMulSlicesAvx2Xor(p.m, p.input, p.output, lstart, lstop)
					}
				}
				// The AVX2 kernels only process aligned bytes; advance by the
				// aligned portion and fall through for any unaligned remainder.
				lstart += (lstop - lstart) & avxSizeMask
				if lstart == lstop {
					lstop += r.o.perRound
					if lstop > stop {
						lstop = stop
					}
					continue
				}
			}

			// Generic fallback for the unaligned remainder of this chunk.
			for c := range inputs {
				in := inputs[c][lstart:lstop]
				for iRow := 0; iRow < len(outputs); iRow++ {
					if c == 0 {
						galMulSlice(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
					} else {
						galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
					}
				}
			}
			lstart = lstop
			lstop += r.o.perRound
			if lstop > stop {
				lstop = stop
			}
		}
		wg.Done()
	}
	if gor == 1 {
		// Single goroutine: run inline; exec calls wg.Done for the Add above.
		wg.Add(1)
		exec(0, byteCount)
		return
	}

	// Make sizes divisible by 64
	do = (do + 63) & (^63)
	start := 0
	for start < byteCount {
		if start+do > byteCount {
			do = byteCount - start
		}

		wg.Add(1)
		go exec(start, start+do)
		start += do
	}
	wg.Wait()
}
1135

1136
// codeSomeShardsGFNI performs the same work as codeSomeShards, but splits
// the workload into several goroutines, each executing a precomputed plan
// of GFNI sub-matrix multiplications over its own byte range.
func (r *reedSolomon) codeSomeShardsGFNI(matrixRows, inputs, outputs [][]byte, byteCount int) {
	var wg sync.WaitGroup
	gor := r.o.maxGoroutines

	// state is one plan entry: a GFNI sub-matrix `m` applied to an
	// input-slice/output-slice tile. `first` marks tiles that overwrite
	// their outputs (inIdx == 0); all later tiles XOR into them.
	type state struct {
		input  [][]byte
		output [][]byte
		m      []uint64
		first  bool
	}
	// Make a plan...
	// Capacity is the number of input tiles times output tiles.
	plan := make([]state, 0, ((len(inputs)+maxAvx2Inputs-1)/maxAvx2Inputs)*((len(outputs)+maxAvx2Outputs-1)/maxAvx2Outputs))

	// Flips between input first to output first.
	// We put the smallest data load in the inner loop.
	if len(inputs) > len(outputs) {
		// Outer loop over input tiles, inner over output tiles.
		inIdx := 0
		ins := inputs
		for len(ins) > 0 {
			inPer := ins
			if len(inPer) > maxAvx2Inputs {
				inPer = inPer[:maxAvx2Inputs]
			}
			outs := outputs
			outIdx := 0
			for len(outs) > 0 {
				outPer := outs
				if len(outPer) > maxAvx2Outputs {
					outPer = outPer[:maxAvx2Outputs]
				}
				// Generate local matrix
				// Each plan entry gets its own freshly allocated matrix.
				m := genGFNIMatrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), make([]uint64, len(inPer)*len(outPer)))
				plan = append(plan, state{
					input:  inPer,
					output: outPer,
					m:      m,
					first:  inIdx == 0,
				})
				outIdx += len(outPer)
				outs = outs[len(outPer):]
			}
			inIdx += len(inPer)
			ins = ins[len(inPer):]
		}
	} else {
		// Outer loop over output tiles, inner over input tiles.
		outs := outputs
		outIdx := 0
		for len(outs) > 0 {
			outPer := outs
			if len(outPer) > maxAvx2Outputs {
				outPer = outPer[:maxAvx2Outputs]
			}

			inIdx := 0
			ins := inputs
			for len(ins) > 0 {
				inPer := ins
				if len(inPer) > maxAvx2Inputs {
					inPer = inPer[:maxAvx2Inputs]
				}
				// Generate local matrix
				m := genGFNIMatrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), make([]uint64, len(inPer)*len(outPer)))
				//fmt.Println("bytes:", len(inPer)*r.o.perRound, "out:", len(outPer)*r.o.perRound)
				plan = append(plan, state{
					input:  inPer,
					output: outPer,
					m:      m,
					first:  inIdx == 0,
				})
				inIdx += len(inPer)
				ins = ins[len(inPer):]
			}
			outIdx += len(outPer)
			outs = outs[len(outPer):]
		}
	}

	// Bytes handled per goroutine.
	do := byteCount / gor
	if do < r.o.minSplitSize {
		do = r.o.minSplitSize
	}

	exec := func(start, stop int) {
		lstart, lstop := start, start+r.o.perRound
		if lstop > stop {
			lstop = stop
		}
		for lstart < stop {
			if lstop-lstart >= minAvx2Size {
				// Execute plan...
				for _, p := range plan {
					if p.first {
						galMulSlicesGFNI(p.m, p.input, p.output, lstart, lstop)
					} else {
						galMulSlicesGFNIXor(p.m, p.input, p.output, lstart, lstop)
					}
				}
				// The GFNI kernels only process aligned bytes; advance by the
				// aligned portion and fall through for any unaligned remainder.
				lstart += (lstop - lstart) & avxSizeMask
				if lstart == lstop {
					lstop += r.o.perRound
					if lstop > stop {
						lstop = stop
					}
					continue
				}
			}

			// Generic fallback for the unaligned remainder of this chunk.
			for c := range inputs {
				in := inputs[c][lstart:lstop]
				for iRow := 0; iRow < len(outputs); iRow++ {
					if c == 0 {
						galMulSlice(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
					} else {
						galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
					}
				}
			}
			lstart = lstop
			lstop += r.o.perRound
			if lstop > stop {
				lstop = stop
			}
		}
		wg.Done()
	}
	if gor == 1 {
		// Single goroutine: run inline; exec calls wg.Done for the Add above.
		wg.Add(1)
		exec(0, byteCount)
		return
	}

	// Make sizes divisible by 64
	do = (do + 63) & (^63)
	start := 0
	for start < byteCount {
		if start+do > byteCount {
			do = byteCount - start
		}

		wg.Add(1)
		go exec(start, start+do)
		start += do
	}
	wg.Wait()
}
1283

1284
// checkSomeShards is mostly the same as codeSomeShards,
1285
// except this will check values and return
1286
// as soon as a difference is found.
1287
func (r *reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, byteCount int) bool {
1288
	if len(toCheck) == 0 {
1289
		return true
1290
	}
1291

1292
	outputs := AllocAligned(len(toCheck), byteCount)
1293
	r.codeSomeShards(matrixRows, inputs, outputs, byteCount)
1294

1295
	for i, calc := range outputs {
1296
		if !bytes.Equal(calc, toCheck[i]) {
1297
			return false
1298
		}
1299
	}
1300
	return true
1301
}
1302

1303
// ErrShardNoData will be returned if there are no shards,
// or if the length of all shards is zero.
// Compare with errors.Is(err, ErrShardNoData).
var ErrShardNoData = errors.New("no shard data")

// ErrShardSize is returned if shard length isn't the same for all
// shards.
// Compare with errors.Is(err, ErrShardSize).
var ErrShardSize = errors.New("shard sizes do not match")
1310

1311
// checkShards will check if shards are the same size
1312
// or 0, if allowed. An error is returned if this fails.
1313
// An error is also returned if all shards are size 0.
1314
func checkShards(shards [][]byte, nilok bool) error {
1315
	size := shardSize(shards)
1316
	if size == 0 {
1317
		return ErrShardNoData
1318
	}
1319
	for _, shard := range shards {
1320
		if len(shard) != size {
1321
			if len(shard) != 0 || !nilok {
1322
				return ErrShardSize
1323
			}
1324
		}
1325
	}
1326
	return nil
1327
}
1328

1329
// shardSize reports the size of a single shard: the length of the
// first non-empty shard, or 0 when every shard is empty.
func shardSize(shards [][]byte) int {
	for i := range shards {
		if n := len(shards[i]); n > 0 {
			return n
		}
	}
	return 0
}
1340

1341
// Reconstruct will recreate the missing shards, if possible.
//
// Given a list of shards, some of which contain data, fills in the
// ones that don't have data.
//
// The length of the array must be equal to shards.
// You indicate that a shard is missing by setting it to nil or zero-length.
// If a shard is zero-length but has sufficient capacity, that memory will
// be used, otherwise a new []byte will be allocated.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
func (r *reedSolomon) Reconstruct(shards [][]byte) error {
	// dataOnly=false: missing parity shards are rebuilt as well;
	// required=nil: every missing shard is reconstructed.
	return r.reconstruct(shards, false, nil)
}
1359

1360
// ReconstructData will recreate any missing data shards, if possible.
//
// Given a list of shards, some of which contain data, fills in the
// data shards that don't have data.
//
// The length of the array must be equal to shards.
// You indicate that a shard is missing by setting it to nil or zero-length.
// If a shard is zero-length but has sufficient capacity, that memory will
// be used, otherwise a new []byte will be allocated.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// As the reconstructed shard set may contain missing parity shards,
// calling the Verify function is likely to fail.
func (r *reedSolomon) ReconstructData(shards [][]byte) error {
	// dataOnly=true: missing parity shards are left untouched.
	return r.reconstruct(shards, true, nil)
}
1378

1379
// ReconstructSome will recreate only requested data shards, if possible.
//
// Given a list of shards, some of which contain data, fills in the
// data shards indicated by true values in the "required" parameter.
// The length of "required" array must be equal to dataShards.
//
// The length of "shards" array must be equal to shards.
// You indicate that a shard is missing by setting it to nil or zero-length.
// If a shard is zero-length but has sufficient capacity, that memory will
// be used, otherwise a new []byte will be allocated.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// As the reconstructed shard set may contain missing parity shards,
// calling the Verify function is likely to fail.
func (r *reedSolomon) ReconstructSome(shards [][]byte, required []bool) error {
	// dataOnly=true: only the requested data shards are rebuilt;
	// parity shards are never reconstructed by this method.
	return r.reconstruct(shards, true, required)
}
1398

1399
// reconstruct will recreate the missing data totalShards, and unless
1400
// dataOnly is true, also the missing parity totalShards
1401
//
1402
// The length of "shards" array must be equal to totalShards.
1403
// You indicate that a shard is missing by setting it to nil.
1404
//
1405
// If there are too few totalShards to reconstruct the missing
1406
// ones, ErrTooFewShards will be returned.
1407
func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []bool) error {
1408
	if len(shards) != r.totalShards || required != nil && len(required) < r.dataShards {
1409
		return ErrTooFewShards
1410
	}
1411
	// Check arguments.
1412
	err := checkShards(shards, true)
1413
	if err != nil {
1414
		return err
1415
	}
1416

1417
	shardSize := shardSize(shards)
1418

1419
	// Quick check: are all of the shards present?  If so, there's
1420
	// nothing to do.
1421
	numberPresent := 0
1422
	dataPresent := 0
1423
	missingRequired := 0
1424
	for i := 0; i < r.totalShards; i++ {
1425
		if len(shards[i]) != 0 {
1426
			numberPresent++
1427
			if i < r.dataShards {
1428
				dataPresent++
1429
			}
1430
		} else if required != nil && required[i] {
1431
			missingRequired++
1432
		}
1433
	}
1434
	if numberPresent == r.totalShards || dataOnly && dataPresent == r.dataShards ||
1435
		required != nil && missingRequired == 0 {
1436
		// Cool. All of the shards have data. We don't
1437
		// need to do anything.
1438
		return nil
1439
	}
1440

1441
	// More complete sanity check
1442
	if numberPresent < r.dataShards {
1443
		return ErrTooFewShards
1444
	}
1445

1446
	// Pull out an array holding just the shards that
1447
	// correspond to the rows of the submatrix.  These shards
1448
	// will be the input to the decoding process that re-creates
1449
	// the missing data shards.
1450
	//
1451
	// Also, create an array of indices of the valid rows we do have
1452
	// and the invalid rows we don't have up until we have enough valid rows.
1453
	subShards := make([][]byte, r.dataShards)
1454
	validIndices := make([]int, r.dataShards)
1455
	invalidIndices := make([]int, 0)
1456
	subMatrixRow := 0
1457
	for matrixRow := 0; matrixRow < r.totalShards && subMatrixRow < r.dataShards; matrixRow++ {
1458
		if len(shards[matrixRow]) != 0 {
1459
			subShards[subMatrixRow] = shards[matrixRow]
1460
			validIndices[subMatrixRow] = matrixRow
1461
			subMatrixRow++
1462
		} else {
1463
			invalidIndices = append(invalidIndices, matrixRow)
1464
		}
1465
	}
1466

1467
	// Attempt to get the cached inverted matrix out of the tree
1468
	// based on the indices of the invalid rows.
1469
	dataDecodeMatrix := r.tree.GetInvertedMatrix(invalidIndices)
1470

1471
	// If the inverted matrix isn't cached in the tree yet we must
1472
	// construct it ourselves and insert it into the tree for the
1473
	// future.  In this way the inversion tree is lazily loaded.
1474
	if dataDecodeMatrix == nil {
1475
		// Pull out the rows of the matrix that correspond to the
1476
		// shards that we have and build a square matrix.  This
1477
		// matrix could be used to generate the shards that we have
1478
		// from the original data.
1479
		subMatrix, _ := newMatrix(r.dataShards, r.dataShards)
1480
		for subMatrixRow, validIndex := range validIndices {
1481
			for c := 0; c < r.dataShards; c++ {
1482
				subMatrix[subMatrixRow][c] = r.m[validIndex][c]
1483
			}
1484
		}
1485
		// Invert the matrix, so we can go from the encoded shards
1486
		// back to the original data.  Then pull out the row that
1487
		// generates the shard that we want to decode.  Note that
1488
		// since this matrix maps back to the original data, it can
1489
		// be used to create a data shard, but not a parity shard.
1490
		dataDecodeMatrix, err = subMatrix.Invert()
1491
		if err != nil {
1492
			return err
1493
		}
1494

1495
		// Cache the inverted matrix in the tree for future use keyed on the
1496
		// indices of the invalid rows.
1497
		err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.totalShards)
1498
		if err != nil {
1499
			return err
1500
		}
1501
	}
1502

1503
	// Re-create any data shards that were missing.
1504
	//
1505
	// The input to the coding is all of the shards we actually
1506
	// have, and the output is the missing data shards.  The computation
1507
	// is done using the special decode matrix we just built.
1508
	outputs := make([][]byte, r.parityShards)
1509
	matrixRows := make([][]byte, r.parityShards)
1510
	outputCount := 0
1511

1512
	for iShard := 0; iShard < r.dataShards; iShard++ {
1513
		if len(shards[iShard]) == 0 && (required == nil || required[iShard]) {
1514
			if cap(shards[iShard]) >= shardSize {
1515
				shards[iShard] = shards[iShard][0:shardSize]
1516
			} else {
1517
				shards[iShard] = AllocAligned(1, shardSize)[0]
1518
			}
1519
			outputs[outputCount] = shards[iShard]
1520
			matrixRows[outputCount] = dataDecodeMatrix[iShard]
1521
			outputCount++
1522
		}
1523
	}
1524
	r.codeSomeShards(matrixRows, subShards, outputs[:outputCount], shardSize)
1525

1526
	if dataOnly {
1527
		// Exit out early if we are only interested in the data shards
1528
		return nil
1529
	}
1530

1531
	// Now that we have all of the data shards intact, we can
1532
	// compute any of the parity that is missing.
1533
	//
1534
	// The input to the coding is ALL of the data shards, including
1535
	// any that we just calculated.  The output is whichever of the
1536
	// data shards were missing.
1537
	outputCount = 0
1538
	for iShard := r.dataShards; iShard < r.totalShards; iShard++ {
1539
		if len(shards[iShard]) == 0 && (required == nil || required[iShard]) {
1540
			if cap(shards[iShard]) >= shardSize {
1541
				shards[iShard] = shards[iShard][0:shardSize]
1542
			} else {
1543
				shards[iShard] = AllocAligned(1, shardSize)[0]
1544
			}
1545
			outputs[outputCount] = shards[iShard]
1546
			matrixRows[outputCount] = r.parity[iShard-r.dataShards]
1547
			outputCount++
1548
		}
1549
	}
1550
	r.codeSomeShards(matrixRows, shards[:r.dataShards], outputs[:outputCount], shardSize)
1551
	return nil
1552
}
1553

1554
// ErrShortData will be returned by Split(), if there isn't enough data
// to fill the number of shards.
// It is also returned by Join() when the shards cannot cover outSize bytes.
var ErrShortData = errors.New("not enough data to fill the number of requested shards")
1557

1558
// Split a data slice into the number of shards given to the encoder,
// and create empty parity shards if necessary.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// If there is extra capacity on the provided data slice
// it will be used instead of allocating parity shards.
// It will be zeroed out.
//
// There must be at least 1 byte otherwise ErrShortData will be
// returned.
//
// The data will not be copied, except for the last shard, so you
// should not modify the data of the input slice afterwards.
func (r *reedSolomon) Split(data []byte) ([][]byte, error) {
	if len(data) == 0 {
		return nil, ErrShortData
	}
	if r.totalShards == 1 {
		// Trivial layout: the single shard is the input itself.
		return [][]byte{data}, nil
	}

	dataLen := len(data)
	// Calculate number of bytes per data shard.
	perShard := (len(data) + r.dataShards - 1) / r.dataShards
	// Total bytes needed to back every shard (data + parity).
	needTotal := r.totalShards * perShard

	if cap(data) > len(data) {
		// Extend into unused capacity so fewer shards need allocating,
		// and zero the newly exposed tail so parity starts clean.
		if cap(data) > needTotal {
			data = data[:needTotal]
		} else {
			data = data[:cap(data)]
		}
		clear := data[dataLen:]
		for i := range clear {
			clear[i] = 0
		}
	}

	// Only allocate memory if necessary
	var padding [][]byte
	if len(data) < needTotal {
		// calculate maximum number of full shards in `data` slice
		fullShards := len(data) / perShard
		padding = AllocAligned(r.totalShards-fullShards, perShard)

		if dataLen > perShard*fullShards {
			// Copy partial shards
			// Trailing bytes that don't fill a whole shard are moved into
			// the freshly allocated (zeroed) padding shards.
			copyFrom := data[perShard*fullShards : dataLen]
			for i := range padding {
				if len(copyFrom) <= 0 {
					break
				}
				copyFrom = copyFrom[copy(padding[i], copyFrom):]
			}
		}
	}

	// Split into equal-length shards.
	// Full shards alias the input via three-index slices (no copy,
	// capacity capped so appends can't stomp a sibling shard).
	dst := make([][]byte, r.totalShards)
	i := 0
	for ; i < len(dst) && len(data) >= perShard; i++ {
		dst[i] = data[:perShard:perShard]
		data = data[perShard:]
	}

	// Any remaining slots are filled from the allocated padding shards.
	for j := 0; i+j < len(dst); j++ {
		dst[i+j] = padding[0]
		padding = padding[1:]
	}

	return dst, nil
}
1633

1634
// ErrReconstructRequired is returned if too few data shards are intact and a
// reconstruction is required before you can successfully join the shards.
// Compare with errors.Is(err, ErrReconstructRequired).
var ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
1637

1638
// Join the shards and write the data segment to dst.
1639
//
1640
// Only the data shards are considered.
1641
// You must supply the exact output size you want.
1642
//
1643
// If there are to few shards given, ErrTooFewShards will be returned.
1644
// If the total data size is less than outSize, ErrShortData will be returned.
1645
// If one or more required data shards are nil, ErrReconstructRequired will be returned.
1646
func (r *reedSolomon) Join(dst io.Writer, shards [][]byte, outSize int) error {
1647
	// Do we have enough shards?
1648
	if len(shards) < r.dataShards {
1649
		return ErrTooFewShards
1650
	}
1651
	shards = shards[:r.dataShards]
1652

1653
	// Do we have enough data?
1654
	size := 0
1655
	for _, shard := range shards {
1656
		if shard == nil {
1657
			return ErrReconstructRequired
1658
		}
1659
		size += len(shard)
1660

1661
		// Do we have enough data already?
1662
		if size >= outSize {
1663
			break
1664
		}
1665
	}
1666
	if size < outSize {
1667
		return ErrShortData
1668
	}
1669

1670
	// Copy data to dst
1671
	write := outSize
1672
	for _, shard := range shards {
1673
		if write < len(shard) {
1674
			_, err := dst.Write(shard[:write])
1675
			return err
1676
		}
1677
		n, err := dst.Write(shard)
1678
		if err != nil {
1679
			return err
1680
		}
1681
		write -= n
1682
	}
1683
	return nil
1684
}
1685

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.