cubefs

Форк
0
1259 строк · 26.6 Кб
1
package reedsolomon
2

3
// This is a O(n*log n) implementation of Reed-Solomon
4
// codes, ported from the C++ library https://github.com/catid/leopard.
5
//
6
// The implementation is based on the paper
7
//
8
// S.-J. Lin, T. Y. Al-Naffouri, Y. S. Han, and W.-H. Chung,
9
// "Novel Polynomial Basis with Fast Fourier Transform
10
// and Its Application to Reed-Solomon Erasure Codes"
11
// IEEE Trans. on Information Theory, pp. 6284-6299, November, 2016.
12

13
import (
14
	"bytes"
15
	"io"
16
	"math/bits"
17
	"sync"
18
	"unsafe"
19

20
	"github.com/klauspost/cpuid/v2"
21
)
22

23
// leopardFF16 is like reedSolomon but for more than 256 total shards.
type leopardFF16 struct {
	dataShards   int // Number of data shards, should not be modified.
	parityShards int // Number of parity shards, should not be modified.
	totalShards  int // Total number of shards. Calculated, and should not be modified.

	// workPool caches scratch shard slices between encode/reconstruct calls.
	workPool sync.Pool

	o options // Encoder options, fixed at construction.
}
33

34
// newFF16 is like New, but for more than 256 total shards.
35
func newFF16(dataShards, parityShards int, opt options) (*leopardFF16, error) {
36
	initConstants()
37

38
	if dataShards <= 0 || parityShards <= 0 {
39
		return nil, ErrInvShardNum
40
	}
41

42
	if dataShards+parityShards > 65536 {
43
		return nil, ErrMaxShardNum
44
	}
45

46
	r := &leopardFF16{
47
		dataShards:   dataShards,
48
		parityShards: parityShards,
49
		totalShards:  dataShards + parityShards,
50
		o:            opt,
51
	}
52
	return r, nil
53
}
54

55
// Compile-time check that leopardFF16 implements the Extensions interface.
var _ = Extensions(&leopardFF16{})

// ShardSizeMultiple returns the required alignment of shard sizes (64 bytes).
func (r *leopardFF16) ShardSizeMultiple() int {
	return 64
}

// DataShards returns the number of data shards.
func (r *leopardFF16) DataShards() int {
	return r.dataShards
}

// ParityShards returns the number of parity shards.
func (r *leopardFF16) ParityShards() int {
	return r.parityShards
}

// TotalShards returns the total number of shards.
func (r *leopardFF16) TotalShards() int {
	return r.totalShards
}

// AllocAligned allocates aligned storage for all shards, 'each' bytes per shard.
func (r *leopardFF16) AllocAligned(each int) [][]byte {
	return AllocAligned(r.totalShards, each)
}
76

77
// ffe is an element of the finite field GF(2^16).
type ffe uint16

const (
	bitwidth   = 16
	order      = 1 << bitwidth // Field size: 65536.
	modulus    = order - 1     // Largest field value / log-domain modulus: 65535.
	polynomial = 0x1002D       // Field-generator polynomial used to build the LFSR tables.
)

var (
	fftSkew  *[modulus]ffe // FFT skew factors, indexed by position. Built by initFFTSkew.
	logWalsh *[order]ffe   // FWHT of the log table; used by the decoder. Built by initFFTSkew.
)

// Logarithm Tables
var (
	logLUT *[order]ffe
	expLUT *[order]ffe
)

// Stores the partial products of x * y at offset x + y * 65536
// Repeated accesses from the same y value are faster
var mul16LUTs *[order]mul16LUT

type mul16LUT struct {
	// Contains Lo product as a single lookup.
	// Should be XORed with Hi lookup for result.
	Lo [256]ffe
	Hi [256]ffe
}

// Stores lookup for avx2
var multiply256LUT *[order][8 * 16]byte
110

111
// Encode calculates parity into shards[dataShards:].
// The slice must contain exactly totalShards entries; all shards must be
// non-nil and the same size (enforced by checkShards), and the size must
// be a multiple of 64 bytes (enforced in encode).
func (r *leopardFF16) Encode(shards [][]byte) error {
	if len(shards) != r.totalShards {
		return ErrTooFewShards
	}

	if err := checkShards(shards, false); err != nil {
		return err
	}
	return r.encode(shards)
}
121

122
// encode computes the parity shards using the leopard FFT construction:
// data shards are IFFT-accumulated in groups of m = ceilPow2(parityShards),
// then a single forward FFT of the accumulator yields the parity.
func (r *leopardFF16) encode(shards [][]byte) error {
	shardSize := shardSize(shards)
	if shardSize%64 != 0 {
		return ErrShardSize
	}

	m := ceilPow2(r.parityShards)
	var work [][]byte
	if w, ok := r.workPool.Get().([][]byte); ok {
		work = w
	}
	// Need 2*m scratch shards: m accumulators plus m temporaries.
	if cap(work) >= m*2 {
		work = work[:m*2]
	} else {
		work = AllocAligned(m*2, shardSize)
	}
	for i := range work {
		if cap(work[i]) < shardSize {
			work[i] = AllocAligned(1, shardSize)[0]
		} else {
			work[i] = work[i][:shardSize]
		}
	}
	defer r.workPool.Put(work)

	mtrunc := m
	if r.dataShards < mtrunc {
		mtrunc = r.dataShards
	}

	skewLUT := fftSkew[m-1:]

	sh := shards
	ifftDITEncoder(
		sh[:r.dataShards],
		mtrunc,
		work,
		nil, // No xor output
		m,
		skewLUT,
		&r.o,
	)

	lastCount := r.dataShards % m
	if m >= r.dataShards {
		goto skip_body
	}

	// For sets of m data pieces:
	for i := m; i+m <= r.dataShards; i += m {
		sh = sh[m:]
		skewLUT = skewLUT[m:]

		// work <- work xor IFFT(data + i, m, m + i)

		ifftDITEncoder(
			sh, // data source
			m,
			work[m:], // temporary workspace
			work,     // xor destination
			m,
			skewLUT,
			&r.o,
		)
	}

	// Handle final partial set of m pieces:
	if lastCount != 0 {
		sh = sh[m:]
		skewLUT = skewLUT[m:]

		// work <- work xor IFFT(data + i, m, m + i)

		ifftDITEncoder(
			sh, // data source
			lastCount,
			work[m:], // temporary workspace
			work,     // xor destination
			m,
			skewLUT,
			&r.o,
		)
	}

skip_body:
	// work <- FFT(work, m, 0)
	fftDIT(work, r.parityShards, m, fftSkew[:], &r.o)

	// Copy the first parityShards outputs into the caller's parity shards,
	// reusing caller storage when large enough.
	// NOTE(review): the else branch hands the caller a pooled work buffer;
	// it appears unreachable since checkShards requires equal-size shards —
	// confirm before relying on it.
	for i, w := range work[:r.parityShards] {
		sh := shards[i+r.dataShards]
		if cap(sh) >= shardSize {
			sh = append(sh[:0], w...)
		} else {
			sh = w
		}
		shards[i+r.dataShards] = sh
	}

	return nil
}
222

223
// EncodeIdx adds parity for a single data shard.
// Not supported by the leopard implementation; always returns ErrNotSupported.
func (r *leopardFF16) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error {
	return ErrNotSupported
}
226

227
// Join concatenates the data shards and writes exactly outSize bytes to dst.
// Only the data shards are consumed. Returns ErrTooFewShards if fewer than
// dataShards entries are supplied, ErrReconstructRequired if any data shard
// is nil, and ErrShortData if the shards hold fewer than outSize bytes.
func (r *leopardFF16) Join(dst io.Writer, shards [][]byte, outSize int) error {
	if len(shards) < r.dataShards {
		return ErrTooFewShards
	}
	data := shards[:r.dataShards]

	// Verify enough bytes are available before writing anything.
	total := 0
	for _, s := range data {
		if s == nil {
			return ErrReconstructRequired
		}
		total += len(s)
		if total >= outSize {
			break
		}
	}
	if total < outSize {
		return ErrShortData
	}

	// Stream exactly outSize bytes to dst.
	remaining := outSize
	for _, s := range data {
		if remaining < len(s) {
			_, err := dst.Write(s[:remaining])
			return err
		}
		n, err := dst.Write(s)
		if err != nil {
			return err
		}
		remaining -= n
	}
	return nil
}
266

267
// Update recalculates parity for changed data shards.
// Not supported by the leopard implementation; always returns ErrNotSupported.
func (r *leopardFF16) Update(shards [][]byte, newDatashards [][]byte) error {
	return ErrNotSupported
}
270

271
// Split splits data into dataShards equally sized, 64-byte-aligned shards,
// and allocates empty parity shards as needed. The leading full shards
// reference the input slice directly (no copy); any partial tail is copied
// into freshly allocated padding. Returns ErrShortData for empty input.
func (r *leopardFF16) Split(data []byte) ([][]byte, error) {
	if len(data) == 0 {
		return nil, ErrShortData
	}
	// Fast path: a single, already-aligned shard.
	if r.totalShards == 1 && len(data)&63 == 0 {
		return [][]byte{data}, nil
	}
	dataLen := len(data)
	// Calculate number of bytes per data shard, rounded up to 64 bytes.
	perShard := (len(data) + r.dataShards - 1) / r.dataShards
	perShard = ((perShard + 63) / 64) * 64
	needTotal := r.totalShards * perShard

	// Reuse any spare capacity of the input, zeroing the extension
	// so the padding bytes are deterministic.
	if cap(data) > len(data) {
		if cap(data) > needTotal {
			data = data[:needTotal]
		} else {
			data = data[:cap(data)]
		}
		clear := data[dataLen:]
		for i := range clear {
			clear[i] = 0
		}
	}

	// Only allocate memory if necessary
	var padding [][]byte
	if len(data) < needTotal {
		// calculate maximum number of full shards in `data` slice
		fullShards := len(data) / perShard
		padding = AllocAligned(r.totalShards-fullShards, perShard)
		if dataLen > perShard*fullShards {
			// Copy partial shards
			copyFrom := data[perShard*fullShards : dataLen]
			for i := range padding {
				if len(copyFrom) <= 0 {
					break
				}
				copyFrom = copyFrom[copy(padding[i], copyFrom):]
			}
		}
	} else {
		// Input already covers everything; zero the tail beyond real data.
		zero := data[dataLen : r.totalShards*perShard]
		for i := range zero {
			zero[i] = 0
		}
	}

	// Split into equal-length shards.
	dst := make([][]byte, r.totalShards)
	i := 0
	for ; i < len(dst) && len(data) >= perShard; i++ {
		// Three-index slice caps each shard so appends cannot clobber neighbors.
		dst[i] = data[:perShard:perShard]
		data = data[perShard:]
	}

	// Remaining shards come from the freshly allocated padding.
	for j := 0; i+j < len(dst); j++ {
		dst[i+j] = padding[0]
		padding = padding[1:]
	}

	return dst, nil
}
334

335
// ReconstructSome reconstructs the requested shards.
// The required argument is currently ignored; this simply reconstructs
// all data shards via ReconstructData.
func (r *leopardFF16) ReconstructSome(shards [][]byte, required []bool) error {
	return r.ReconstructData(shards)
}

// Reconstruct recreates all missing data and parity shards, if possible.
func (r *leopardFF16) Reconstruct(shards [][]byte) error {
	return r.reconstruct(shards, true)
}

// ReconstructData recreates missing data shards only;
// missing parity shards are left empty.
func (r *leopardFF16) ReconstructData(shards [][]byte) error {
	return r.reconstruct(shards, false)
}
346

347
// Verify reports whether the parity shards are consistent with the data
// shards by re-encoding into scratch storage and comparing.
func (r *leopardFF16) Verify(shards [][]byte) (bool, error) {
	if len(shards) != r.totalShards {
		return false, ErrTooFewShards
	}
	if err := checkShards(shards, false); err != nil {
		return false, err
	}

	// Re-encode parity into temporary shards, sharing the data shards.
	sz := len(shards[0])
	tmp := make([][]byte, r.totalShards)
	copy(tmp, shards[:r.dataShards])
	for i := r.dataShards; i < r.totalShards; i++ {
		tmp[i] = make([]byte, sz)
	}
	if err := r.Encode(tmp); err != nil {
		return false, err
	}

	// Compare recomputed parity with the supplied parity.
	for i := r.dataShards; i < r.totalShards; i++ {
		if !bytes.Equal(tmp[i], shards[i]) {
			return false, nil
		}
	}
	return true, nil
}
374

375
// reconstruct fills in missing shards using the error-locator construction
// from the Lin et al. paper. If recoverAll is false only data shards are
// recovered and missing parity shards are left untouched.
func (r *leopardFF16) reconstruct(shards [][]byte, recoverAll bool) error {
	if len(shards) != r.totalShards {
		return ErrTooFewShards
	}

	if err := checkShards(shards, true); err != nil {
		return err
	}

	// Quick check: are all of the shards present?  If so, there's
	// nothing to do.
	numberPresent := 0
	dataPresent := 0
	for i := 0; i < r.totalShards; i++ {
		if len(shards[i]) != 0 {
			numberPresent++
			if i < r.dataShards {
				dataPresent++
			}
		}
	}
	if numberPresent == r.totalShards || !recoverAll && dataPresent == r.dataShards {
		// Cool. All of the shards have data. We don't
		// need to do anything.
		return nil
	}

	// Use only if we are missing less than 1/4 parity.
	useBits := r.totalShards-numberPresent <= r.parityShards/4

	// Check if we have enough to reconstruct.
	if numberPresent < r.dataShards {
		return ErrTooFewShards
	}

	shardSize := shardSize(shards)
	if shardSize%64 != 0 {
		return ErrShardSize
	}

	// m: transform size covering parity; n: transform size for the full set.
	m := ceilPow2(r.parityShards)
	n := ceilPow2(m + r.dataShards)

	const LEO_ERROR_BITFIELD_OPT = true

	// Fill in error locations.
	var errorBits errorBitfield
	var errLocs [order]ffe
	for i := 0; i < r.parityShards; i++ {
		if len(shards[i+r.dataShards]) == 0 {
			errLocs[i] = 1
			if LEO_ERROR_BITFIELD_OPT && recoverAll {
				errorBits.set(i)
			}
		}
	}
	// Positions between parityShards and m are padding and always "missing".
	for i := r.parityShards; i < m; i++ {
		errLocs[i] = 1
		if LEO_ERROR_BITFIELD_OPT && recoverAll {
			errorBits.set(i)
		}
	}
	for i := 0; i < r.dataShards; i++ {
		if len(shards[i]) == 0 {
			errLocs[i+m] = 1
			if LEO_ERROR_BITFIELD_OPT {
				errorBits.set(i + m)
			}
		}
	}

	if LEO_ERROR_BITFIELD_OPT && useBits {
		errorBits.prepare()
	}

	// Evaluate error locator polynomial
	fwht(&errLocs, order, m+r.dataShards)

	for i := 0; i < order; i++ {
		errLocs[i] = ffe((uint(errLocs[i]) * uint(logWalsh[i])) % modulus)
	}

	fwht(&errLocs, order, order)

	var work [][]byte
	if w, ok := r.workPool.Get().([][]byte); ok {
		work = w
	}
	if cap(work) >= n {
		work = work[:n]
	} else {
		work = make([][]byte, n)
	}
	for i := range work {
		if cap(work[i]) < shardSize {
			work[i] = make([]byte, shardSize)
		} else {
			work[i] = work[i][:shardSize]
		}
	}
	defer r.workPool.Put(work)

	// work <- recovery data

	for i := 0; i < r.parityShards; i++ {
		if len(shards[i+r.dataShards]) != 0 {
			mulgf16(work[i], shards[i+r.dataShards], errLocs[i], &r.o)
		} else {
			memclr(work[i])
		}
	}
	for i := r.parityShards; i < m; i++ {
		memclr(work[i])
	}

	// work <- original data

	for i := 0; i < r.dataShards; i++ {
		if len(shards[i]) != 0 {
			mulgf16(work[m+i], shards[i], errLocs[m+i], &r.o)
		} else {
			memclr(work[m+i])
		}
	}
	for i := m + r.dataShards; i < n; i++ {
		memclr(work[i])
	}

	// work <- IFFT(work, n, 0)

	ifftDITDecoder(
		m+r.dataShards,
		work,
		n,
		fftSkew[:],
		&r.o,
	)

	// work <- FormalDerivative(work, n)

	for i := 1; i < n; i++ {
		width := ((i ^ (i - 1)) + 1) >> 1
		slicesXor(work[i-width:i], work[i:i+width], &r.o)
	}

	// work <- FFT(work, n, 0) truncated to m + dataShards

	outputCount := m + r.dataShards

	if LEO_ERROR_BITFIELD_OPT && useBits {
		// Sparse variant skips FFT butterflies with no erasures under them.
		errorBits.fftDIT(work, outputCount, n, fftSkew[:], &r.o)
	} else {
		fftDIT(work, outputCount, n, fftSkew[:], &r.o)
	}

	// Reveal erasures
	//
	//  Original = -ErrLocator * FFT( Derivative( IFFT( ErrLocator * ReceivedData ) ) )
	//  mul_mem(x, y, log_m, ) equals x[] = y[] * log_m
	//
	// mem layout: [Recovery Data (Power of Two = M)] [Original Data (K)] [Zero Padding out to N]
	end := r.dataShards
	if recoverAll {
		end = r.totalShards
	}
	for i := 0; i < end; i++ {
		if len(shards[i]) != 0 {
			continue
		}
		if cap(shards[i]) >= shardSize {
			shards[i] = shards[i][:shardSize]
		} else {
			shards[i] = make([]byte, shardSize)
		}
		if i >= r.dataShards {
			// Parity shard.
			mulgf16(shards[i], work[i-r.dataShards], modulus-errLocs[i-r.dataShards], &r.o)
		} else {
			// Data shard.
			mulgf16(shards[i], work[i+m], modulus-errLocs[i+m], &r.o)
		}
	}
	return nil
}
559

560
// Basic no-frills version for decoder
// ifftDITDecoder performs an in-place inverse FFT over work[:m], where only
// the first mtrunc entries are non-zero. skewLUT supplies the twiddle factors.
func ifftDITDecoder(mtrunc int, work [][]byte, m int, skewLUT []ffe, o *options) {
	// Decimation in time: Unroll 2 layers at a time
	dist := 1
	dist4 := 4
	for dist4 <= m {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			iend := r + dist
			log_m01 := skewLUT[iend-1]
			log_m02 := skewLUT[iend+dist-1]
			log_m23 := skewLUT[iend+dist*2-1]

			// For each set of dist elements:
			for i := r; i < iend; i++ {
				ifftDIT4(work[i:], dist, log_m01, log_m23, log_m02, o)
			}
		}
		dist = dist4
		dist4 <<= 2
	}

	// If there is one layer left:
	if dist < m {
		// Assuming that dist = m / 2
		if dist*2 != m {
			panic("internal error")
		}

		log_m := skewLUT[dist-1]

		if log_m == modulus {
			// A skew of modulus means multiply-by-one: plain XOR suffices.
			slicesXor(work[dist:2*dist], work[:dist], o)
		} else {
			for i := 0; i < dist; i++ {
				ifftDIT2(
					work[i],
					work[i+dist],
					log_m,
					o,
				)
			}
		}
	}
}
605

606
// In-place FFT for encoder and decoder
// fftDIT transforms work[:m] in place, stopping output after mtrunc entries.
// Layers run from widest (dist = m/4) down to pairs.
func fftDIT(work [][]byte, mtrunc, m int, skewLUT []ffe, o *options) {
	// Decimation in time: Unroll 2 layers at a time
	dist4 := m
	dist := m >> 2
	for dist != 0 {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			iend := r + dist
			log_m01 := skewLUT[iend-1]
			log_m02 := skewLUT[iend+dist-1]
			log_m23 := skewLUT[iend+dist*2-1]

			// For each set of dist elements:
			for i := r; i < iend; i++ {
				fftDIT4(
					work[i:],
					dist,
					log_m01,
					log_m23,
					log_m02,
					o,
				)
			}
		}
		dist4 = dist
		dist >>= 2
	}

	// If there is one layer left:
	if dist4 == 2 {
		for r := 0; r < mtrunc; r += 2 {
			log_m := skewLUT[r+1-1]

			if log_m == modulus {
				// Multiply-by-one case: a single XOR replaces the butterfly.
				sliceXor(work[r], work[r+1], o)
			} else {
				fftDIT2(work[r], work[r+1], log_m, o)
			}
		}
	}
}
648

649
// 4-way butterfly
// fftDIT4Ref is the reference (non-SIMD) 4-point forward butterfly over
// work[0], work[dist], work[dist*2], work[dist*3]. A log of modulus means
// the multiplier is one, so only the XOR half of the butterfly is applied.
func fftDIT4Ref(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
	// First layer:
	if log_m02 == modulus {
		sliceXor(work[0], work[dist*2], o)
		sliceXor(work[dist], work[dist*3], o)
	} else {
		fftDIT2(work[0], work[dist*2], log_m02, o)
		fftDIT2(work[dist], work[dist*3], log_m02, o)
	}

	// Second layer:
	if log_m01 == modulus {
		sliceXor(work[0], work[dist], o)
	} else {
		fftDIT2(work[0], work[dist], log_m01, o)
	}

	if log_m23 == modulus {
		sliceXor(work[dist*2], work[dist*3], o)
	} else {
		fftDIT2(work[dist*2], work[dist*3], log_m23, o)
	}
}
673

674
// Unrolled IFFT for encoder
// ifftDITEncoder copies data[:mtrunc] into work, zero-pads to m, runs an
// in-place inverse FFT, and finally XORs the result into xorRes (if non-nil).
func ifftDITEncoder(data [][]byte, mtrunc int, work [][]byte, xorRes [][]byte, m int, skewLUT []ffe, o *options) {
	// I tried rolling the memcpy/memset into the first layer of the FFT and
	// found that it only yields a 4% performance improvement, which is not
	// worth the extra complexity.
	for i := 0; i < mtrunc; i++ {
		copy(work[i], data[i])
	}
	for i := mtrunc; i < m; i++ {
		memclr(work[i])
	}

	// I tried splitting up the first few layers into L3-cache sized blocks but
	// found that it only provides about 5% performance boost, which is not
	// worth the extra complexity.

	// Decimation in time: Unroll 2 layers at a time
	dist := 1
	dist4 := 4
	for dist4 <= m {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			iend := r + dist
			// Note: encoder skew lookups are offset by one vs. the decoder,
			// because the caller passes fftSkew[m-1:].
			log_m01 := skewLUT[iend]
			log_m02 := skewLUT[iend+dist]
			log_m23 := skewLUT[iend+dist*2]

			// For each set of dist elements:
			for i := r; i < iend; i++ {
				ifftDIT4(
					work[i:],
					dist,
					log_m01,
					log_m23,
					log_m02,
					o,
				)
			}
		}

		dist = dist4
		dist4 <<= 2
		// I tried alternating sweeps left->right and right->left to reduce cache misses.
		// It provides about 1% performance boost when done for both FFT and IFFT, so it
		// does not seem to be worth the extra complexity.
	}

	// If there is one layer left:
	if dist < m {
		// Assuming that dist = m / 2
		if dist*2 != m {
			panic("internal error")
		}

		logm := skewLUT[dist]

		if logm == modulus {
			// Multiply-by-one: XOR-only butterfly.
			slicesXor(work[dist:dist*2], work[:dist], o)
		} else {
			for i := 0; i < dist; i++ {
				ifftDIT2(work[i], work[i+dist], logm, o)
			}
		}
	}

	// I tried unrolling this but it does not provide more than 5% performance
	// improvement for 16-bit finite fields, so it's not worth the complexity.
	if xorRes != nil {
		slicesXor(xorRes[:m], work[:m], o)
	}
}
745

746
// ifftDIT4Ref is the reference (non-SIMD) 4-point inverse butterfly,
// mirroring fftDIT4Ref with the layer order reversed. A log of modulus
// means multiply-by-one, so only the XOR half is applied.
func ifftDIT4Ref(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
	// First layer:
	if log_m01 == modulus {
		sliceXor(work[0], work[dist], o)
	} else {
		ifftDIT2(work[0], work[dist], log_m01, o)
	}

	if log_m23 == modulus {
		sliceXor(work[dist*2], work[dist*3], o)
	} else {
		ifftDIT2(work[dist*2], work[dist*3], log_m23, o)
	}

	// Second layer:
	if log_m02 == modulus {
		sliceXor(work[0], work[dist*2], o)
		sliceXor(work[dist], work[dist*3], o)
	} else {
		ifftDIT2(work[0], work[dist*2], log_m02, o)
		ifftDIT2(work[dist], work[dist*3], log_m02, o)
	}
}
769

770
// Reference version of muladd: x[] ^= y[] * log_m
// Bytes are laid out split-format: low product bytes in the first 32 of each
// 64-byte group, high bytes in the last 32. Any tail shorter than 64 bytes
// is left untouched.
func refMulAdd(x, y []byte, log_m ffe) {
	lut := &mul16LUTs[log_m]

	for len(x) >= 64 {
		// Assert sizes for no bounds checks in loop
		hiA := y[32:64]
		loA := y[:32]
		dst := x[:64] // Needed, but not checked...
		for i, lo := range loA {
			hi := hiA[i]
			prod := lut.Lo[lo] ^ lut.Hi[hi]

			dst[i] ^= byte(prod)
			dst[i+32] ^= byte(prod >> 8)
		}
		x = x[64:]
		y = y[64:]
	}
}
790

791
// memclr zeroes every byte of s. Safe on nil/empty slices.
func memclr(s []byte) {
	for i := 0; i < len(s); i++ {
		s[i] = 0
	}
}
796

797
// slicesXor calls xor for every slice pair in v1, v2.
// Each v1[i] receives v1[i] ^= v2[i]; the slices must be equal length.
func slicesXor(v1, v2 [][]byte, o *options) {
	for i, v := range v1 {
		sliceXor(v2[i], v, o)
	}
}
803

804
// Reference version of mul: x[] = y[] * log_m
// Same split 64-byte layout as refMulAdd, but overwrites x instead of XORing.
// len(x) must be a multiple of 64 and y at least as long.
func refMul(x, y []byte, log_m ffe) {
	lut := &mul16LUTs[log_m]

	for off := 0; off < len(x); off += 64 {
		loA := y[off : off+32]
		hiA := y[off+32:]
		hiA = hiA[:len(loA)] // Equal lengths let the compiler drop bounds checks.
		for i, lo := range loA {
			hi := hiA[i]
			prod := lut.Lo[lo] ^ lut.Hi[hi]

			x[off+i] = byte(prod)
			x[off+i+32] = byte(prod >> 8)
		}
	}
}
821

822
// Returns a * Log(b)
func mulLog(a, log_b ffe) ffe {
	/*
	   Note that this operation is not a normal multiplication in a finite
	   field because the right operand is already a logarithm.  This is done
	   because it moves K table lookups from the Decode() method into the
	   initialization step that is less performance critical.  The LogWalsh[]
	   table below contains precalculated logarithms so it is easier to do
	   all the other multiplies in that form as well.
	*/
	if a == 0 {
		return 0
	}
	return expLUT[addMod(logLUT[a], log_b)]
}

// z = x + y (mod kModulus)
func addMod(a, b ffe) ffe {
	sum := uint(a) + uint(b)

	// Partial reduction step, allowing for kModulus to be returned
	return ffe(sum + sum>>bitwidth)
}

// z = x - y (mod kModulus)
func subMod(a, b ffe) ffe {
	dif := uint(a) - uint(b)

	// Partial reduction step, allowing for kModulus to be returned
	// (the borrow propagates via the high bits of the unsigned wraparound).
	return ffe(dif + dif>>bitwidth)
}
853

854
// ceilPow2 returns power of two at or above n.
// n is expected to be at least 1.
func ceilPow2(n int) int {
	const w = int(unsafe.Sizeof(n) * 8)
	zeros := bits.LeadingZeros(uint(n - 1))
	return 1 << (w - zeros)
}
859

860
// Decimation in time (DIT) Fast Walsh-Hadamard Transform
// Unrolls pairs of layers to perform cross-layer operations in registers
// mtrunc: Number of elements that are non-zero at the front of data
func fwht(data *[order]ffe, m, mtrunc int) {
	// Decimation in time: Unroll 2 layers at a time
	dist := 1
	dist4 := 4
	for dist4 <= m {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			// For each set of dist elements:
			// Use 16 bit indices to avoid bounds check on [65536]ffe.
			dist := uint16(dist)
			off := uint16(r)
			for i := uint16(0); i < dist; i++ {
				// fwht4(data[i:], dist) inlined...
				// Reading values appear faster than updating pointers.
				// Casting to uint is not faster.
				t0 := data[off]
				t1 := data[off+dist]
				t2 := data[off+dist*2]
				t3 := data[off+dist*3]

				t0, t1 = fwht2alt(t0, t1)
				t2, t3 = fwht2alt(t2, t3)
				t0, t2 = fwht2alt(t0, t2)
				t1, t3 = fwht2alt(t1, t3)

				data[off] = t0
				data[off+dist] = t1
				data[off+dist*2] = t2
				data[off+dist*3] = t3
				off++
			}
		}
		dist = dist4
		dist4 <<= 2
	}

	// If there is one layer left:
	if dist < m {
		dist := uint16(dist)
		for i := uint16(0); i < dist; i++ {
			fwht2(&data[i], &data[i+dist])
		}
	}
}
907

908
// fwht4 applies a 4-point Walsh-Hadamard butterfly to data[0], data[s],
// data[2s], data[3s]. (Reference version; fwht inlines the same pattern.)
func fwht4(data []ffe, s int) {
	s2 := s << 1

	t0 := &data[0]
	t1 := &data[s]
	t2 := &data[s2]
	t3 := &data[s2+s]

	fwht2(t0, t1)
	fwht2(t2, t3)
	fwht2(t0, t2)
	fwht2(t1, t3)
}

// {a, b} = {a + b, a - b} (Mod Q)
func fwht2(a, b *ffe) {
	sum := addMod(*a, *b)
	dif := subMod(*a, *b)
	*a = sum
	*b = dif
}

// fwht2alt is as fwht2, but returns result.
func fwht2alt(a, b ffe) (ffe, ffe) {
	return addMod(a, b), subMod(a, b)
}
934

935
// initOnce guards the one-time initialization of all field tables.
var initOnce sync.Once

// initConstants builds the logarithm, FFT skew and multiplication tables
// exactly once. Safe to call from multiple goroutines.
func initConstants() {
	initOnce.Do(func() {
		initLUTs()
		initFFTSkew()
		initMul16LUT()
	})
}
944

945
// Initialize logLUT, expLUT.
func initLUTs() {
	cantorBasis := [bitwidth]ffe{
		0x0001, 0xACCA, 0x3C0E, 0x163E,
		0xC582, 0xED2E, 0x914C, 0x4012,
		0x6C98, 0x10D8, 0x6A72, 0xB900,
		0xFDB8, 0xFB34, 0xFF38, 0x991E,
	}

	expLUT = &[order]ffe{}
	logLUT = &[order]ffe{}

	// LFSR table generation:
	state := 1
	for i := ffe(0); i < modulus; i++ {
		expLUT[state] = i
		state <<= 1
		if state >= order {
			// Reduce by the field polynomial when the shift overflows 16 bits.
			state ^= polynomial
		}
	}
	expLUT[0] = modulus

	// Conversion to Cantor basis:

	logLUT[0] = 0
	for i := 0; i < bitwidth; i++ {
		basis := cantorBasis[i]
		width := 1 << i

		// Expand each prefix of size 'width' by XORing in the next basis element.
		for j := 0; j < width; j++ {
			logLUT[j+width] = logLUT[j] ^ basis
		}
	}

	for i := 0; i < order; i++ {
		logLUT[i] = expLUT[logLUT[i]]
	}

	// Rebuild expLUT as the inverse permutation of logLUT.
	for i := 0; i < order; i++ {
		expLUT[logLUT[i]] = ffe(i)
	}

	expLUT[modulus] = expLUT[0]
}
990

991
// Initialize fftSkew.
// Also builds logWalsh, the FWHT of the log table used by the decoder.
func initFFTSkew() {
	var temp [bitwidth - 1]ffe

	// Generate FFT skew vector {1}:

	for i := 1; i < bitwidth; i++ {
		temp[i-1] = ffe(1 << i)
	}

	fftSkew = &[modulus]ffe{}
	logWalsh = &[order]ffe{}

	for m := 0; m < bitwidth-1; m++ {
		step := 1 << (m + 1)

		fftSkew[1<<m-1] = 0

		for i := m; i < bitwidth-1; i++ {
			s := 1 << (i + 1)

			for j := 1<<m - 1; j < s; j += step {
				fftSkew[j+s] = fftSkew[j] ^ temp[i]
			}
		}

		temp[m] = modulus - logLUT[mulLog(temp[m], logLUT[temp[m]^1])]

		for i := m + 1; i < bitwidth-1; i++ {
			sum := addMod(logLUT[temp[i]^1], temp[m])
			temp[i] = mulLog(temp[i], sum)
		}
	}

	// Store skews in log form so FFT butterflies can use them directly.
	for i := 0; i < modulus; i++ {
		fftSkew[i] = logLUT[fftSkew[i]]
	}

	// Precalculate FWHT(Log[i]):

	for i := 0; i < order; i++ {
		logWalsh[i] = logLUT[i]
	}
	logWalsh[0] = 0

	fwht(logWalsh, order, order)
}
1038

1039
// initMul16LUT builds mul16LUTs (per-log_m byte product tables) and,
// when SSSE3/AVX2/AVX512F is available, the PSHUFB tables in multiply256LUT.
func initMul16LUT() {
	mul16LUTs = &[order]mul16LUT{}

	// For each log_m multiplicand:
	for log_m := 0; log_m < order; log_m++ {
		// tmp holds products for each 4-bit nibble position of the input.
		var tmp [64]ffe
		for nibble, shift := 0, 0; nibble < 4; {
			nibble_lut := tmp[nibble*16:]

			for xnibble := 0; xnibble < 16; xnibble++ {
				prod := mulLog(ffe(xnibble<<shift), ffe(log_m))
				nibble_lut[xnibble] = prod
			}
			nibble++
			shift += 4
		}
		// Fold nibble tables into byte-indexed Lo/Hi lookups.
		lut := &mul16LUTs[log_m]
		for i := range lut.Lo[:] {
			lut.Lo[i] = tmp[i&15] ^ tmp[((i>>4)+16)]
			lut.Hi[i] = tmp[((i&15)+32)] ^ tmp[((i>>4)+48)]
		}
	}
	if cpuid.CPU.Has(cpuid.SSSE3) || cpuid.CPU.Has(cpuid.AVX2) || cpuid.CPU.Has(cpuid.AVX512F) {
		multiply256LUT = &[order][16 * 8]byte{}

		for logM := range multiply256LUT[:] {
			// For each 4 bits of the finite field width in bits:
			shift := 0
			for i := 0; i < 4; i++ {
				// Construct 16 entry LUT for PSHUFB
				prodLo := multiply256LUT[logM][i*16 : i*16+16]
				prodHi := multiply256LUT[logM][4*16+i*16 : 4*16+i*16+16]
				for x := range prodLo[:] {
					prod := mulLog(ffe(x<<shift), ffe(logM))
					prodLo[x] = byte(prod)
					prodHi[x] = byte(prod >> 8)
				}
				shift += 4
			}
		}
	}
}
1081

1082
// Mip-map dimensions for the error bitfield.
const kWordMips = 5                  // Mip levels kept in Words.
const kWords = order / 64            // 64-bit words covering 'order' bits.
const kBigMips = 6                   // Mip levels kept in BigWords.
const kBigWords = (kWords + 63) / 64 // Words per level at the Big scale.
const kBiggestMips = 4               // Mip levels kept in BiggestWords.

// errorBitfield contains progressive errors to help indicate which
// shards need reconstruction.
type errorBitfield struct {
	Words        [kWordMips][kWords]uint64
	BigWords     [kBigMips][kBigWords]uint64
	BiggestWords [kBiggestMips]uint64
}
1095

1096
// set marks position i as an error in the finest mip level.
// Coarser levels are derived later by prepare.
func (e *errorBitfield) set(i int) {
	e.Words[0][i/64] |= uint64(1) << (i & 63)
}
1099

1100
// isNeededFn returns a predicate reporting whether an FFT butterfly rooted
// at the given bit position covers any erasure at this mip level.
// Returns nil when mipLevel <= 0 (caller is expected not to use it then).
func (e *errorBitfield) isNeededFn(mipLevel int) func(bit int) bool {
	if mipLevel >= 16 {
		return func(bit int) bool {
			return true
		}
	}
	if mipLevel >= 12 {
		w := e.BiggestWords[mipLevel-12]
		return func(bit int) bool {
			bit /= 4096
			return 0 != (w & (uint64(1) << bit))
		}
	}
	if mipLevel >= 6 {
		w := e.BigWords[mipLevel-6][:]
		return func(bit int) bool {
			bit /= 64
			return 0 != (w[bit/64] & (uint64(1) << (bit & 63)))
		}
	}
	if mipLevel > 0 {
		w := e.Words[mipLevel-1][:]
		return func(bit int) bool {
			return 0 != (w[bit/64] & (uint64(1) << (bit & 63)))
		}
	}
	return nil
}

// isNeeded is the direct (non-closure) equivalent of isNeededFn.
// mipLevel must be >= 1.
func (e *errorBitfield) isNeeded(mipLevel int, bit uint) bool {
	if mipLevel >= 16 {
		return true
	}
	if mipLevel >= 12 {
		bit /= 4096
		return 0 != (e.BiggestWords[mipLevel-12] & (uint64(1) << bit))
	}
	if mipLevel >= 6 {
		bit /= 64
		return 0 != (e.BigWords[mipLevel-6][bit/64] & (uint64(1) << (bit % 64)))
	}
	return 0 != (e.Words[mipLevel-1][bit/64] & (uint64(1) << (bit % 64)))
}
1143

1144
// kHiMasks selects the high half of each 2-, 4-, 8-, 16- and 32-bit group,
// used to smear error bits across progressively wider spans.
var kHiMasks = [5]uint64{
	0xAAAAAAAAAAAAAAAA,
	0xCCCCCCCCCCCCCCCC,
	0xF0F0F0F0F0F0F0F0,
	0xFF00FF00FF00FF00,
	0xFFFF0000FFFF0000,
}
1151

1152
// prepare derives the coarser mip levels (Words[1:], BigWords, BiggestWords)
// from the error bits recorded in Words[0] via set.
func (e *errorBitfield) prepare() {
	// First mip level is for final layer of FFT: pairs of data
	for i := 0; i < kWords; i++ {
		w_i := e.Words[0][i]
		// Smear each error bit across its 2-bit pair.
		hi2lo0 := w_i | ((w_i & kHiMasks[0]) >> 1)
		lo2hi0 := (w_i & (kHiMasks[0] >> 1)) << 1
		w_i = hi2lo0 | lo2hi0
		e.Words[0][i] = w_i

		// Each further level doubles the smear width (4, 8, 16, 32 bits).
		bits := 2
		for j := 1; j < kWordMips; j++ {
			hi2lo_j := w_i | ((w_i & kHiMasks[j]) >> bits)
			lo2hi_j := (w_i & (kHiMasks[j] >> bits)) << bits
			w_i = hi2lo_j | lo2hi_j
			e.Words[j][i] = w_i
			bits <<= 1
		}
	}

	for i := 0; i < kBigWords; i++ {
		w_i := uint64(0)
		bit := uint64(1)
		// Collapse each fully-smeared 64-bit word into a single summary bit.
		src := e.Words[kWordMips-1][i*64 : i*64+64]
		for _, w := range src {
			w_i |= (w | (w >> 32) | (w << 32)) & bit
			bit <<= 1
		}
		e.BigWords[0][i] = w_i

		bits := 1
		for j := 1; j < kBigMips; j++ {
			hi2lo_j := w_i | ((w_i & kHiMasks[j-1]) >> bits)
			lo2hi_j := (w_i & (kHiMasks[j-1] >> bits)) << bits
			w_i = hi2lo_j | lo2hi_j
			e.BigWords[j][i] = w_i
			bits <<= 1
		}
	}

	// Collapse the Big level into the Biggest summary words the same way.
	w_i := uint64(0)
	bit := uint64(1)
	for _, w := range e.BigWords[kBigMips-1][:kBigWords] {
		w_i |= (w | (w >> 32) | (w << 32)) & bit
		bit <<= 1
	}
	e.BiggestWords[0] = w_i

	bits := uint64(1)
	for j := 1; j < kBiggestMips; j++ {
		hi2lo_j := w_i | ((w_i & kHiMasks[j-1]) >> bits)
		lo2hi_j := (w_i & (kHiMasks[j-1] >> bits)) << bits
		w_i = hi2lo_j | lo2hi_j
		e.BiggestWords[j] = w_i
		bits <<= 1
	}
}
1208

1209
// fftDIT is like the package-level fftDIT but consults the prepared error
// bitfield to skip butterfly groups that cover no erasures.
// prepare must have been called first.
func (e *errorBitfield) fftDIT(work [][]byte, mtrunc, m int, skewLUT []ffe, o *options) {
	// Decimation in time: Unroll 2 layers at a time
	mipLevel := bits.Len32(uint32(m)) - 1

	dist4 := m
	dist := m >> 2
	needed := e.isNeededFn(mipLevel)
	for dist != 0 {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			if !needed(r) {
				// No erasures under this subtree; skip the whole group.
				continue
			}
			iEnd := r + dist
			logM01 := skewLUT[iEnd-1]
			logM02 := skewLUT[iEnd+dist-1]
			logM23 := skewLUT[iEnd+dist*2-1]

			// For each set of dist elements:
			for i := r; i < iEnd; i++ {
				fftDIT4(
					work[i:],
					dist,
					logM01,
					logM23,
					logM02,
					o)
			}
		}
		dist4 = dist
		dist >>= 2
		// Two layers were processed per pass, so step the mip level by two.
		mipLevel -= 2
		needed = e.isNeededFn(mipLevel)
	}

	// If there is one layer left:
	if dist4 == 2 {
		for r := 0; r < mtrunc; r += 2 {
			if !needed(r) {
				continue
			}
			logM := skewLUT[r+1-1]

			if logM == modulus {
				sliceXor(work[r], work[r+1], o)
			} else {
				fftDIT2(work[r], work[r+1], logM, o)
			}
		}
	}
}
1260

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.