package reedsolomon

// This is an O(n*log n) implementation of Reed-Solomon
// codes, ported from the C++ library https://github.com/catid/leopard.
//
// The implementation is based on the paper
//
// S.-J. Lin, T. Y. Al-Naffouri, Y. S. Han, and W.-H. Chung,
// "Novel Polynomial Basis with Fast Fourier Transform
// and Its Application to Reed-Solomon Erasure Codes"
// IEEE Trans. on Information Theory, pp. 6284-6299, November, 2016.
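//
// In rough terms, encoding treats the data shards as evaluations of a
// polynomial over GF(2^8): an inverse FFT (in the paper's novel basis)
// recovers its coefficients, and a forward FFT re-evaluates it at the
// parity positions. Decoding scales the surviving shards by an error
// locator, then applies an IFFT, a formal derivative, and an FFT to
// rebuild the missing shards.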

import (
	"bytes"
	"encoding/binary"
	"io"
	"math/bits"
	"sync"
)

// leopardFF8 is like reedSolomon but for the 8-bit "leopard" implementation.
type leopardFF8 struct {
	dataShards   int // Number of data shards, should not be modified.
	parityShards int // Number of parity shards, should not be modified.
	totalShards  int // Total number of shards. Calculated, and should not be modified.

	workPool    sync.Pool
	inversion   map[[inversion8Bytes]byte]leopardGF8cache
	inversionMu sync.Mutex

	o options
}

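// inversion8Bytes is the size of the inversion cache key: a 256-bit bitmap
// of erasure positions packed into 32 bytes (see errorBitfield8.cacheID).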
const inversion8Bytes = 256 / 8

type leopardGF8cache struct {
	errorLocs [256]ffe8
	bits      *errorBitfield8
}

// newFF8 is like New, but for the 8-bit "leopard" implementation.
func newFF8(dataShards, parityShards int, opt options) (*leopardFF8, error) {
	initConstants8()

	if dataShards <= 0 || parityShards <= 0 {
		return nil, ErrInvShardNum
	}

	if dataShards+parityShards > 65536 {
		return nil, ErrMaxShardNum
	}

	r := &leopardFF8{
		dataShards:   dataShards,
		parityShards: parityShards,
		totalShards:  dataShards + parityShards,
		o:            opt,
	}
	if opt.inversionCache && (r.totalShards <= 64 || opt.forcedInversionCache) {
		// The inversion cache is relatively ineffective for large shard counts
		// and can consume a lot of memory.
		// r.totalShards does not cover the key space; it is only a size hint.
		r.inversion = make(map[[inversion8Bytes]byte]leopardGF8cache, r.totalShards)
	}
	return r, nil
}

var _ = Extensions(&leopardFF8{})

func (r *leopardFF8) ShardSizeMultiple() int {
	return 64
}

func (r *leopardFF8) DataShards() int {
	return r.dataShards
}

func (r *leopardFF8) ParityShards() int {
	return r.parityShards
}

func (r *leopardFF8) TotalShards() int {
	return r.totalShards
}

func (r *leopardFF8) AllocAligned(each int) [][]byte {
	return AllocAligned(r.totalShards, each)
}

type ffe8 uint8

const (
	bitwidth8   = 8
	order8      = 1 << bitwidth8
	modulus8    = order8 - 1
	polynomial8 = 0x11D

	// Encode in blocks of this size.
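	// Working in 32 KiB blocks keeps the per-shard working set small, so
	// it is more likely to stay in cache across the repeated FFT sweeps.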
	workSize8 = 32 << 10
)

var (
	fftSkew8  *[modulus8]ffe8
	logWalsh8 *[order8]ffe8
)

// Logarithm Tables
var (
	logLUT8 *[order8]ffe8
	expLUT8 *[order8]ffe8
)

// Stores the partial products of x * y at offset x + y * 256
// Repeated accesses from the same y value are faster
var mul8LUTs *[order8]mul8LUT

type mul8LUT struct {
	Value [256]ffe8
}

// Stores lookup for avx2
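// Each entry holds two 16-byte tables, one per input nibble, used as
// PSHUFB shuffle lookups by the assembly kernels (see initMul8LUT).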
var multiply256LUT8 *[order8][2 * 16]byte

func (r *leopardFF8) Encode(shards [][]byte) error {
	if len(shards) != r.totalShards {
		return ErrTooFewShards
	}

	if err := checkShards(shards, false); err != nil {
		return err
	}
	return r.encode(shards)
}

func (r *leopardFF8) encode(shards [][]byte) error {
	shardSize := shardSize(shards)
	if shardSize%64 != 0 {
		return ErrShardSize
	}

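	// m is the parity shard count rounded up to a power of two; the FFT
	// butterflies below operate on power-of-two sized blocks.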
	m := ceilPow2(r.parityShards)
	var work [][]byte
	if w, ok := r.workPool.Get().([][]byte); ok {
		work = w
	} else {
		work = AllocAligned(m*2, workSize8)
	}
	if cap(work) >= m*2 {
		work = work[:m*2]
		for i := range work {
			if i >= r.parityShards {
				if cap(work[i]) < workSize8 {
					work[i] = AllocAligned(1, workSize8)[0]
				} else {
					work[i] = work[i][:workSize8]
				}
			}
		}
	} else {
		work = AllocAligned(m*2, workSize8)
	}

	defer r.workPool.Put(work)

	mtrunc := m
	if r.dataShards < mtrunc {
		mtrunc = r.dataShards
	}

	skewLUT := fftSkew8[m-1:]

	// Split large shards.
	// More likely on lower shard count.
	off := 0
	sh := make([][]byte, len(shards))

	// work slice we can modify
	wMod := make([][]byte, len(work))
	copy(wMod, work)
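	// Process all shards in blocks of at most workSize8 bytes. Each pass
	// re-slices sh and work to the current [off:end) window of every shard.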
	for off < shardSize {
		work := wMod
		sh := sh
		end := off + workSize8
		if end > shardSize {
			end = shardSize
			sz := shardSize - off
			for i := range work {
				// Last iteration only...
				work[i] = work[i][:sz]
			}
		}
		for i := range shards {
			sh[i] = shards[i][off:end]
		}

		// Replace work slices, so we write directly to output.
		// Note that work has parity *before* data shards.
		res := shards[r.dataShards:r.totalShards]
		for i := range res {
			work[i] = res[i][off:end]
		}

		ifftDITEncoder8(
			sh[:r.dataShards],
			mtrunc,
			work,
			nil, // No xor output
			m,
			skewLUT,
			&r.o,
		)

		lastCount := r.dataShards % m
		skewLUT2 := skewLUT
		if m >= r.dataShards {
			goto skip_body
		}

		// For sets of m data pieces:
		for i := m; i+m <= r.dataShards; i += m {
			sh = sh[m:]
			skewLUT2 = skewLUT2[m:]

			// work <- work xor IFFT(data + i, m, m + i)

			ifftDITEncoder8(
				sh, // data source
				m,
				work[m:], // temporary workspace
				work,     // xor destination
				m,
				skewLUT2,
				&r.o,
			)
		}

		// Handle final partial set of m pieces:
		if lastCount != 0 {
			sh = sh[m:]
			skewLUT2 = skewLUT2[m:]

			// work <- work xor IFFT(data + i, m, m + i)

			ifftDITEncoder8(
				sh, // data source
				lastCount,
				work[m:], // temporary workspace
				work,     // xor destination
				m,
				skewLUT2,
				&r.o,
			)
		}

	skip_body:
		// work <- FFT(work, m, 0)
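		// Only the first parityShards outputs are needed; they are written
		// straight into the parity shards, which work aliases (see above).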
		fftDIT8(work, r.parityShards, m, fftSkew8[:], &r.o)
		off += workSize8
	}

	return nil
}

func (r *leopardFF8) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error {
	return ErrNotSupported
}

func (r *leopardFF8) Join(dst io.Writer, shards [][]byte, outSize int) error {
	// Do we have enough shards?
	if len(shards) < r.dataShards {
		return ErrTooFewShards
	}
	shards = shards[:r.dataShards]

	// Do we have enough data?
	size := 0
	for _, shard := range shards {
		if shard == nil {
			return ErrReconstructRequired
		}
		size += len(shard)

		// Do we have enough data already?
		if size >= outSize {
			break
		}
	}
	if size < outSize {
		return ErrShortData
	}

	// Copy data to dst
	write := outSize
	for _, shard := range shards {
		if write < len(shard) {
			_, err := dst.Write(shard[:write])
			return err
		}
		n, err := dst.Write(shard)
		if err != nil {
			return err
		}
		write -= n
	}
	return nil
}

func (r *leopardFF8) Update(shards [][]byte, newDatashards [][]byte) error {
	return ErrNotSupported
}

func (r *leopardFF8) Split(data []byte) ([][]byte, error) {
	if len(data) == 0 {
		return nil, ErrShortData
	}
	if r.totalShards == 1 && len(data)&63 == 0 {
		return [][]byte{data}, nil
	}

	dataLen := len(data)
	// Calculate number of bytes per data shard.
	perShard := (len(data) + r.dataShards - 1) / r.dataShards
	perShard = ((perShard + 63) / 64) * 64
	needTotal := r.totalShards * perShard

	if cap(data) > len(data) {
		if cap(data) > needTotal {
			data = data[:needTotal]
		} else {
			data = data[:cap(data)]
		}
		clear := data[dataLen:]
		for i := range clear {
			clear[i] = 0
		}
	}

	// Only allocate memory if necessary
	var padding [][]byte
	if len(data) < needTotal {
		// calculate maximum number of full shards in `data` slice
		fullShards := len(data) / perShard
		padding = AllocAligned(r.totalShards-fullShards, perShard)
		if dataLen > perShard*fullShards {
			// Copy partial shards
			copyFrom := data[perShard*fullShards : dataLen]
			for i := range padding {
				if len(copyFrom) <= 0 {
					break
				}
				copyFrom = copyFrom[copy(padding[i], copyFrom):]
			}
		}
	}

	// Split into equal-length shards.
	dst := make([][]byte, r.totalShards)
	i := 0
	for ; i < len(dst) && len(data) >= perShard; i++ {
		dst[i] = data[:perShard:perShard]
		data = data[perShard:]
	}

	for j := 0; i+j < len(dst); j++ {
		dst[i+j] = padding[0]
		padding = padding[1:]
	}

	return dst, nil
}

func (r *leopardFF8) ReconstructSome(shards [][]byte, required []bool) error {
	return r.ReconstructData(shards)
}

func (r *leopardFF8) Reconstruct(shards [][]byte) error {
	return r.reconstruct(shards, true)
}

func (r *leopardFF8) ReconstructData(shards [][]byte) error {
	return r.reconstruct(shards, false)
}

func (r *leopardFF8) Verify(shards [][]byte) (bool, error) {
	if len(shards) != r.totalShards {
		return false, ErrTooFewShards
	}
	if err := checkShards(shards, false); err != nil {
		return false, err
	}

	// Re-encode parity shards to temporary storage.
	shardSize := len(shards[0])
	outputs := make([][]byte, r.totalShards)
	copy(outputs, shards[:r.dataShards])
	for i := r.dataShards; i < r.totalShards; i++ {
		outputs[i] = make([]byte, shardSize)
	}
	if err := r.Encode(outputs); err != nil {
		return false, err
	}

	// Compare.
	for i := r.dataShards; i < r.totalShards; i++ {
		if !bytes.Equal(outputs[i], shards[i]) {
			return false, nil
		}
	}
	return true, nil
}

func (r *leopardFF8) reconstruct(shards [][]byte, recoverAll bool) error {
	if len(shards) != r.totalShards {
		return ErrTooFewShards
	}

	if err := checkShards(shards, true); err != nil {
		return err
	}

	// Quick check: are all of the shards present?  If so, there's
	// nothing to do.
	numberPresent := 0
	dataPresent := 0
	for i := 0; i < r.totalShards; i++ {
		if len(shards[i]) != 0 {
			numberPresent++
			if i < r.dataShards {
				dataPresent++
			}
		}
	}
	if numberPresent == r.totalShards || !recoverAll && dataPresent == r.dataShards {
		// Cool. All of the shards have data. We don't
		// need to do anything.
		return nil
	}

	// Check if we have enough to reconstruct.
	if numberPresent < r.dataShards {
		return ErrTooFewShards
	}

	shardSize := shardSize(shards)
	if shardSize%64 != 0 {
		return ErrShardSize
	}

	// Use the error bitfield optimization only if we are missing less than
	// 1/4 of the parity shards and are restoring a significant amount of data.
	useBits := r.totalShards-numberPresent <= r.parityShards/4 && shardSize*r.totalShards >= 64<<10

	m := ceilPow2(r.parityShards)
	n := ceilPow2(m + r.dataShards)

	const LEO_ERROR_BITFIELD_OPT = true

	// Fill in error locations.
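	// Layout of errLocs: indices [0, m) mark missing parity shards
	// (padded up to m), indices [m, m+dataShards) mark missing data shards.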
	var errorBits errorBitfield8
	var errLocs [order8]ffe8
	for i := 0; i < r.parityShards; i++ {
		if len(shards[i+r.dataShards]) == 0 {
			errLocs[i] = 1
			if LEO_ERROR_BITFIELD_OPT && recoverAll {
				errorBits.set(i)
			}
		}
	}
	for i := r.parityShards; i < m; i++ {
		errLocs[i] = 1
		if LEO_ERROR_BITFIELD_OPT && recoverAll {
			errorBits.set(i)
		}
	}
	for i := 0; i < r.dataShards; i++ {
		if len(shards[i]) == 0 {
			errLocs[i+m] = 1
			if LEO_ERROR_BITFIELD_OPT {
				errorBits.set(i + m)
			}
		}
	}

	var gotInversion bool
	if LEO_ERROR_BITFIELD_OPT && r.inversion != nil {
		cacheID := errorBits.cacheID()
		r.inversionMu.Lock()
		if inv, ok := r.inversion[cacheID]; ok {
			r.inversionMu.Unlock()
			errLocs = inv.errorLocs
			if inv.bits != nil && useBits {
				errorBits = *inv.bits
				useBits = true
			} else {
				useBits = false
			}
			gotInversion = true
		} else {
			r.inversionMu.Unlock()
		}
	}

	if !gotInversion {
		// No inversion...
		if LEO_ERROR_BITFIELD_OPT && useBits {
			errorBits.prepare()
		}

		// Evaluate the error locator polynomial.
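		// The three steps below evaluate it at every field point at once:
		// FWHT, pointwise multiply by the precomputed FWHT of the log table
		// (mod modulus8), then FWHT back - a convolution in the Walsh domain.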
		fwht8(&errLocs, order8, m+r.dataShards)

		for i := 0; i < order8; i++ {
			errLocs[i] = ffe8((uint(errLocs[i]) * uint(logWalsh8[i])) % modulus8)
		}

		fwht8(&errLocs, order8, order8)

		if r.inversion != nil {
			c := leopardGF8cache{
				errorLocs: errLocs,
			}
			if useBits {
				// Heap alloc
				var x errorBitfield8
				x = errorBits
				c.bits = &x
			}
			r.inversionMu.Lock()
			r.inversion[errorBits.cacheID()] = c
			r.inversionMu.Unlock()
		}
	}

	var work [][]byte
	if w, ok := r.workPool.Get().([][]byte); ok {
		work = w
	}
	if cap(work) >= n {
		work = work[:n]
		for i := range work {
			if cap(work[i]) < workSize8 {
				work[i] = make([]byte, workSize8)
			} else {
				work[i] = work[i][:workSize8]
			}
		}

	} else {
		work = make([][]byte, n)
		all := make([]byte, n*workSize8)
		for i := range work {
			work[i] = all[i*workSize8 : i*workSize8+workSize8]
		}
	}
	defer r.workPool.Put(work)

	// work <- recovery data

	// Split large shards.
	// More likely on lower shard count.
	sh := make([][]byte, len(shards))
	// Copy...
	copy(sh, shards)

	// Add output
	for i, sh := range shards {
		if !recoverAll && i >= r.dataShards {
			continue
		}
		if len(sh) == 0 {
			if cap(sh) >= shardSize {
				shards[i] = sh[:shardSize]
			} else {
				shards[i] = make([]byte, shardSize)
			}
		}
	}

	off := 0
	for off < shardSize {
		endSlice := off + workSize8
		if endSlice > shardSize {
			endSlice = shardSize
			sz := shardSize - off
			// Last iteration only
			for i := range work {
				work[i] = work[i][:sz]
			}
		}
		for i := range shards {
			if len(sh[i]) != 0 {
				sh[i] = shards[i][off:endSlice]
			}
		}
		for i := 0; i < r.parityShards; i++ {
			if len(sh[i+r.dataShards]) != 0 {
				mulgf8(work[i], sh[i+r.dataShards], errLocs[i], &r.o)
			} else {
				memclr(work[i])
			}
		}
		for i := r.parityShards; i < m; i++ {
			memclr(work[i])
		}

		// work <- original data

		for i := 0; i < r.dataShards; i++ {
			if len(sh[i]) != 0 {
				mulgf8(work[m+i], sh[i], errLocs[m+i], &r.o)
			} else {
				memclr(work[m+i])
			}
		}
		for i := m + r.dataShards; i < n; i++ {
			memclr(work[i])
		}

		// work <- IFFT(work, n, 0)

		ifftDITDecoder8(
			m+r.dataShards,
			work,
			n,
			fftSkew8[:],
			&r.o,
		)

		// work <- FormalDerivative(work, n)

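		// width is the lowest set bit of i (equivalent to i & -i), the span
		// of each XOR step in the formal derivative over this basis.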
		for i := 1; i < n; i++ {
			width := ((i ^ (i - 1)) + 1) >> 1
			slicesXor(work[i-width:i], work[i:i+width], &r.o)
		}

		// work <- FFT(work, n, 0) truncated to m + dataShards

		outputCount := m + r.dataShards

		if LEO_ERROR_BITFIELD_OPT && useBits {
			errorBits.fftDIT8(work, outputCount, n, fftSkew8[:], &r.o)
		} else {
			fftDIT8(work, outputCount, n, fftSkew8[:], &r.o)
		}

		// Reveal erasures
		//
		//  Original = -ErrLocator * FFT( Derivative( IFFT( ErrLocator * ReceivedData ) ) )
		//  mul_mem(x, y, log_m) equals x[] = y[] * log_m
		//
		// mem layout: [Recovery Data (Power of Two = M)] [Original Data (K)] [Zero Padding out to N]
		end := r.dataShards
		if recoverAll {
			end = r.totalShards
		}
		// Restore
		for i := 0; i < end; i++ {
			if len(sh[i]) != 0 {
				continue
			}

			if i >= r.dataShards {
				// Parity shard.
				mulgf8(shards[i][off:endSlice], work[i-r.dataShards], modulus8-errLocs[i-r.dataShards], &r.o)
			} else {
				// Data shard.
				mulgf8(shards[i][off:endSlice], work[i+m], modulus8-errLocs[i+m], &r.o)
			}
		}
		off += workSize8
	}
	return nil
}

// Basic no-frills version for decoder
func ifftDITDecoder8(mtrunc int, work [][]byte, m int, skewLUT []ffe8, o *options) {
	// Decimation in time: Unroll 2 layers at a time
	dist := 1
	dist4 := 4
	for dist4 <= m {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			iend := r + dist
			log_m01 := skewLUT[iend-1]
			log_m02 := skewLUT[iend+dist-1]
			log_m23 := skewLUT[iend+dist*2-1]

			// For each set of dist elements:
			for i := r; i < iend; i++ {
				ifftDIT48(work[i:], dist, log_m01, log_m23, log_m02, o)
			}
		}
		dist = dist4
		dist4 <<= 2
	}

	// If there is one layer left:
	if dist < m {
		// Assuming that dist = m / 2
		if dist*2 != m {
			panic("internal error")
		}

		log_m := skewLUT[dist-1]

		if log_m == modulus8 {
			slicesXor(work[dist:2*dist], work[:dist], o)
		} else {
			for i := 0; i < dist; i++ {
				ifftDIT28(
					work[i],
					work[i+dist],
					log_m,
					o,
				)
			}
		}
	}
}

// In-place FFT for encoder and decoder
func fftDIT8(work [][]byte, mtrunc, m int, skewLUT []ffe8, o *options) {
	// Decimation in time: Unroll 2 layers at a time
	dist4 := m
	dist := m >> 2
	for dist != 0 {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			iend := r + dist
			log_m01 := skewLUT[iend-1]
			log_m02 := skewLUT[iend+dist-1]
			log_m23 := skewLUT[iend+dist*2-1]

			// For each set of dist elements:
			for i := r; i < iend; i++ {
				fftDIT48(
					work[i:],
					dist,
					log_m01,
					log_m23,
					log_m02,
					o,
				)
			}
		}
		dist4 = dist
		dist >>= 2
	}

	// If there is one layer left:
	if dist4 == 2 {
		for r := 0; r < mtrunc; r += 2 {
			log_m := skewLUT[r+1-1]

			if log_m == modulus8 {
				sliceXor(work[r], work[r+1], o)
			} else {
				fftDIT28(work[r], work[r+1], log_m, o)
			}
		}
	}
}

// 4-way butterfly
func fftDIT4Ref8(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
	// First layer:
	if log_m02 == modulus8 {
		sliceXor(work[0], work[dist*2], o)
		sliceXor(work[dist], work[dist*3], o)
	} else {
		fftDIT28(work[0], work[dist*2], log_m02, o)
		fftDIT28(work[dist], work[dist*3], log_m02, o)
	}

	// Second layer:
	if log_m01 == modulus8 {
		sliceXor(work[0], work[dist], o)
	} else {
		fftDIT28(work[0], work[dist], log_m01, o)
	}

	if log_m23 == modulus8 {
		sliceXor(work[dist*2], work[dist*3], o)
	} else {
		fftDIT28(work[dist*2], work[dist*3], log_m23, o)
	}
}

// Unrolled IFFT for encoder
func ifftDITEncoder8(data [][]byte, mtrunc int, work [][]byte, xorRes [][]byte, m int, skewLUT []ffe8, o *options) {
	// I tried rolling the memcpy/memset into the first layer of the FFT and
	// found that it only yields a 4% performance improvement, which is not
	// worth the extra complexity.
	for i := 0; i < mtrunc; i++ {
		copy(work[i], data[i])
	}
	for i := mtrunc; i < m; i++ {
		memclr(work[i])
	}

	// Decimation in time: Unroll 2 layers at a time
	dist := 1
	dist4 := 4
	for dist4 <= m {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			iend := r + dist
			log_m01 := skewLUT[iend]
			log_m02 := skewLUT[iend+dist]
			log_m23 := skewLUT[iend+dist*2]

			// For each set of dist elements:
			for i := r; i < iend; i++ {
				ifftDIT48(
					work[i:],
					dist,
					log_m01,
					log_m23,
					log_m02,
					o,
				)
			}
		}

		dist = dist4
		dist4 <<= 2
		// I tried alternating sweeps left->right and right->left to reduce cache misses.
		// It provides about 1% performance boost when done for both FFT and IFFT, so it
		// does not seem to be worth the extra complexity.
	}

	// If there is one layer left:
	if dist < m {
		// Assuming that dist = m / 2
		if dist*2 != m {
			panic("internal error")
		}

		logm := skewLUT[dist]

		if logm == modulus8 {
			slicesXor(work[dist:dist*2], work[:dist], o)
		} else {
			for i := 0; i < dist; i++ {
				ifftDIT28(work[i], work[i+dist], logm, o)
			}
		}
	}

	// I tried unrolling this but it does not provide more than 5% performance
	// improvement for 16-bit finite fields, so it's not worth the complexity.
	if xorRes != nil {
		slicesXor(xorRes[:m], work[:m], o)
	}
}

func ifftDIT4Ref8(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
	// First layer:
	if log_m01 == modulus8 {
		sliceXor(work[0], work[dist], o)
	} else {
		ifftDIT28(work[0], work[dist], log_m01, o)
	}

	if log_m23 == modulus8 {
		sliceXor(work[dist*2], work[dist*3], o)
	} else {
		ifftDIT28(work[dist*2], work[dist*3], log_m23, o)
	}

	// Second layer:
	if log_m02 == modulus8 {
		sliceXor(work[0], work[dist*2], o)
		sliceXor(work[dist], work[dist*3], o)
	} else {
		ifftDIT28(work[0], work[dist*2], log_m02, o)
		ifftDIT28(work[dist], work[dist*3], log_m02, o)
	}
}

// Reference version of muladd: x[] ^= y[] * log_m
func refMulAdd8(x, y []byte, log_m ffe8) {
	lut := &mul8LUTs[log_m]

	for len(x) >= 64 {
		// Assert sizes for no bounds checks in loop
		src := y[:64]
		dst := x[:len(src)] // Needed, but not checked...
		for i, y1 := range src {
			dst[i] ^= byte(lut.Value[y1])
		}
		x = x[64:]
		y = y[64:]
	}
}

// Reference version of mul: x[] = y[] * log_m
func refMul8(x, y []byte, log_m ffe8) {
	lut := &mul8LUTs[log_m]

	for off := 0; off < len(x); off += 64 {
		src := y[off : off+64]
		for i, y1 := range src {
			x[off+i] = byte(lut.Value[y1])
		}
	}
}

// Returns a * Log(b)
func mulLog8(a, log_b ffe8) ffe8 {
	/*
	   Note that this operation is not a normal multiplication in a finite
	   field because the right operand is already a logarithm.  This is done
	   because it moves K table lookups from the Decode() method into the
	   initialization step that is less performance critical.  The LogWalsh[]
	   table below contains precalculated logarithms so it is easier to do
	   all the other multiplies in that form as well.
	*/
	if a == 0 {
		return 0
	}
	return expLUT8[addMod8(logLUT8[a], log_b)]
}

// z = x + y (mod kModulus)
func addMod8(a, b ffe8) ffe8 {
	sum := uint(a) + uint(b)

	// Partial reduction step, allowing for kModulus to be returned
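	// e.g. addMod8(200, 100): sum = 300, 300>>8 = 1, and 301 truncated
	// to 8 bits is 45, which equals 300 mod 255.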
	return ffe8(sum + sum>>bitwidth8)
}

// z = x - y (mod kModulus)
func subMod8(a, b ffe8) ffe8 {
	dif := uint(a) - uint(b)

	// Partial reduction step, allowing for kModulus to be returned
	return ffe8(dif + dif>>bitwidth8)
}

// Decimation in time (DIT) Fast Walsh-Hadamard Transform
// Unrolls pairs of layers to perform cross-layer operations in registers
// mtrunc: Number of elements that are non-zero at the front of data
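// The FWHT acts as the Fourier transform over the additive group of the
// field; reconstruct() uses it to evaluate the error locator everywhere.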
func fwht8(data *[order8]ffe8, m, mtrunc int) {
	// Decimation in time: Unroll 2 layers at a time
	dist := 1
	dist4 := 4
	for dist4 <= m {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			// For each set of dist elements:
			// Use 16 bit indices to avoid bounds check on [65536]ffe8.
			dist := uint16(dist)
			off := uint16(r)
			for i := uint16(0); i < dist; i++ {
				// fwht48(data[i:], dist) inlined...
				// Reading values appears faster than updating pointers.
				// Casting to uint is not faster.
				t0 := data[off]
				t1 := data[off+dist]
				t2 := data[off+dist*2]
				t3 := data[off+dist*3]

				t0, t1 = fwht2alt8(t0, t1)
				t2, t3 = fwht2alt8(t2, t3)
				t0, t2 = fwht2alt8(t0, t2)
				t1, t3 = fwht2alt8(t1, t3)

				data[off] = t0
				data[off+dist] = t1
				data[off+dist*2] = t2
				data[off+dist*3] = t3
				off++
			}
		}
		dist = dist4
		dist4 <<= 2
	}

	// If there is one layer left:
	if dist < m {
		dist := uint16(dist)
		for i := uint16(0); i < dist; i++ {
			fwht28(&data[i], &data[i+dist])
		}
	}
}

func fwht48(data []ffe8, s int) {
	s2 := s << 1

	t0 := &data[0]
	t1 := &data[s]
	t2 := &data[s2]
	t3 := &data[s2+s]

	fwht28(t0, t1)
	fwht28(t2, t3)
	fwht28(t0, t2)
	fwht28(t1, t3)
}

// {a, b} = {a + b, a - b} (Mod Q)
func fwht28(a, b *ffe8) {
	sum := addMod8(*a, *b)
	dif := subMod8(*a, *b)
	*a = sum
	*b = dif
}

// fwht2alt8 is like fwht28, but returns the result instead of storing it.
func fwht2alt8(a, b ffe8) (ffe8, ffe8) {
	return addMod8(a, b), subMod8(a, b)
}

var initOnce8 sync.Once

func initConstants8() {
	initOnce8.Do(func() {
		initLUTs8()
		initFFTSkew8()
		initMul8LUT()
	})
}

// Initialize logLUT8, expLUT8.
func initLUTs8() {
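	// cantorBasis spans GF(2^8) in the Cantor basis from the paper;
	// logLUT8/expLUT8 are built relative to this representation.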
	cantorBasis := [bitwidth8]ffe8{
		1, 214, 152, 146, 86, 200, 88, 230,
	}

	expLUT8 = &[order8]ffe8{}
	logLUT8 = &[order8]ffe8{}

	// LFSR table generation:
	state := 1
	for i := ffe8(0); i < modulus8; i++ {
		expLUT8[state] = i
		state <<= 1
		if state >= order8 {
			state ^= polynomial8
		}
	}
	expLUT8[0] = modulus8

	// Conversion to Cantor basis:

	logLUT8[0] = 0
	for i := 0; i < bitwidth8; i++ {
		basis := cantorBasis[i]
		width := 1 << i

		for j := 0; j < width; j++ {
			logLUT8[j+width] = logLUT8[j] ^ basis
		}
	}

	for i := 0; i < order8; i++ {
		logLUT8[i] = expLUT8[logLUT8[i]]
	}

	for i := 0; i < order8; i++ {
		expLUT8[logLUT8[i]] = ffe8(i)
	}

	expLUT8[modulus8] = expLUT8[0]
}

// Initialize fftSkew8.
func initFFTSkew8() {
	var temp [bitwidth8 - 1]ffe8

	// Generate FFT skew vector {1}:

	for i := 1; i < bitwidth8; i++ {
		temp[i-1] = ffe8(1 << i)
	}

	fftSkew8 = &[modulus8]ffe8{}
	logWalsh8 = &[order8]ffe8{}

	for m := 0; m < bitwidth8-1; m++ {
		step := 1 << (m + 1)

		fftSkew8[1<<m-1] = 0

		for i := m; i < bitwidth8-1; i++ {
			s := 1 << (i + 1)

			for j := 1<<m - 1; j < s; j += step {
				fftSkew8[j+s] = fftSkew8[j] ^ temp[i]
			}
		}

		temp[m] = modulus8 - logLUT8[mulLog8(temp[m], logLUT8[temp[m]^1])]

		for i := m + 1; i < bitwidth8-1; i++ {
			sum := addMod8(logLUT8[temp[i]^1], temp[m])
			temp[i] = mulLog8(temp[i], sum)
		}
	}

	for i := 0; i < modulus8; i++ {
		fftSkew8[i] = logLUT8[fftSkew8[i]]
	}

	// Precalculate FWHT(Log[i]):

	for i := 0; i < order8; i++ {
		logWalsh8[i] = logLUT8[i]
	}
	logWalsh8[0] = 0

	fwht8(logWalsh8, order8, order8)
}

func initMul8LUT() {
	mul8LUTs = &[order8]mul8LUT{}

	// For each log_m multiplicand:
	for log_m := 0; log_m < order8; log_m++ {
		var tmp [64]ffe8
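		// Multiplication by a constant is linear over GF(2), so a byte's
		// product is the XOR of its two nibbles' products; build a
		// 16-entry table per nibble, then combine the tables below.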
		for nibble, shift := 0, 0; nibble < 4; {
			nibble_lut := tmp[nibble*16:]

			for xnibble := 0; xnibble < 16; xnibble++ {
				prod := mulLog8(ffe8(xnibble<<shift), ffe8(log_m))
				nibble_lut[xnibble] = prod
			}
			nibble++
			shift += 4
		}
		lut := &mul8LUTs[log_m]
		for i := range lut.Value[:] {
			lut.Value[i] = tmp[i&15] ^ tmp[((i>>4)+16)]
		}
	}
	// Always initialize assembly tables.
	// Not as big a resource hog as gf16.
	if true {
		multiply256LUT8 = &[order8][16 * 2]byte{}

		for logM := range multiply256LUT8[:] {
			// For each 4 bits of the finite field width in bits:
			shift := 0
			for i := 0; i < 2; i++ {
				// Construct 16 entry LUT for PSHUFB
				prod := multiply256LUT8[logM][i*16 : i*16+16]
				for x := range prod[:] {
					prod[x] = byte(mulLog8(ffe8(x<<shift), ffe8(logM)))
				}
				shift += 4
			}
		}
	}
}

const kWords8 = order8 / 64

// errorBitfield8 contains progressive errors to help indicate which
// shards need reconstruction.
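// Each mip level ORs neighboring bits together so that fftDIT8 can skip
// whole butterfly subtrees that contain no erasures (see isNeeded).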
type errorBitfield8 struct {
	Words [7][kWords8]uint64
}

func (e *errorBitfield8) set(i int) {
	e.Words[0][(i/64)&3] |= uint64(1) << (i & 63)
}

func (e *errorBitfield8) cacheID() [inversion8Bytes]byte {
	var res [inversion8Bytes]byte
	binary.LittleEndian.PutUint64(res[0:8], e.Words[0][0])
	binary.LittleEndian.PutUint64(res[8:16], e.Words[0][1])
	binary.LittleEndian.PutUint64(res[16:24], e.Words[0][2])
	binary.LittleEndian.PutUint64(res[24:32], e.Words[0][3])
	return res
}

func (e *errorBitfield8) isNeeded(mipLevel, bit int) bool {
	if mipLevel >= 8 || mipLevel <= 0 {
		return true
	}
	return 0 != (e.Words[mipLevel-1][bit/64] & (uint64(1) << (bit & 63)))
}

func (e *errorBitfield8) prepare() {
	// First mip level is for final layer of FFT: pairs of data
	for i := 0; i < kWords8; i++ {
		w_i := e.Words[0][i]
		hi2lo0 := w_i | ((w_i & kHiMasks[0]) >> 1)
		lo2hi0 := (w_i & (kHiMasks[0] >> 1)) << 1
		w_i = hi2lo0 | lo2hi0
		e.Words[0][i] = w_i

		bits := 2
		for j := 1; j < 5; j++ {
			hi2lo_j := w_i | ((w_i & kHiMasks[j]) >> bits)
			lo2hi_j := (w_i & (kHiMasks[j] >> bits)) << bits
			w_i = hi2lo_j | lo2hi_j
			e.Words[j][i] = w_i
			bits <<= 1
		}
	}

	for i := 0; i < kWords8; i++ {
		w := e.Words[4][i]
		w |= w >> 32
		w |= w << 32
		e.Words[5][i] = w
	}

	for i := 0; i < kWords8; i += 2 {
		t := e.Words[5][i] | e.Words[5][i+1]
		e.Words[6][i] = t
		e.Words[6][i+1] = t
	}
}

func (e *errorBitfield8) fftDIT8(work [][]byte, mtrunc, m int, skewLUT []ffe8, o *options) {
	// Decimation in time: Unroll 2 layers at a time
	mipLevel := bits.Len32(uint32(m)) - 1

	dist4 := m
	dist := m >> 2
	for dist != 0 {
		// For each set of dist*4 elements:
		for r := 0; r < mtrunc; r += dist4 {
			if !e.isNeeded(mipLevel, r) {
				continue
			}
			iEnd := r + dist
			logM01 := skewLUT[iEnd-1]
			logM02 := skewLUT[iEnd+dist-1]
			logM23 := skewLUT[iEnd+dist*2-1]

			// For each set of dist elements:
			for i := r; i < iEnd; i++ {
				fftDIT48(
					work[i:],
					dist,
					logM01,
					logM23,
					logM02,
					o)
			}
		}
		dist4 = dist
		dist >>= 2
		mipLevel -= 2
	}

	// If there is one layer left:
	if dist4 == 2 {
		for r := 0; r < mtrunc; r += 2 {
			if !e.isNeeded(mipLevel, r) {
				continue
			}
			logM := skewLUT[r+1-1]

			if logM == modulus8 {
				sliceXor(work[r], work[r+1], o)
			} else {
				fftDIT28(work[r], work[r+1], logM, o)
			}
		}
	}
}
