reedsolomon.go

/**
 * Reed-Solomon Coding over 8-bit values.
 *
 * Copyright 2015, Klaus Post
 * Copyright 2015, Backblaze, Inc.
 */

// Package reedsolomon enables Erasure Coding in Go
//
// For usage and examples, see https://github.com/klauspost/reedsolomon
//
package reedsolomon

import (
	"bytes"
	"errors"
	"io"
	"sync"
)

// Encoder is an interface to encode Reed-Solomon parity sets for your data.
type Encoder interface {
	// Encode parity for a set of data shards.
	// Input is 'shards' containing data shards followed by parity shards.
	// The number of shards must match the number given to New().
	// Each shard is a byte array, and they must all be the same size.
	// The parity shards will always be overwritten and the data shards
	// will remain the same, so it is safe for you to read from the
	// data shards while this is running.
	Encode(shards [][]byte) error

	// Verify returns true if the parity shards contain correct data.
	// The data is the same format as Encode. No data is modified, so
	// you are allowed to read from data while this is running.
	Verify(shards [][]byte) (bool, error)

	// Reconstruct will recreate the missing shards if possible.
	//
	// Given a list of shards, some of which contain data, fills in the
	// ones that don't have data.
	//
	// The length of the array must be equal to the total number of shards.
	// You indicate that a shard is missing by setting it to nil.
	//
	// If there are too few shards to reconstruct the missing
	// ones, ErrTooFewShards will be returned.
	//
	// The reconstructed shard set is complete, but integrity is not verified.
	// Use the Verify function to check if the data set is ok.
	Reconstruct(shards [][]byte) error

	// Split a data slice into the number of shards given to the encoder,
	// and create empty parity shards.
	//
	// The data will be split into equally sized shards.
	// If the data size isn't divisible by the number of shards,
	// the last shard will contain extra zeros.
	//
	// There must be at least 1 byte, otherwise ErrShortData will be
	// returned.
	//
	// The data will not be copied, except for the last shard, so you
	// should not modify the data of the input slice afterwards.
	Split(data []byte) ([][]byte, error)

	// Join the shards and write the data segment to dst.
	//
	// Only the data shards are considered.
	// You must supply the exact output size you want.
	// If there are too few shards given, ErrTooFewShards will be returned.
	// If the total data size is less than outSize, ErrShortData will be returned.
	Join(dst io.Writer, shards [][]byte, outSize int) error
}
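
// A minimal usage sketch of the Encoder interface above (hypothetical data
// and shard counts, error handling omitted); an illustration only, not part
// of the package API:
//
//	enc, _ := New(4, 2)          // 4 data shards, 2 parity shards
//	shards, _ := enc.Split(data) // split input into 6 equal-size shards
//	_ = enc.Encode(shards)       // fill the 2 parity shards
//	ok, _ := enc.Verify(shards)  // ok == true if parity matches the data
//	shards[0] = nil              // simulate a lost shard
//	_ = enc.Reconstruct(shards)  // rebuild it from the remaining shards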

// reedSolomon contains a matrix for a specific
// distribution of data shards and parity shards.
// Construct it using New().
type reedSolomon struct {
	DataShards   int // Number of data shards, should not be modified.
	ParityShards int // Number of parity shards, should not be modified.
	Shards       int // Total number of shards. Calculated, and should not be modified.
	m            matrix
	tree         inversionTree
	parity       [][]byte
	o            options
}

// ErrInvShardNum will be returned by New, if you attempt to create
// an Encoder where either data or parity shards is zero or less.
var ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")

// ErrMaxShardNum will be returned by New, if you attempt to create an
// Encoder where the total of data and parity shards is bigger than the
// Galois field GF(2^8) - 1, i.e. 255.
var ErrMaxShardNum = errors.New("cannot create Encoder with 255 or more data+parity shards")

// New creates a new encoder and initializes it to
// the number of data shards and parity shards that
// you want to use. You can reuse this encoder.
// Note that the total number of data and parity shards cannot exceed 255.
// If no options are supplied, default options are used.
func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
	r := reedSolomon{
		DataShards:   dataShards,
		ParityShards: parityShards,
		Shards:       dataShards + parityShards,
		o:            defaultOptions,
	}
	for _, opt := range opts {
		opt(&r.o)
	}
	if dataShards <= 0 || parityShards <= 0 {
		return nil, ErrInvShardNum
	}
	if dataShards+parityShards > 255 {
		return nil, ErrMaxShardNum
	}

	// Start with a Vandermonde matrix. This matrix would work,
	// in theory, but doesn't have the property that the data
	// shards are unchanged after encoding.
	vm, err := vandermonde(r.Shards, dataShards)
	if err != nil {
		return nil, err
	}

	// Multiply by the inverse of the top square of the matrix.
	// This will make the top square be the identity matrix, but
	// preserve the property that any square subset of rows is
	// invertible.
	top, _ := vm.SubMatrix(0, 0, dataShards, dataShards)
	top, _ = top.Invert()
	r.m, _ = vm.Multiply(top)
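	// After this step the top dataShards x dataShards block of r.m is the
	// identity matrix, so multiplying r.m by the data reproduces the data
	// shards verbatim and appends parityShards rows of parity. For example,
	// with 4 data and 2 parity shards, r.m is a 6x4 matrix whose first
	// 4 rows form the identity.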

	// Inverted matrices are cached in a tree keyed by the indices
	// of the invalid rows of the data to reconstruct.
	// The inversion root node will have the identity matrix as
	// its inversion matrix because it implies there are no errors
	// with the original data.
	r.tree = newInversionTree(dataShards, parityShards)

	r.parity = make([][]byte, parityShards)
	for i := range r.parity {
		r.parity[i] = r.m[dataShards+i]
	}

	return &r, err
}

// ErrTooFewShards is returned if too few shards were given to
// Encode/Verify/Reconstruct. It will also be returned from Reconstruct
// if there were too few shards to reconstruct the missing data.
var ErrTooFewShards = errors.New("too few shards given")

// Encode parity for a set of data shards.
// Input is an array 'shards' containing data shards followed by parity shards.
// The number of shards must match the number given to New.
// Each shard is a byte array, and they must all be the same size.
// The parity shards will always be overwritten and the data shards
// will remain the same.
func (r reedSolomon) Encode(shards [][]byte) error {
	if len(shards) != r.Shards {
		return ErrTooFewShards
	}

	err := checkShards(shards, false)
	if err != nil {
		return err
	}

	// Get the slice of output buffers.
	output := shards[r.DataShards:]

	// Do the coding.
	r.codeSomeShards(r.parity, shards[0:r.DataShards], output, r.ParityShards, len(shards[0]))
	return nil
}

// Verify returns true if the parity shards contain the right data.
// The data is the same format as Encode. No data is modified.
func (r reedSolomon) Verify(shards [][]byte) (bool, error) {
	if len(shards) != r.Shards {
		return false, ErrTooFewShards
	}
	err := checkShards(shards, false)
	if err != nil {
		return false, err
	}

	// Slice of buffers being checked.
	toCheck := shards[r.DataShards:]

	// Do the checking.
	return r.checkSomeShards(r.parity, shards[0:r.DataShards], toCheck, r.ParityShards, len(shards[0])), nil
}

// codeSomeShards multiplies a subset of rows from a coding matrix by a full
// set of input shards to produce some output shards.
//
// 'matrixRows' is the rows from the matrix to use.
// 'inputs' is an array of byte arrays, each of which is one input shard.
// The number of inputs used is determined by the length of each matrix row.
// 'outputs' is the byte arrays where the computed shards are stored.
// The number of outputs computed, and the number of matrix rows used,
// is determined by outputCount.
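//
// Conceptually, each output row is a GF(2^8) dot product of its matrix row
// with the inputs (a sketch of the per-row arithmetic, not extra behaviour):
//
//	outputs[iRow] = matrixRows[iRow][0]*inputs[0] XOR ... XOR matrixRows[iRow][DataShards-1]*inputs[DataShards-1]
//
// where '*' is Galois-field multiplication applied byte by byte.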
func (r reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
		r.codeSomeShardsP(matrixRows, inputs, outputs, outputCount, byteCount)
		return
	}
	for c := 0; c < r.DataShards; c++ {
		in := inputs[c]
		for iRow := 0; iRow < outputCount; iRow++ {
			if c == 0 {
				galMulSlice(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
			} else {
				galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
			}
		}
	}
}

// codeSomeShardsP performs the same operation as codeSomeShards,
// but splits the workload across several goroutines.
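//
// Each goroutine handles a disjoint [start:stop) byte range of every shard,
// so the goroutines never read or write overlapping bytes and no
// synchronization beyond the WaitGroup is needed.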
func (r reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
	var wg sync.WaitGroup
	do := byteCount / r.o.maxGoroutines
	if do < r.o.minSplitSize {
		do = r.o.minSplitSize
	}
	start := 0
	for start < byteCount {
		if start+do > byteCount {
			do = byteCount - start
		}
		wg.Add(1)
		go func(start, stop int) {
			for c := 0; c < r.DataShards; c++ {
				in := inputs[c]
				for iRow := 0; iRow < outputCount; iRow++ {
					if c == 0 {
						galMulSlice(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
					} else {
						galMulSliceXor(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
					}
				}
			}
			wg.Done()
		}(start, start+do)
		start += do
	}
	wg.Wait()
}

// checkSomeShards is mostly the same as codeSomeShards,
// except this will check values and return
// as soon as a difference is found.
func (r reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
		return r.checkSomeShardsP(matrixRows, inputs, toCheck, outputCount, byteCount)
	}
	outputs := make([][]byte, len(toCheck))
	for i := range outputs {
		outputs[i] = make([]byte, byteCount)
	}
	for c := 0; c < r.DataShards; c++ {
		in := inputs[c]
		for iRow := 0; iRow < outputCount; iRow++ {
			galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
		}
	}

	for i, calc := range outputs {
		if !bytes.Equal(calc, toCheck[i]) {
			return false
		}
	}
	return true
}

func (r reedSolomon) checkSomeShardsP(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
	same := true
	var mu sync.RWMutex // Guards 'same' above.

	var wg sync.WaitGroup
	do := byteCount / r.o.maxGoroutines
	if do < r.o.minSplitSize {
		do = r.o.minSplitSize
	}
	start := 0
	for start < byteCount {
		if start+do > byteCount {
			do = byteCount - start
		}
		wg.Add(1)
		go func(start, do int) {
			defer wg.Done()
			outputs := make([][]byte, len(toCheck))
			for i := range outputs {
				outputs[i] = make([]byte, do)
			}
			for c := 0; c < r.DataShards; c++ {
				mu.RLock()
				if !same {
					mu.RUnlock()
					return
				}
				mu.RUnlock()
				in := inputs[c][start : start+do]
				for iRow := 0; iRow < outputCount; iRow++ {
					galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
				}
			}

			for i, calc := range outputs {
				if !bytes.Equal(calc, toCheck[i][start:start+do]) {
					mu.Lock()
					same = false
					mu.Unlock()
					return
				}
			}
		}(start, do)
		start += do
	}
	wg.Wait()
	return same
}

// ErrShardNoData will be returned if there are no shards,
// or if the length of all shards is zero.
var ErrShardNoData = errors.New("no shard data")

// ErrShardSize is returned if shard length isn't the same for all
// shards.
var ErrShardSize = errors.New("shard sizes does not match")

// checkShards will check if all shards are the same size,
// or 0 if zero-length shards are allowed (nilok).
// An error is returned if this fails.
// An error is also returned if all shards are size 0.
func checkShards(shards [][]byte, nilok bool) error {
	size := shardSize(shards)
	if size == 0 {
		return ErrShardNoData
	}
	for _, shard := range shards {
		if len(shard) != size {
			if len(shard) != 0 || !nilok {
				return ErrShardSize
			}
		}
	}
	return nil
}

// shardSize returns the size of a single shard.
// The first non-zero size is returned,
// or 0 if all shards are size 0.
func shardSize(shards [][]byte) int {
	for _, shard := range shards {
		if len(shard) != 0 {
			return len(shard)
		}
	}
	return 0
}

// Reconstruct will recreate the missing shards, if possible.
//
// Given a list of shards, some of which contain data, fills in the
// ones that don't have data.
//
// The length of the array must be equal to Shards.
// You indicate that a shard is missing by setting it to nil.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if the data set is ok.
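//
// A minimal sketch of the intended call pattern (hypothetical shard indices,
// error handling omitted):
//
//	shards[0] = nil             // a lost data shard
//	shards[5] = nil             // a lost parity shard
//	err := enc.Reconstruct(shards)
//	ok, _ := enc.Verify(shards) // recommended after reconstruction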
func (r reedSolomon) Reconstruct(shards [][]byte) error {
	if len(shards) != r.Shards {
		return ErrTooFewShards
	}
	// Check arguments.
	err := checkShards(shards, true)
	if err != nil {
		return err
	}

	shardSize := shardSize(shards)

	// Quick check: are all of the shards present? If so, there's
	// nothing to do.
	numberPresent := 0
	for i := 0; i < r.Shards; i++ {
		if len(shards[i]) != 0 {
			numberPresent++
		}
	}
	if numberPresent == r.Shards {
		// Cool. All of the shards have data. We don't
		// need to do anything.
		return nil
	}

	// More complete sanity check.
	if numberPresent < r.DataShards {
		return ErrTooFewShards
	}

	// Pull out an array holding just the shards that
	// correspond to the rows of the submatrix. These shards
	// will be the input to the decoding process that re-creates
	// the missing data shards.
	//
	// Also, create an array of indices of the valid rows we do have
	// and the invalid rows we don't have up until we have enough valid rows.
	subShards := make([][]byte, r.DataShards)
	validIndices := make([]int, r.DataShards)
	invalidIndices := make([]int, 0)
	subMatrixRow := 0
	for matrixRow := 0; matrixRow < r.Shards && subMatrixRow < r.DataShards; matrixRow++ {
		if len(shards[matrixRow]) != 0 {
			subShards[subMatrixRow] = shards[matrixRow]
			validIndices[subMatrixRow] = matrixRow
			subMatrixRow++
		} else {
			invalidIndices = append(invalidIndices, matrixRow)
		}
	}

	// Attempt to get the cached inverted matrix out of the tree
	// based on the indices of the invalid rows.
	dataDecodeMatrix := r.tree.GetInvertedMatrix(invalidIndices)

	// If the inverted matrix isn't cached in the tree yet we must
	// construct it ourselves and insert it into the tree for the
	// future. In this way the inversion tree is lazily loaded.
	if dataDecodeMatrix == nil {
		// Pull out the rows of the matrix that correspond to the
		// shards that we have and build a square matrix. This
		// matrix could be used to generate the shards that we have
		// from the original data.
		subMatrix, _ := newMatrix(r.DataShards, r.DataShards)
		for subMatrixRow, validIndex := range validIndices {
			for c := 0; c < r.DataShards; c++ {
				subMatrix[subMatrixRow][c] = r.m[validIndex][c]
			}
		}
		// Invert the matrix, so we can go from the encoded shards
		// back to the original data. Then pull out the row that
		// generates the shard that we want to decode. Note that
		// since this matrix maps back to the original data, it can
		// be used to create a data shard, but not a parity shard.
		dataDecodeMatrix, err = subMatrix.Invert()
		if err != nil {
			return err
		}

		// Cache the inverted matrix in the tree for future use keyed on the
		// indices of the invalid rows.
		err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.Shards)
		if err != nil {
			return err
		}
	}

	// Re-create any data shards that were missing.
	//
	// The input to the coding is all of the shards we actually
	// have, and the output is the missing data shards. The computation
	// is done using the special decode matrix we just built.
	outputs := make([][]byte, r.ParityShards)
	matrixRows := make([][]byte, r.ParityShards)
	outputCount := 0

	for iShard := 0; iShard < r.DataShards; iShard++ {
		if len(shards[iShard]) == 0 {
			shards[iShard] = make([]byte, shardSize)
			outputs[outputCount] = shards[iShard]
			matrixRows[outputCount] = dataDecodeMatrix[iShard]
			outputCount++
		}
	}
	r.codeSomeShards(matrixRows, subShards, outputs[:outputCount], outputCount, shardSize)

	// Now that we have all of the data shards intact, we can
	// compute any of the parity that is missing.
	//
	// The input to the coding is ALL of the data shards, including
	// any that we just calculated. The output is whichever of the
	// parity shards were missing.
	outputCount = 0
	for iShard := r.DataShards; iShard < r.Shards; iShard++ {
		if len(shards[iShard]) == 0 {
			shards[iShard] = make([]byte, shardSize)
			outputs[outputCount] = shards[iShard]
			matrixRows[outputCount] = r.parity[iShard-r.DataShards]
			outputCount++
		}
	}
	r.codeSomeShards(matrixRows, shards[:r.DataShards], outputs[:outputCount], outputCount, shardSize)
	return nil
}

// ErrShortData will be returned by Split(), if there isn't enough data
// to fill the number of shards.
var ErrShortData = errors.New("not enough data to fill the number of requested shards")

// Split a data slice into the number of shards given to the encoder,
// and create empty parity shards.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// There must be at least 1 byte, otherwise ErrShortData will be
// returned.
//
// The data will not be copied, except for the last shard, so you
// should not modify the data of the input slice afterwards.
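//
// A worked example (hypothetical sizes): with 4 data + 2 parity shards and
// 10 bytes of input, perShard is ceil(10/4) = 3, the input is padded to
// 6*3 = 18 bytes, and Split returns 6 shards of 3 bytes each; the fourth
// shard carries the last data byte plus 2 zero bytes, and the 2 parity
// shards start out as all zeros until Encode fills them.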
func (r reedSolomon) Split(data []byte) ([][]byte, error) {
	if len(data) == 0 {
		return nil, ErrShortData
	}
	// Calculate number of bytes per shard.
	perShard := (len(data) + r.DataShards - 1) / r.DataShards

	// Pad data to r.Shards*perShard.
	padding := make([]byte, (r.Shards*perShard)-len(data))
	data = append(data, padding...)

	// Split into equal-length shards.
	dst := make([][]byte, r.Shards)
	for i := range dst {
		dst[i] = data[:perShard]
		data = data[perShard:]
	}

	return dst, nil
}

// ErrReconstructRequired is returned if too few data shards are intact and a
// reconstruction is required before you can successfully join the shards.
var ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")

// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
// You must supply the exact output size you want.
//
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
// If one or more required data shards are nil, ErrReconstructRequired will be returned.
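//
// A typical Split/Encode/Join round trip (sketch, hypothetical names,
// error handling omitted):
//
//	shards, _ := enc.Split(data)
//	_ = enc.Encode(shards)
//	var buf bytes.Buffer
//	_ = enc.Join(&buf, shards, len(data))
//	// buf now holds the original data, with the Split padding dropped.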
func (r reedSolomon) Join(dst io.Writer, shards [][]byte, outSize int) error {
	// Do we have enough shards?
	if len(shards) < r.DataShards {
		return ErrTooFewShards
	}
	shards = shards[:r.DataShards]

	// Do we have enough data?
	size := 0
	for _, shard := range shards {
		if shard == nil {
			return ErrReconstructRequired
		}
		size += len(shard)

		// Do we have enough data already?
		if size >= outSize {
			break
		}
	}
	if size < outSize {
		return ErrShortData
	}

	// Copy data to dst.
	write := outSize
	for _, shard := range shards {
		if write < len(shard) {
			_, err := dst.Write(shard[:write])
			return err
		}
		n, err := dst.Write(shard)
		if err != nil {
			return err
		}
		write -= n
	}
	return nil
}