fix: erasure index based reading based on actual ParityBlocks (#11792)

In some setups with ordering issues in the drive configuration,
we should rely on the expected parityBlocks instead of `len(disks)/2`.
Harshavardhana
2021-03-15 20:03:13 -07:00
committed by GitHub
parent e5a1a2a974
commit 6160188bf3
5 changed files with 12 additions and 10 deletions
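The commit message boils down to deriving the read quorum from the per-object parity count rather than from halving the drive count. A minimal sketch of that idea, using a hypothetical requiredShards helper that is not part of the MinIO code:

```go
// Minimal sketch, not the MinIO implementation: with parityBlocks parity
// shards recorded for an object, any (totalDisks - parityBlocks) shards are
// enough to reconstruct the data, so the read quorum should come from the
// recorded parity count instead of assuming len(disks)/2.
package main

import "fmt"

// requiredShards is a hypothetical helper name used only for illustration.
func requiredShards(totalDisks, parityBlocks int) int {
	return totalDisks - parityBlocks
}

func main() {
	// 6 drives with a reduced parity of 2: 4 shards are required,
	// whereas len(disks)/2 would wrongly suggest 3.
	fmt.Println(requiredShards(6, 2)) // prints 4
}
```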


@@ -161,7 +161,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
 		// For the last shard, the shardsize might be less than previous shard sizes.
 		// Hence the following statement ensures that the buffer size is reset to the right size.
 		p.buf[bufIdx] = p.buf[bufIdx][:p.shardSize]
-		_, err := rr.ReadAt(p.buf[bufIdx], p.offset)
+		n, err := rr.ReadAt(p.buf[bufIdx], p.offset)
 		if err != nil {
 			if errors.Is(err, errFileNotFound) {
 				atomic.StoreInt32(&missingPartsHeal, 1)
@@ -179,7 +179,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
 			return
 		}
 		newBufLK.Lock()
-		newBuf[bufIdx] = p.buf[bufIdx]
+		newBuf[bufIdx] = p.buf[bufIdx][:n]
 		newBufLK.Unlock()
 		// Since ReadAt returned success, there is no need to trigger another read.
 		readTriggerCh <- false
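The two hunks above capture the byte count returned by ReadAt and publish only `p.buf[bufIdx][:n]` into newBuf, so a short final shard does not carry stale bytes from the reused buffer. A standalone sketch of that pattern, assuming nothing about MinIO's reader types (the readShard helper and the sample sizes are illustrative only):

```go
// Minimal sketch, not the MinIO code: trim the buffer to the number of bytes
// actually read so downstream consumers never see leftover buffer contents.
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readShard is a hypothetical helper used only for illustration.
func readShard(r io.ReaderAt, buf []byte, off int64) ([]byte, error) {
	n, err := r.ReadAt(buf, off)
	if err != nil && err != io.EOF { // a short last shard surfaces io.EOF here
		return nil, err
	}
	return buf[:n], nil // keep only the bytes that were actually filled
}

func main() {
	src := bytes.NewReader([]byte("0123456789")) // 10 bytes of data
	buf := make([]byte, 8)                       // shard-sized buffer
	shard, err := readShard(src, buf, 8)         // the last "shard" has only 2 bytes
	fmt.Println(len(shard), err)                 // 2 <nil>
}
```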