re-use the same local drive used by remote-peer (#18645)
Historically, we have kept the storage REST server and the local storage API separate without much trouble, since the two could operate independently with no shared state between them. Over time, however, we have added per-drive state, and running two separate instances per drive now causes problems:

- drive monitoring threads: there are now "2" of them per drive instead of just 1.
- concurrent tokens available per drive are doubled instead of drawn from a single shared pool, allowing an unexpectedly high amount of I/O to go through.
- serialization via walkMutexes can only be adequately honored for both remote and local callers when they share the same instance.

Re-using the same local drive instance for the remote peer addresses all three.
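The shape of the fix is easiest to see outside the diff: instead of each REST server holding its own drive object, both the REST path and the local path resolve the drive through one shared, mutex-guarded table. A minimal sketch of that pattern, using hypothetical names (Drive, driveTable, lookupDrive) in place of minio's StorageAPI, globalLocalSetDrives, and getStorage:

package main

import (
	"fmt"
	"sync"
)

// Drive stands in for minio's StorageAPI; all names in this sketch are
// illustrative, not the actual minio identifiers.
type Drive struct{ path string }

var (
	driveMu    sync.RWMutex // plays the role of globalLocalDrivesMu
	driveTable [][][]*Drive // plays the role of globalLocalSetDrives
)

// lookupDrive resolves (pool, set, disk) to the one shared Drive, so the
// REST path and the local path see the same monitoring threads, I/O
// tokens, and mutexes instead of maintaining a second copy of each.
func lookupDrive(pool, set, disk int) *Drive {
	driveMu.RLock()
	defer driveMu.RUnlock()
	return driveTable[pool][set][disk]
}

func main() {
	driveMu.Lock()
	driveTable = [][][]*Drive{{{&Drive{path: "/mnt/drive1"}}}}
	driveMu.Unlock()

	fmt.Println(lookupDrive(0, 0, 0).path) // prints: /mnt/drive1
}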
cmd/storage-rest-server.go

@@ -34,7 +34,6 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/minio/minio/internal/grid"
@@ -55,18 +54,13 @@ var errDiskStale = errors.New("drive stale")
 
 // To abstract a disk over network.
 type storageRESTServer struct {
-	storage atomic.Value
+	poolIndex, setIndex, diskIndex int
 }
 
-func (s *storageRESTServer) getStorage() *xlStorageDiskIDCheck {
-	if s, ok := s.storage.Load().(*xlStorageDiskIDCheck); ok {
-		return s
-	}
-	return nil
-}
-
-func (s *storageRESTServer) setStorage(xl *xlStorageDiskIDCheck) {
-	s.storage.Store(xl)
+func (s *storageRESTServer) getStorage() StorageAPI {
+	globalLocalDrivesMu.RLock()
+	defer globalLocalDrivesMu.RUnlock()
+	return globalLocalSetDrives[s.poolIndex][s.setIndex][s.diskIndex]
 }
 
 func (s *storageRESTServer) writeErrorResponse(w http.ResponseWriter, err error) {
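One consequence of the new lookup: the table slot stays nil until createStorage succeeds (drives can come up late, see the retry loop further down), so callers of getStorage must tolerate a nil result. A hypothetical guard in the same style, assuming errDiskNotFound from minio's storage errors:

// checkDrive is a hypothetical helper sketching the nil-guard that
// handlers need once drives are resolved through a table that is
// populated asynchronously; errDiskNotFound is assumed, not shown here.
func (s *storageRESTServer) checkDrive() (StorageAPI, error) {
	storage := s.getStorage()
	if storage == nil {
		return nil, errDiskNotFound // slot not populated yet
	}
	return storage, nil
}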
@@ -218,7 +212,6 @@ func (s *storageRESTServer) DiskInfoHandler(params *grid.MSS) (*DiskInfo, *grid.
 	if err != nil {
 		info.Error = err.Error()
 	}
-	info.Scanning = s.getStorage().storage != nil && atomic.LoadInt32(&s.getStorage().storage.scanning) > 0
 	return &info, nil
 }
 
@@ -735,6 +728,7 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
 	setEventStreamHeaders(w)
 	encoder := gob.NewEncoder(w)
 	done := keepHTTPResponseAlive(w)
+
 	errs := s.getStorage().DeleteVersions(r.Context(), volume, versions)
 	done(nil)
 	for idx := range versions {
@@ -1332,25 +1326,36 @@ func (s *storageRESTServer) ReadMultiple(w http.ResponseWriter, r *http.Request)
 	rw.CloseWithError(err)
 }
 
+// globalLocalSetDrives is used for local drive as well as remote REST
+// API caller for other nodes to talk to this node.
+//
+// Any updates to this must be serialized via globalLocalDrivesMu (locker)
+var globalLocalSetDrives [][][]StorageAPI
+
 // registerStorageRESTHandlers - register storage rpc router.
 func registerStorageRESTHandlers(router *mux.Router, endpointServerPools EndpointServerPools, gm *grid.Manager) {
 	h := func(f http.HandlerFunc) http.HandlerFunc {
 		return collectInternodeStats(httpTraceHdrs(f))
 	}
 
-	driveHandlers := make([][]*storageRESTServer, len(endpointServerPools))
-	for pool, serverPool := range endpointServerPools {
-		driveHandlers[pool] = make([]*storageRESTServer, len(serverPool.Endpoints))
+	globalLocalSetDrives = make([][][]StorageAPI, len(endpointServerPools))
+	for pool := range globalLocalSetDrives {
+		globalLocalSetDrives[pool] = make([][]StorageAPI, endpointServerPools[pool].SetCount)
+		for set := range globalLocalSetDrives[pool] {
+			globalLocalSetDrives[pool][set] = make([]StorageAPI, endpointServerPools[pool].DrivesPerSet)
+		}
 	}
 
-	for pool, serverPool := range endpointServerPools {
-		for set, endpoint := range serverPool.Endpoints {
+	for _, serverPool := range endpointServerPools {
+		for _, endpoint := range serverPool.Endpoints {
 			if !endpoint.IsLocal {
 				continue
 			}
 
-			driveHandlers[pool][set] = &storageRESTServer{}
-			server := driveHandlers[pool][set]
+			server := &storageRESTServer{
+				poolIndex: endpoint.PoolIdx,
+				setIndex:  endpoint.SetIdx,
+				diskIndex: endpoint.DiskIdx,
+			}
 
 			subrouter := router.PathPrefix(path.Join(storageRESTPrefix, endpoint.Path)).Subrouter()
 
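Note that the table is fully sized from the topology (pools x SetCount x DrivesPerSet) before any handler runs, so concurrent readers never race a resize; only the element values change later, and only under globalLocalDrivesMu. The same allocation shape as a standalone sketch (makeDriveTable is a hypothetical name):

// makeDriveTable pre-allocates every [pool][set][drive] slot up front,
// so later writers only assign into existing slots and readers holding
// the RLock never observe a slice resize.
func makeDriveTable(pools, setsPerPool, drivesPerSet int) [][][]StorageAPI {
	t := make([][][]StorageAPI, pools)
	for p := range t {
		t[p] = make([][]StorageAPI, setsPerPool)
		for s := range t[p] {
			t[p][s] = make([]StorageAPI, drivesPerSet)
		}
	}
	return t
}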
@@ -1404,16 +1409,23 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin
 			}
 			storage := newXLStorageDiskIDCheck(xl, true)
 			storage.SetDiskID(xl.diskID)
-			server.setStorage(storage)
+
+			globalLocalDrivesMu.Lock()
+			defer globalLocalDrivesMu.Unlock()
+
+			globalLocalDrives = append(globalLocalDrives, storage)
+			globalLocalSetDrives[endpoint.PoolIdx][endpoint.SetIdx][endpoint.DiskIdx] = storage
 			return true
 		}
 
 		if createStorage(server) {
 			continue
 		}
 
 		// Start async goroutine to create storage.
 		go func(server *storageRESTServer) {
 			for {
-				time.Sleep(5 * time.Second)
+				time.Sleep(3 * time.Second)
 				if createStorage(server) {
 					return
 				}
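The registration path tries createStorage once synchronously and, if the drive is not ready, keeps retrying from a goroutine (every 3 seconds after this change). A standalone sketch of that retry shape; retryUntil and the stop channel are illustrative additions, not part of the minio code:

// retryUntil keeps invoking create until it succeeds or stop is closed,
// mirroring the async createStorage loop in the diff above.
func retryUntil(create func() bool, every time.Duration, stop <-chan struct{}) {
	if create() {
		return // fast path: the drive was ready on the first attempt
	}
	go func() {
		ticker := time.NewTicker(every)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-ticker.C:
				if create() {
					return
				}
			}
		}
	}()
}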