cli: add flag max-retries for upload-bin
Signed-off-by: Ekaterina Pavlova <[email protected]>
AliceInHunterland committed Nov 5, 2024
Parent: 3a945ab · Commit: 6591e12
Showing 3 changed files with 34 additions and 22 deletions.
13 changes: 12 additions & 1 deletion cli/util/convert.go
@@ -90,6 +90,17 @@ func NewCommands() []*cli.Command {
 			Name:  "skip-blocks-uploading",
 			Usage: "Skip blocks uploading and upload only index files",
 		},
+		&cli.UintFlag{
+			Name:  "retries",
+			Usage: "Maximum number of Neo/NeoFS node request retries",
+			Value: 5,
+			Action: func(context *cli.Context, u uint) error {
+				if u < 1 {
+					return cli.Exit("retries should be greater than 0", 1)
+				}
+				return nil
+			},
+		},
 	}, options.RPC...)
 	uploadBinFlags = append(uploadBinFlags, options.Wallet...)
 	return []*cli.Command{
@@ -172,7 +183,7 @@ func NewCommands() []*cli.Command {
 		{
 			Name:      "upload-bin",
 			Usage:     "Fetch blocks from RPC node and upload them to the NeoFS container",
-			UsageText: "neo-go util upload-bin --fs-rpc-endpoint <address1>[,<address2>[...]] --container <cid> --block-attribute block --index-attribute index --rpc-endpoint <node> [--timeout <time>] --wallet <wallet> [--wallet-config <config>] [--address <address>] [--workers <num>] [--searchers <num>] [--index-file-size <size>] [--skip-blocks-uploading]",
+			UsageText: "neo-go util upload-bin --fs-rpc-endpoint <address1>[,<address2>[...]] --container <cid> --block-attribute block --index-attribute index --rpc-endpoint <node> [--timeout <time>] --wallet <wallet> [--wallet-config <config>] [--address <address>] [--workers <num>] [--searchers <num>] [--index-file-size <size>] [--skip-blocks-uploading] [--retries <num>]",
 			Action:    uploadBin,
 			Flags:     uploadBinFlags,
 		},
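For context, a minimal sketch of how the new flag-level Action validator behaves, assuming urfave/cli v2 semantics (the demo app below is illustrative and not part of this commit):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "demo",
		Flags: []cli.Flag{
			&cli.UintFlag{
				Name:  "retries",
				Usage: "Maximum number of request retries",
				Value: 5, // default used when the flag is omitted
				// The Action runs after parsing; returning cli.Exit aborts
				// the app with the given message and exit code.
				Action: func(ctx *cli.Context, u uint) error {
					if u < 1 {
						return cli.Exit("retries should be greater than 0", 1)
					}
					return nil
				},
			},
		},
		Action: func(ctx *cli.Context) error {
			fmt.Println("retries:", ctx.Uint("retries"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```

With this change, an invocation such as `neo-go util upload-bin ... --retries 10` overrides the default of 5, while `--retries 0` is rejected at parse time with exit code 1.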
42 changes: 21 additions & 21 deletions cli/util/uploader.go
@@ -40,8 +40,6 @@ const (
 
 // Constants related to retry mechanism.
 const (
-	// Maximum number of retries.
-	maxRetries = 5
 	// Initial backoff duration.
 	initialBackoff = 500 * time.Millisecond
 	// Backoff multiplier.
@@ -67,6 +65,8 @@ func uploadBin(ctx *cli.Context) error {
 	attr := ctx.String("block-attribute")
 	numWorkers := ctx.Int("workers")
 	maxParallelSearches := ctx.Int("searchers")
+	maxRetries := int(ctx.Uint("retries"))
+
 	acc, _, err := options.GetAccFromContext(ctx)
 	if err != nil {
 		return cli.Exit(fmt.Sprintf("failed to load account: %v", err), 1)
@@ -105,7 +105,7 @@ func uploadBin(ctx *cli.Context) error {
 		var errNet error
 		net, errNet = p.NetworkInfo(ctx.Context, client.PrmNetworkInfo{})
 		return errNet
-	})
+	}, maxRetries)
 	if err != nil {
 		return cli.Exit(fmt.Errorf("failed to get network info: %w", err), 1)
 	}
@@ -115,7 +115,7 @@ func uploadBin(ctx *cli.Context) error {
 	err = retry(func() error {
 		containerObj, err = p.ContainerGet(ctx.Context, containerID, client.PrmContainerGet{})
 		return err
-	})
+	}, maxRetries)
 	if err != nil {
 		return cli.Exit(fmt.Errorf("failed to get container with ID %s: %w", containerID, err), 1)
 	}
@@ -136,28 +136,28 @@ func uploadBin(ctx *cli.Context) error {
 	}
 	fmt.Fprintln(ctx.App.Writer, "Chain block height:", currentBlockHeight)
 
-	oldestMissingBlockIndex, errBlock := fetchLatestMissingBlockIndex(ctx.Context, p, containerID, acc.PrivateKey(), attr, int(currentBlockHeight), maxParallelSearches)
+	oldestMissingBlockIndex, errBlock := fetchLatestMissingBlockIndex(ctx.Context, p, containerID, acc.PrivateKey(), attr, int(currentBlockHeight), maxParallelSearches, maxRetries)
 	if errBlock != nil {
 		return cli.Exit(fmt.Errorf("failed to fetch the oldest missing block index from container: %w", err), 1)
 	}
 	fmt.Fprintln(ctx.App.Writer, "First block of latest incomplete batch uploaded to NeoFS container:", oldestMissingBlockIndex)
 
 	if !ctx.Bool("skip-blocks-uploading") {
-		err = uploadBlocks(ctx, p, rpc, signer, containerID, acc, attr, oldestMissingBlockIndex, uint(currentBlockHeight), homomorphicHashingDisabled, numWorkers)
+		err = uploadBlocks(ctx, p, rpc, signer, containerID, acc, attr, oldestMissingBlockIndex, uint(currentBlockHeight), homomorphicHashingDisabled, numWorkers, maxRetries)
 		if err != nil {
 			return cli.Exit(fmt.Errorf("failed to upload blocks: %w", err), 1)
 		}
 	}
 
-	err = uploadIndexFiles(ctx, p, containerID, acc, signer, uint(currentBlockHeight), attr, homomorphicHashingDisabled, maxParallelSearches)
+	err = uploadIndexFiles(ctx, p, containerID, acc, signer, uint(currentBlockHeight), attr, homomorphicHashingDisabled, maxParallelSearches, maxRetries)
 	if err != nil {
 		return cli.Exit(fmt.Errorf("failed to upload index files: %w", err), 1)
 	}
 	return nil
 }
 
 // retry function with exponential backoff.
-func retry(action func() error) error {
+func retry(action func() error, maxRetries int) error {
 	var err error
 	backoff := initialBackoff
 	for range maxRetries {
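The body of retry is cut off by the hunk boundary above. A sketch of what the full loop plausibly looks like, given the surrounding const block (backoffFactor and maxBackoff are assumed companions of initialBackoff; only initialBackoff is visible in this diff):

```go
package retrysketch

import "time"

const (
	initialBackoff = 500 * time.Millisecond // from the diff
	backoffFactor  = 2                      // assumed multiplier
	maxBackoff     = 20 * time.Second       // assumed cap
)

// retry runs action up to maxRetries times, sleeping with exponential
// backoff between failed attempts, and returns the last error seen.
func retry(action func() error, maxRetries int) error {
	var err error
	backoff := initialBackoff
	for range maxRetries { // Go 1.22 integer range, as in the diff
		if err = action(); err == nil {
			return nil
		}
		time.Sleep(backoff)
		backoff *= backoffFactor
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
	return err
}
```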
@@ -183,7 +183,7 @@ type searchResult struct {
 // fetchLatestMissingBlockIndex searches the container for the latest full batch of blocks
 // starting from the currentHeight and going backwards. It returns the index of first block
 // in the next batch.
-func fetchLatestMissingBlockIndex(ctx context.Context, p *pool.Pool, containerID cid.ID, priv *keys.PrivateKey, attributeKey string, currentHeight int, maxParallelSearches int) (int, error) {
+func fetchLatestMissingBlockIndex(ctx context.Context, p *pool.Pool, containerID cid.ID, priv *keys.PrivateKey, attributeKey string, currentHeight int, maxParallelSearches, maxRetries int) (int, error) {
 	var (
 		wg         sync.WaitGroup
 		numBatches = currentHeight/searchBatchSize + 1
@@ -219,7 +219,7 @@ func fetchLatestMissingBlockIndex(ctx context.Context, p *pool.Pool, containerID
 			err = retry(func() error {
 				objectIDs, err = neofs.ObjectSearch(ctx, p, priv, containerID.String(), prm)
 				return err
-			})
+			}, maxRetries)
 			results[i] = searchResult{startIndex: startIndex, endIndex: endIndex, numOIDs: len(objectIDs), err: err}
 		}(i, startIndex, endIndex)
 	}
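The reduction of these per-batch results happens outside the lines shown. A purely illustrative reconstruction of the idea described in the doc comment above (walk batches from newest to oldest, stop at the first full one); it relies only on the searchResult fields visible in this hunk:

```go
// oldestMissing picks the first block index of the latest incomplete batch
// from per-batch search results. Illustrative only; the real logic in
// fetchLatestMissingBlockIndex is outside this hunk.
func oldestMissing(results []searchResult, batchSize int) (int, error) {
	for i := len(results) - 1; i >= 0; i-- {
		r := results[i]
		if r.err != nil {
			return 0, r.err
		}
		if r.numOIDs >= batchSize { // this batch is fully uploaded
			return r.endIndex, nil // the next batch starts here
		}
	}
	return 0, nil // nothing found; start uploading from block 0
}
```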
@@ -242,7 +242,7 @@ func fetchLatestMissingBlockIndex(ctx context.Context, p *pool.Pool, containerID
 }
 
 // uploadBlocks uploads the blocks to the container using the pool.
-func uploadBlocks(ctx *cli.Context, p *pool.Pool, rpc *rpcclient.Client, signer user.Signer, containerID cid.ID, acc *wallet.Account, attr string, oldestMissingBlockIndex int, currentBlockHeight uint, homomorphicHashingDisabled bool, numWorkers int) error {
+func uploadBlocks(ctx *cli.Context, p *pool.Pool, rpc *rpcclient.Client, signer user.Signer, containerID cid.ID, acc *wallet.Account, attr string, oldestMissingBlockIndex int, currentBlockHeight uint, homomorphicHashingDisabled bool, numWorkers, maxRetries int) error {
 	if oldestMissingBlockIndex > int(currentBlockHeight) {
 		fmt.Fprintf(ctx.App.Writer, "No new blocks to upload. Need to upload starting from %d, current height %d\n", oldestMissingBlockIndex, currentBlockHeight)
 		return nil
@@ -268,7 +268,7 @@ func uploadBlocks(ctx *cli.Context, p *pool.Pool, rpc *rpcclient.Client, signer
 					return fmt.Errorf("failed to fetch block %d: %w", blockIndex, errGetBlock)
 				}
 				return nil
-			})
+			}, maxRetries)
 			if errGet != nil {
 				select {
 				case errCh <- errGet:
@@ -297,7 +297,7 @@ func uploadBlocks(ctx *cli.Context, p *pool.Pool, rpc *rpcclient.Client, signer
 			objBytes := bw.Bytes()
 			errRetr := retry(func() error {
 				return uploadObj(ctx.Context, p, signer, acc.PrivateKey().GetScriptHash(), containerID, objBytes, attrs, homomorphicHashingDisabled)
-			})
+			}, maxRetries)
 			if errRetr != nil {
 				select {
 				case errCh <- errRetr:
@@ -326,7 +326,7 @@ func uploadBlocks(ctx *cli.Context, p *pool.Pool, rpc *rpcclient.Client, signer
 }
 
 // uploadIndexFiles uploads missing index files to the container.
-func uploadIndexFiles(ctx *cli.Context, p *pool.Pool, containerID cid.ID, account *wallet.Account, signer user.Signer, currentHeight uint, blockAttributeKey string, homomorphicHashingDisabled bool, maxParallelSearches int) error {
+func uploadIndexFiles(ctx *cli.Context, p *pool.Pool, containerID cid.ID, account *wallet.Account, signer user.Signer, currentHeight uint, blockAttributeKey string, homomorphicHashingDisabled bool, maxParallelSearches, maxRetries int) error {
 	attributeKey := ctx.String("index-attribute")
 	indexFileSize := ctx.Uint("index-file-size")
 	fmt.Fprintln(ctx.App.Writer, "Uploading index files...")
@@ -341,7 +341,7 @@ func uploadIndexFiles(ctx *cli.Context, p *pool.Pool, containerID cid.ID, accoun
 		var errSearchIndex error
 		objectIDs, errSearchIndex = neofs.ObjectSearch(ctx.Context, p, account.PrivateKey(), containerID.String(), prm)
 		return errSearchIndex
-	})
+	}, maxRetries)
 	if errSearch != nil {
 		return fmt.Errorf("index files search failed: %w", errSearch)
 	}
@@ -382,7 +382,7 @@ func uploadIndexFiles(ctx *cli.Context, p *pool.Pool, containerID cid.ID, accoun
 					var errGet error
 					obj, _, errGet = p.ObjectGetInit(ctx.Context, containerID, id, signer, client.PrmObjectGet{})
 					return errGet
-				})
+				}, maxRetries)
 				if errRetr != nil {
 					select {
 					case errCh <- fmt.Errorf("failed to fetch object %s: %w", id.String(), errRetr):
@@ -409,7 +409,7 @@ func uploadIndexFiles(ctx *cli.Context, p *pool.Pool, containerID cid.ID, accoun
 			// Search for blocks within the index file range.
 			startIndex := i * indexFileSize
 			endIndex := startIndex + indexFileSize
-			objIDs := searchObjects(ctx.Context, p, containerID, account, blockAttributeKey, startIndex, endIndex, maxParallelSearches, errCh)
+			objIDs := searchObjects(ctx.Context, p, containerID, account, blockAttributeKey, startIndex, endIndex, maxParallelSearches, maxRetries, errCh)
 			for id := range objIDs {
 				oidCh <- id
 			}
@@ -425,7 +425,7 @@ func uploadIndexFiles(ctx *cli.Context, p *pool.Pool, containerID cid.ID, accoun
 		for idx := range indexFileSize {
 			if _, ok := processedIndices.Load(idx); !ok {
 				count++
-				objIDs = searchObjects(ctx.Context, p, containerID, account, blockAttributeKey, i*indexFileSize+idx, i*indexFileSize+idx+1, 1, errCh)
+				objIDs = searchObjects(ctx.Context, p, containerID, account, blockAttributeKey, i*indexFileSize+idx, i*indexFileSize+idx+1, 1, maxRetries, errCh)
 				// Block object duplicates are allowed, we're OK with the first found result.
 				id, ok := <-objIDs
 				for range objIDs {
@@ -462,7 +462,7 @@ func uploadIndexFiles(ctx *cli.Context, p *pool.Pool, containerID cid.ID, accoun
 		}
 		err := retry(func() error {
 			return uploadObj(ctx.Context, p, signer, account.PrivateKey().GetScriptHash(), containerID, buffer, attrs, homomorphicHashingDisabled)
-		})
+		}, maxRetries)
 		if err != nil {
 			select {
 			case errCh <- fmt.Errorf("failed to upload index file %d: %w", i, err):
@@ -487,7 +487,7 @@ func uploadIndexFiles(ctx *cli.Context, p *pool.Pool, containerID cid.ID, accoun
 // searchObjects searches in parallel for objects with attribute GE startIndex and LT
 // endIndex. It returns a buffered channel of resulting object IDs and closes it once
 // OID search is finished. Errors are sent to errCh in a non-blocking way.
-func searchObjects(ctx context.Context, p *pool.Pool, containerID cid.ID, account *wallet.Account, blockAttributeKey string, startIndex, endIndex uint, maxParallelSearches int, errCh chan error) chan oid.ID {
+func searchObjects(ctx context.Context, p *pool.Pool, containerID cid.ID, account *wallet.Account, blockAttributeKey string, startIndex, endIndex uint, maxParallelSearches, maxRetries int, errCh chan error) chan oid.ID {
 	var res = make(chan oid.ID, 2*searchBatchSize)
 	go func() {
 		var wg sync.WaitGroup
@@ -520,7 +520,7 @@ func searchObjects(ctx context.Context, p *pool.Pool, containerID cid.ID, accoun
 				var errBlockSearch error
 				objIDs, errBlockSearch = neofs.ObjectSearch(ctx, p, account.PrivateKey(), containerID.String(), prm)
 				return errBlockSearch
-			})
+			}, maxRetries)
 			if err != nil {
 				select {
 				case errCh <- fmt.Errorf("failed to search for block(s) from %d to %d: %w", start, end, err):
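Per its doc comment, searchObjects hands back a buffered channel that it closes once all parallel searches finish, so callers can simply range over it. A hypothetical consumer (collectIDs is illustrative, not part of the codebase):

```go
package main

import (
	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
)

// collectIDs drains the channel returned by searchObjects into a slice.
// The range loop terminates on its own because searchObjects closes the
// channel when done; errors are reported on errCh, not here.
func collectIDs(oids <-chan oid.ID) []oid.ID {
	var out []oid.ID
	for id := range oids {
		out = append(out, id)
	}
	return out
}
```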
1 change: 1 addition & 0 deletions docs/neofs-blockstorage.md
@@ -99,6 +99,7 @@ OPTIONS:
    --workers value                  Number of workers to fetch, upload and search blocks concurrently (default: 50)
    --searchers value                Number of concurrent searches for blocks (default: 20)
    --skip-blocks-uploading          Skip blocks uploading and upload only index files (default: false)
+   --retries value                  Maximum number of Neo/NeoFS node request retries (default: 5)
    --rpc-endpoint value, -r value   RPC node address
    --timeout value, -s value        Timeout for the operation (default: 10s)
    --wallet value, -w value         Wallet to use to get the key for transaction signing; conflicts with --wallet-config flag
