Commit
Merge branch 'main' into split_dirty_mutex
AskAlexSharov committed Oct 22, 2024
2 parents a0f4a1e + 09fefa4 commit a1dbe8e
Showing 63 changed files with 2,640 additions and 1,066 deletions.
3 changes: 1 addition & 2 deletions .github/workflows/ci.yml
@@ -6,8 +6,7 @@ on:
- 'release/**'
pull_request:
branches:
- main
- 'release/**'
- '**'
types:
- opened
- reopened
44 changes: 44 additions & 0 deletions .github/workflows/docker-image-remove.yml
@@ -0,0 +1,44 @@
name: Remove docker image
run-name: Request to remove erigontech/erigon:${{ inputs.docker_image_tag }} by @${{ github.actor }}

env:
DOCKERHUB_REPOSITORY: "erigontech/erigon"
API_URL: "https://hub.docker.com/v2/repositories/erigontech/erigon/tags"

on:
push:
branches-ignore:
- '**'
workflow_dispatch:
inputs:
docker_image_tag:
required: true
type: string
default: 'not_yet_defined'
description: 'Docker image tag to remove from hub.docker.com. Works only for erigontech/erigon'

jobs:

build-release:
runs-on: ubuntu-latest
timeout-minutes: 15
name: Remove docker image

steps:

- name: Run API Call
env:
TOKEN: ${{ secrets.ORG_DOCKERHUB_ERIGONTECH_TOKEN }}
run: |
output_code=$(curl --write-out %{http_code} --output curl-output.log \
-s -X DELETE -H "Accept: application/json" \
-H "Authorization: JWT ${{ env.TOKEN }}" \
${{ env.API_URL }}/${{ inputs.docker_image_tag }} )
if [ $output_code -ne 204 ]; then
echo "ERROR: failed to remove docker image ${{ env.DOCKERHUB_REPOSITORY }}:${{ inputs.docker_image_tag }}"
echo "ERROR: API response: $(cat curl-output.log)."
exit 1
else
echo "SUCCESS: docker image ${{ env.DOCKERHUB_REPOSITORY }}:${{ inputs.docker_image_tag }} removed."
exit 0
fi
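
Note (not part of the commit): the step above is a plain Docker Hub v2 tag-deletion request. A minimal standalone Go sketch of the same call, assuming a pre-issued JWT in a DOCKER_TOKEN environment variable and a tag name passed as the first argument (both hypothetical):

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	tag := os.Args[1]                  // tag to delete, e.g. "main-abc1234" (illustrative)
	token := os.Getenv("DOCKER_TOKEN") // JWT for hub.docker.com (assumed to be provided by the caller)
	url := "https://hub.docker.com/v2/repositories/erigontech/erigon/tags/" + tag

	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Authorization", "JWT "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The workflow treats anything other than 204 No Content as a failure.
	if resp.StatusCode != http.StatusNoContent {
		fmt.Printf("ERROR: failed to remove erigontech/erigon:%s (HTTP %d)\n", tag, resp.StatusCode)
		os.Exit(1)
	}
	fmt.Printf("SUCCESS: erigontech/erigon:%s removed\n", tag)
}
```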
24 changes: 22 additions & 2 deletions .github/workflows/test-kurtosis-assertoor.yml
@@ -30,10 +30,30 @@ jobs:
ref: ${{ inputs.checkout_ref }}
path: 'erigon'

- name: Cleanup some space
run: |
df -h
sudo rm -drf \
/usr/share/dotnet \
/usr/share/swift \
/usr/local/julia* \
/opt/google/chrome \
/opt/microsoft/msedge \
/opt/microsoft/powershell \
/usr/lib/mono \
/usr/local/lib/android \
/usr/local/share/chromium
echo DEBUG current list of docker images
docker image ls
echo DEBUG Removing legacy node:1 matching docker images
sudo docker image rm $(docker image ls --filter=reference='node:1*' -q)
echo DEBUG new disk free output
df -h
- name: Setup go env and cache
uses: actions/setup-go@v5
with:
go-version: '>=1.22'
#go-version: '>=1.22'
go-version-file: 'erigon/go.mod'
cache-dependency-path: |
erigon/go.sum
@@ -56,4 +76,4 @@ jobs:
enclave_name: "kurtosis-run-${{ github.run_id }}"
ethereum_package_args: "./kurtosis_config.yaml"
#kurtosis_extra_args: --verbosity detailed --cli-log-level trace
enclave_dump: false
enclave_dump: false
2 changes: 1 addition & 1 deletion cl/beacon/handler/block_production.go
@@ -26,6 +26,7 @@ import (
"io"
"math/big"
"net/http"
"slices"
"sort"
"strconv"
"strings"
@@ -34,7 +35,6 @@

"github.com/Giulio2002/bls"
"github.com/go-chi/chi/v5"
"golang.org/x/exp/slices"

"github.com/erigontech/erigon-lib/common"
libcommon "github.com/erigontech/erigon-lib/common"
92 changes: 77 additions & 15 deletions cmd/commitment-prefix/main.go
@@ -17,6 +17,7 @@
package main

import (
"bytes"
"errors"
"flag"
"fmt"
@@ -42,6 +43,8 @@ var (
flagConcurrency = flag.Int("j", 4, "amount of concurrently proceeded files")
flagTrieVariant = flag.String("trie", "hex", "commitment trie variant (values are hex and bin)")
flagCompression = flag.String("compression", "none", "compression type (none, k, v, kv)")
flagPrintState = flag.Bool("state", false, "print state of file")
flagDepth = flag.Int("depth", 0, "depth of the prefixes to analyze")
)

func main() {
@@ -69,9 +72,10 @@ func proceedFiles(files []string) {

for i, fp := range files {
fpath, pos := fp, i
_ = pos
<-sema

fmt.Printf("\r[%d/%d] - %s..", pos+1, len(files), path.Base(fpath))
fmt.Printf("[%d/%d] - %s..", pos+1, len(files), path.Base(fpath))

wg.Add(1)
go func(wg *sync.WaitGroup, mu *sync.Mutex) {
@@ -86,16 +90,19 @@ func proceedFiles(files []string) {

mu.Lock()
page.AddCharts(

prefixLenCountChart(fpath, stat),
countersChart(fpath, stat),
mediansChart(fpath, stat),
fileContentsMapChart(fpath, stat),
)
mu.Unlock()
}(&wg, &mu)
}
wg.Wait()
fmt.Println()
if *flagPrintState {
return
}

dir := filepath.Dir(files[0])
if *flagOutputDirectory != "" {
@@ -180,21 +187,39 @@ func extractKVPairFromCompressed(filename string, keysSink chan commitment.Branc
size := dec.Size()
paris := dec.Count() / 2
cpair := 0

depth := *flagDepth
var afterValPos uint64
var key, val []byte
getter := seg.NewReader(dec.MakeGetter(), fc)
for getter.HasNext() {
key, _ := getter.Next(nil)
key, _ = getter.Next(key[:0])
if !getter.HasNext() {
return errors.New("invalid key/value pair during decompression")
}
val, afterValPos := getter.Next(nil)
if *flagPrintState && !bytes.Equal(key, []byte("state")) {
getter.Skip()
continue
}

val, afterValPos = getter.Next(val[:0])
cpair++
if bytes.Equal(key, []byte("state")) {
str, err := commitment.HexTrieStateToString(val)
if err != nil {
fmt.Printf("[ERR] failed to decode state: %v", err)
}
fmt.Printf("\n%s: %s\n", dec.FileName(), str)
continue
}

if cpair%100000 == 0 {
fmt.Printf("\r%s pair %d/%d %s/%s", filename, cpair, paris,
datasize.ByteSize(afterValPos).HumanReadable(), datasize.ByteSize(size).HumanReadable())
}

if depth > len(key) {
continue
}
stat := commitment.DecodeBranchAndCollectStat(key, val, tv)
if stat == nil {
fmt.Printf("failed to decode branch: %x %x\n", key, val)
@@ -252,7 +277,7 @@ func prefixLenCountChart(fname string, data *overallStat) *charts.Pie {
pie := charts.NewPie()
pie.SetGlobalOptions(
charts.WithTooltipOpts(opts.Tooltip{Show: true}),
charts.WithTitleOpts(opts.Title{Subtitle: fname, Title: "key prefix length distribution (bytes)", Top: "25"}),
charts.WithTitleOpts(opts.Title{Subtitle: filepath.Base(fname), Title: "key prefix length distribution (bytes)", Top: "25"}),
)

pie.AddSeries("prefixLen/count", items)
@@ -285,13 +310,17 @@ func fileContentsMapChart(fileName string, data *overallStat) *charts.TreeMap {
Value: int(data.branches.ExtSize),
},
{
Name: "apk",
Name: "accountKey",
Value: int(data.branches.APKSize),
},
{
Name: "spk",
Name: "storageKey",
Value: int(data.branches.SPKSize),
},
{
Name: "leafHashes",
Value: int(data.branches.LeafHashSize),
},
}

graph := charts.NewTreeMap()
@@ -305,12 +334,10 @@ func fileContentsMapChart(fileName string, data *overallStat) *charts.TreeMap {
)

// Add initialized data to graph.
graph.AddSeries(fileName, TreeMap).
graph.AddSeries(filepath.Base(fileName), TreeMap).
SetSeriesOptions(
charts.WithTreeMapOpts(
opts.TreeMapChart{
Animation: true,
//Roam: true,
UpperLabel: &opts.UpperLabel{Show: true, Color: "#fff"},
Levels: &[]opts.TreeMapLevel{
{ // Series
@@ -383,9 +410,6 @@ func countersChart(fname string, data *overallStat) *charts.Sankey {
sankey.SetGlobalOptions(
charts.WithLegendOpts(opts.Legend{Show: true}),
charts.WithTooltipOpts(opts.Tooltip{Show: true}),
//charts.WithTitleOpts(opts.Title{
// Title: "Sankey-basic-example",
//}),
)

nodes := []opts.SankeyNode{
@@ -394,15 +418,53 @@ func countersChart(fname string, data *overallStat) *charts.Sankey {
{Name: "SPK"},
{Name: "Hashes"},
{Name: "Extensions"},
{Name: "LeafHashes"},
}
sankeyLink := []opts.SankeyLink{
{Source: nodes[0].Name, Target: nodes[1].Name, Value: float32(data.branches.APKCount)},
{Source: nodes[0].Name, Target: nodes[2].Name, Value: float32(data.branches.SPKCount)},
{Source: nodes[0].Name, Target: nodes[3].Name, Value: float32(data.branches.HashCount)},
{Source: nodes[0].Name, Target: nodes[4].Name, Value: float32(data.branches.ExtCount)},
{Source: nodes[0].Name, Target: nodes[5].Name, Value: float32(data.branches.LeafHashCount)},
}

sankey.AddSeries("Counts "+filepath.Base(fname), nodes, sankeyLink).
SetSeriesOptions(
charts.WithLineStyleOpts(opts.LineStyle{
Color: "source",
Curveness: 0.5,
}),
charts.WithLabelOpts(opts.Label{
Show: true,
}),
)
return sankey
}

func mediansChart(fname string, data *overallStat) *charts.Sankey {
sankey := charts.NewSankey()
sankey.SetGlobalOptions(
charts.WithLegendOpts(opts.Legend{Show: true}),
charts.WithTooltipOpts(opts.Tooltip{Show: true}),
)

nodes := []opts.SankeyNode{
{Name: "Cells"},
{Name: "Addr"},
{Name: "Addr+Storage"},
{Name: "Hashes"},
{Name: "Extensions"},
{Name: "LeafHashes"},
}
sankeyLink := []opts.SankeyLink{
{Source: nodes[0].Name, Target: nodes[1].Name, Value: float32(data.branches.MedianAPK)},
{Source: nodes[0].Name, Target: nodes[2].Name, Value: float32(data.branches.MedianSPK)},
{Source: nodes[0].Name, Target: nodes[3].Name, Value: float32(data.branches.MedianHash)},
{Source: nodes[0].Name, Target: nodes[4].Name, Value: float32(data.branches.MedianExt)},
{Source: nodes[0].Name, Target: nodes[5].Name, Value: float32(data.branches.MedianLH)},
}

sankey.AddSeries(fname, nodes, sankeyLink).
sankey.AddSeries("Medians "+filepath.Base(fname), nodes, sankeyLink).
SetSeriesOptions(
charts.WithLineStyleOpts(opts.LineStyle{
Color: "source",
11 changes: 3 additions & 8 deletions cmd/integration/commands/stages.go
@@ -254,7 +254,7 @@ var cmdStageCustomTrace = &cobra.Command{
}

var cmdStagePatriciaTrie = &cobra.Command{
Use: "rebuild_trie3_files",
Use: "commitment_rebuild",
Short: "",
Run: func(cmd *cobra.Command, args []string) {
logger := debug.SetupCobra(cmd, "integration")
@@ -1178,20 +1178,15 @@ func stagePatriciaTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error
if reset {
return reset2.Reset(ctx, db, stages.Execution)
}
tx, err := db.BeginRw(ctx)
if err != nil {
return err
}
defer tx.Rollback()

br, _ := blocksIO(db, logger)
historyV3 := true
cfg := stagedsync.StageTrieCfg(db, true /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, br, nil /* hd */, historyV3, agg)

if _, err := stagedsync.RebuildPatriciaTrieBasedOnFiles(tx, cfg, ctx, logger); err != nil {
if _, err := stagedsync.RebuildPatriciaTrieBasedOnFiles(ctx, cfg); err != nil {
return err
}
return tx.Commit()
return nil
}

func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error {
2 changes: 1 addition & 1 deletion cmd/rpcdaemon/cli/config.go
@@ -440,7 +440,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger
return nil
})
} else {
log.Warn("[rpc] download of segments not complete yet (need wait, then RPC will work)")
logger.Debug("[rpc] download of segments not complete yet. please wait StageSnapshots to finish")
}

wg := errgroup.Group{}
14 changes: 14 additions & 0 deletions erigon-lib/chain/snapcfg/util.go
@@ -368,11 +368,18 @@ type Cfg struct {
networkName string
}

// Seedable - can seed it over Bittorrent network to other nodes
func (c Cfg) Seedable(info snaptype.FileInfo) bool {
mergeLimit := c.MergeLimit(info.Type.Enum(), info.From)
return info.To-info.From == mergeLimit
}

// IsFrozen - can't be merged to bigger files
func (c Cfg) IsFrozen(info snaptype.FileInfo) bool {
mergeLimit := c.MergeLimit(info.Type.Enum(), info.From)
return info.To-info.From == mergeLimit
}

func (c Cfg) MergeLimit(t snaptype.Enum, fromBlock uint64) uint64 {
hasType := t == snaptype.MinCoreEnum

@@ -440,6 +447,13 @@ func Seedable(networkName string, info snaptype.FileInfo) bool {
return KnownCfg(networkName).Seedable(info)
}

func IsFrozen(networkName string, info snaptype.FileInfo) bool {
if networkName == "" {
return false
}
return KnownCfg(networkName).IsFrozen(info)
}

func MergeLimitFromCfg(cfg *Cfg, snapType snaptype.Enum, fromBlock uint64) uint64 {
return cfg.MergeLimit(snapType, fromBlock)
}
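
A hypothetical caller sketch (not from the commit; the wrapper function and the snaptype import path are assumptions based on the repository layout) showing how the new package-level helpers might be consulted. Note that in this revision Seedable and IsFrozen apply the same merge-limit check and differ only in intent, as their comments state:

```go
package main

import (
	"fmt"

	"github.com/erigontech/erigon-lib/chain/snapcfg"       // path taken from the file shown above
	"github.com/erigontech/erigon-lib/downloader/snaptype" // assumed location of snaptype.FileInfo
)

// snapshotStatus is an illustrative helper, not part of the commit.
func snapshotStatus(networkName string, info snaptype.FileInfo) (seedable, frozen bool) {
	// Seedable: the file spans exactly one merge-limit range, so it can be
	// announced over BitTorrent. IsFrozen: it will not be merged further.
	return snapcfg.Seedable(networkName, info), snapcfg.IsFrozen(networkName, info)
}

func main() {
	var info snaptype.FileInfo // zero value for illustration; real callers derive this from a snapshot file name
	seedable, frozen := snapshotStatus("mainnet", info)
	fmt.Println(seedable, frozen)
}
```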