diff --git a/.gitignore b/.gitignore
index b7b0629ab25..1a18a9063bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,3 @@
-/dist
/.idea
/.vscode
/tmp
@@ -29,3 +28,6 @@ _testmain.go
*.prof
.DS_Store
+
+dist/bee*
+dist/sw_bundle.js
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 01568f1136b..81946c049d7 100644
--- a/Makefile
+++ b/Makefile
@@ -172,4 +172,9 @@ clean:
$(GO) clean
rm -rf dist/
-FORCE:
\ No newline at end of file
+FORCE:
+
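+# wasm/wasm-release build bee as a WebAssembly module (GOOS=js GOARCH=wasm).
+# Note: dist/sw.js also expects Go's wasm_exec.js runtime shim next to the
+# module; copy it from your Go distribution (its path varies by Go version).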
+wasm:
+	GOOS=js GOARCH=wasm $(GO) build -o ./dist/bee.wasm ./cmd/bee
+wasm-release:
+	GOOS=js GOARCH=wasm $(GO) build -trimpath -o ./dist/bee.wasm -ldflags="-s -w" ./cmd/bee
\ No newline at end of file
diff --git a/cmd/bee/cmd/cmd.go b/cmd/bee/cmd/cmd.go
index e10e015ce81..c302757fce6 100644
--- a/cmd/bee/cmd/cmd.go
+++ b/cmd/bee/cmd/cmd.go
@@ -1,104 +1,17 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package cmd
import (
- "errors"
"fmt"
"io"
- "os"
- "path/filepath"
- "strings"
- "time"
- chaincfg "github.com/ethersphere/bee/v2/pkg/config"
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/node"
- "github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/spf13/cobra"
- "github.com/spf13/viper"
)
-const (
- optionNameDataDir = "data-dir"
- optionNameCacheCapacity = "cache-capacity"
- optionNameDBOpenFilesLimit = "db-open-files-limit"
- optionNameDBBlockCacheCapacity = "db-block-cache-capacity"
- optionNameDBWriteBufferSize = "db-write-buffer-size"
- optionNameDBDisableSeeksCompaction = "db-disable-seeks-compaction"
- optionNamePassword = "password"
- optionNamePasswordFile = "password-file"
- optionNameAPIAddr = "api-addr"
- optionNameP2PAddr = "p2p-addr"
- optionNameNATAddr = "nat-addr"
- optionNameP2PWSEnable = "p2p-ws-enable"
- optionNameBootnodes = "bootnode"
- optionNameNetworkID = "network-id"
- optionWelcomeMessage = "welcome-message"
- optionCORSAllowedOrigins = "cors-allowed-origins"
- optionNameTracingEnabled = "tracing-enable"
- optionNameTracingEndpoint = "tracing-endpoint"
- optionNameTracingHost = "tracing-host"
- optionNameTracingPort = "tracing-port"
- optionNameTracingServiceName = "tracing-service-name"
- optionNameVerbosity = "verbosity"
- optionNamePaymentThreshold = "payment-threshold"
- optionNamePaymentTolerance = "payment-tolerance-percent"
- optionNamePaymentEarly = "payment-early-percent"
- optionNameResolverEndpoints = "resolver-options"
- optionNameBootnodeMode = "bootnode-mode"
- optionNameBlockchainRpcEndpoint = "blockchain-rpc-endpoint"
- optionNameSwapFactoryAddress = "swap-factory-address"
- optionNameSwapInitialDeposit = "swap-initial-deposit"
- optionNameSwapEnable = "swap-enable"
- optionNameChequebookEnable = "chequebook-enable"
- optionNameFullNode = "full-node"
- optionNamePostageContractAddress = "postage-stamp-address"
- optionNamePostageContractStartBlock = "postage-stamp-start-block"
- optionNamePriceOracleAddress = "price-oracle-address"
- optionNameRedistributionAddress = "redistribution-address"
- optionNameStakingAddress = "staking-address"
- optionNameBlockTime = "block-time"
- optionWarmUpTime = "warmup-time"
- optionNameMainNet = "mainnet"
- optionNameRetrievalCaching = "cache-retrieval"
- optionNameDevReserveCapacity = "dev-reserve-capacity"
- optionNameResync = "resync"
- optionNamePProfBlock = "pprof-profile"
- optionNamePProfMutex = "pprof-mutex"
- optionNameStaticNodes = "static-nodes"
- optionNameAllowPrivateCIDRs = "allow-private-cidrs"
- optionNameSleepAfter = "sleep-after"
- optionNameUsePostageSnapshot = "use-postage-snapshot"
- optionNameStorageIncentivesEnable = "storage-incentives-enable"
- optionNameStateStoreCacheCapacity = "statestore-cache-capacity"
- optionNameTargetNeighborhood = "target-neighborhood"
- optionNameNeighborhoodSuggester = "neighborhood-suggester"
- optionNameWhitelistedWithdrawalAddress = "withdrawal-addresses-whitelist"
- optionNameTransactionDebugMode = "transaction-debug-mode"
- optionMinimumStorageRadius = "minimum-storage-radius"
- optionReserveCapacityDoubling = "reserve-capacity-doubling"
- optionSkipPostageSnapshot = "skip-postage-snapshot"
-)
-
-// nolint:gochecknoinits
-func init() {
- cobra.EnableCommandSorting = false
-}
-
-type command struct {
- root *cobra.Command
- config *viper.Viper
- passwordReader passwordReader
- cfgFile string
- homeDir string
- isWindowsService bool
-}
-
-type option func(*command)
-
func newCommand(opts ...option) (c *command, err error) {
c = &command{
root: &cobra.Command{
@@ -126,10 +39,6 @@ func newCommand(opts ...option) (c *command, err error) {
c.initGlobalFlags()
- if err := c.initCommandVariables(); err != nil {
- return nil, err
- }
-
if err := c.initStartCmd(); err != nil {
return nil, err
}
@@ -159,139 +68,6 @@ func newCommand(opts ...option) (c *command, err error) {
return c, nil
}
-func (c *command) Execute() (err error) {
- return c.root.Execute()
-}
-
-// Execute parses command line arguments and runs appropriate functions.
-func Execute() (err error) {
- c, err := newCommand()
- if err != nil {
- return err
- }
- return c.Execute()
-}
-
-func (c *command) initGlobalFlags() {
- globalFlags := c.root.PersistentFlags()
- globalFlags.StringVar(&c.cfgFile, "config", "", "config file (default is $HOME/.bee.yaml)")
-}
-
-func (c *command) initCommandVariables() error {
- isWindowsService, err := isWindowsService()
- if err != nil {
- return fmt.Errorf("failed to determine if we are running in service: %w", err)
- }
-
- c.isWindowsService = isWindowsService
-
- return nil
-}
-
-func (c *command) initConfig() (err error) {
- config := viper.New()
- configName := ".bee"
- if c.cfgFile != "" {
- // Use config file from the flag.
- config.SetConfigFile(c.cfgFile)
- } else {
- // Search config in home directory with name ".bee" (without extension).
- config.AddConfigPath(c.homeDir)
- config.SetConfigName(configName)
- }
-
- // Environment
- config.SetEnvPrefix("bee")
- config.AutomaticEnv() // read in environment variables that match
- config.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
-
- if c.homeDir != "" && c.cfgFile == "" {
- c.cfgFile = filepath.Join(c.homeDir, configName+".yaml")
- }
-
- // If a config file is found, read it in.
- if err := config.ReadInConfig(); err != nil {
- var e viper.ConfigFileNotFoundError
- if !errors.As(err, &e) {
- return err
- }
- }
- c.config = config
- return nil
-}
-
-func (c *command) setHomeDir() (err error) {
- if c.homeDir != "" {
- return
- }
- dir, err := os.UserHomeDir()
- if err != nil {
- return err
- }
- c.homeDir = dir
- return nil
-}
-
-func (c *command) setAllFlags(cmd *cobra.Command) {
- cmd.Flags().String(optionNameDataDir, filepath.Join(c.homeDir, ".bee"), "data directory")
- cmd.Flags().Uint64(optionNameCacheCapacity, 1_000_000, fmt.Sprintf("cache capacity in chunks, multiply by %d to get approximate capacity in bytes", swarm.ChunkSize))
- cmd.Flags().Uint64(optionNameDBOpenFilesLimit, 200, "number of open files allowed by database")
- cmd.Flags().Uint64(optionNameDBBlockCacheCapacity, 32*1024*1024, "size of block cache of the database in bytes")
- cmd.Flags().Uint64(optionNameDBWriteBufferSize, 32*1024*1024, "size of the database write buffer in bytes")
- cmd.Flags().Bool(optionNameDBDisableSeeksCompaction, true, "disables db compactions triggered by seeks")
- cmd.Flags().String(optionNamePassword, "", "password for decrypting keys")
- cmd.Flags().String(optionNamePasswordFile, "", "path to a file that contains password for decrypting keys")
- cmd.Flags().String(optionNameAPIAddr, "127.0.0.1:1633", "HTTP API listen address")
- cmd.Flags().String(optionNameP2PAddr, ":1634", "P2P listen address")
- cmd.Flags().String(optionNameNATAddr, "", "NAT exposed address")
- cmd.Flags().Bool(optionNameP2PWSEnable, false, "enable P2P WebSocket transport")
- cmd.Flags().StringSlice(optionNameBootnodes, []string{"/dnsaddr/mainnet.ethswarm.org"}, "initial nodes to connect to")
- cmd.Flags().Uint64(optionNameNetworkID, chaincfg.Mainnet.NetworkID, "ID of the Swarm network")
- cmd.Flags().StringSlice(optionCORSAllowedOrigins, []string{}, "origins with CORS headers enabled")
- cmd.Flags().Bool(optionNameTracingEnabled, false, "enable tracing")
- cmd.Flags().String(optionNameTracingEndpoint, "127.0.0.1:6831", "endpoint to send tracing data")
- cmd.Flags().String(optionNameTracingHost, "", "host to send tracing data")
- cmd.Flags().String(optionNameTracingPort, "", "port to send tracing data")
- cmd.Flags().String(optionNameTracingServiceName, "bee", "service name identifier for tracing")
- cmd.Flags().String(optionNameVerbosity, "info", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
- cmd.Flags().String(optionWelcomeMessage, "", "send a welcome message string during handshakes")
- cmd.Flags().String(optionNamePaymentThreshold, "13500000", "threshold in BZZ where you expect to get paid from your peers")
- cmd.Flags().Int64(optionNamePaymentTolerance, 25, "excess debt above payment threshold in percentages where you disconnect from your peer")
- cmd.Flags().Int64(optionNamePaymentEarly, 50, "percentage below the peers payment threshold when we initiate settlement")
- cmd.Flags().StringSlice(optionNameResolverEndpoints, []string{}, "ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url")
- cmd.Flags().Bool(optionNameBootnodeMode, false, "cause the node to always accept incoming connections")
- cmd.Flags().String(optionNameBlockchainRpcEndpoint, "", "rpc blockchain endpoint")
- cmd.Flags().String(optionNameSwapFactoryAddress, "", "swap factory addresses")
- cmd.Flags().String(optionNameSwapInitialDeposit, "0", "initial deposit if deploying a new chequebook")
- cmd.Flags().Bool(optionNameSwapEnable, false, "enable swap")
- cmd.Flags().Bool(optionNameChequebookEnable, true, "enable chequebook")
- cmd.Flags().Bool(optionNameFullNode, false, "cause the node to start in full mode")
- cmd.Flags().String(optionNamePostageContractAddress, "", "postage stamp contract address")
- cmd.Flags().Uint64(optionNamePostageContractStartBlock, 0, "postage stamp contract start block number")
- cmd.Flags().String(optionNamePriceOracleAddress, "", "price oracle contract address")
- cmd.Flags().String(optionNameRedistributionAddress, "", "redistribution contract address")
- cmd.Flags().String(optionNameStakingAddress, "", "staking contract address")
- cmd.Flags().Uint64(optionNameBlockTime, 5, "chain block time")
- cmd.Flags().Duration(optionWarmUpTime, time.Minute*5, "maximum node warmup duration; proceeds when stable or after this time")
- cmd.Flags().Bool(optionNameMainNet, true, "triggers connect to main net bootnodes.")
- cmd.Flags().Bool(optionNameRetrievalCaching, true, "enable forwarded content caching")
- cmd.Flags().Bool(optionNameResync, false, "forces the node to resync postage contract data")
- cmd.Flags().Bool(optionNamePProfBlock, false, "enable pprof block profile")
- cmd.Flags().Bool(optionNamePProfMutex, false, "enable pprof mutex profile")
- cmd.Flags().StringSlice(optionNameStaticNodes, []string{}, "protect nodes from getting kicked out on bootnode")
- cmd.Flags().Bool(optionNameAllowPrivateCIDRs, false, "allow to advertise private CIDRs to the public network")
- cmd.Flags().Bool(optionNameUsePostageSnapshot, false, "bootstrap node using postage snapshot from the network")
- cmd.Flags().Bool(optionNameStorageIncentivesEnable, true, "enable storage incentives feature")
- cmd.Flags().Uint64(optionNameStateStoreCacheCapacity, 100_000, "lru memory caching capacity in number of statestore entries")
- cmd.Flags().String(optionNameTargetNeighborhood, "", "neighborhood to target in binary format (ex: 111111001) for mining the initial overlay")
- cmd.Flags().String(optionNameNeighborhoodSuggester, "https://api.swarmscan.io/v1/network/neighborhoods/suggestion", "suggester for target neighborhood")
- cmd.Flags().StringSlice(optionNameWhitelistedWithdrawalAddress, []string{}, "withdrawal target addresses")
- cmd.Flags().Bool(optionNameTransactionDebugMode, false, "skips the gas estimate step for contract transactions")
- cmd.Flags().Uint(optionMinimumStorageRadius, 0, "minimum radius storage threshold")
- cmd.Flags().Int(optionReserveCapacityDoubling, 0, "reserve capacity doubling")
- cmd.Flags().Bool(optionSkipPostageSnapshot, false, "skip postage snapshot")
-}
-
func newLogger(cmd *cobra.Command, verbosity string) (log.Logger, error) {
var (
sink = cmd.OutOrStdout()
@@ -326,21 +102,3 @@ func newLogger(cmd *cobra.Command, verbosity string) (log.Logger, error) {
log.WithVerbosity(vLevel),
).Register(), nil
}
-
-func (c *command) CheckUnknownParams(cmd *cobra.Command, args []string) error {
- if err := c.initConfig(); err != nil {
- return err
- }
- var unknownParams []string
- for _, v := range c.config.AllKeys() {
- if cmd.Flags().Lookup(v) == nil {
- unknownParams = append(unknownParams, v)
- }
- }
-
- if len(unknownParams) > 0 {
- return fmt.Errorf("unknown parameters:\n\t%v", strings.Join(unknownParams, "\n\t"))
- }
-
- return nil
-}
diff --git a/cmd/bee/cmd/cmd_js.go b/cmd/bee/cmd/cmd_js.go
new file mode 100644
index 00000000000..976fa7eaab8
--- /dev/null
+++ b/cmd/bee/cmd/cmd_js.go
@@ -0,0 +1,91 @@
+//go:build js
+// +build js
+
+package cmd
+
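+// cmd_js.go wires up the bee command for js/wasm builds; the native (non-js)
+// wiring lives in cmd.go, and code shared by both builds sits in cmd_shared.go.
+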
+import (
+ "fmt"
+ "io"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/node"
+ "github.com/spf13/cobra"
+)
+
+func newCommand(opts ...option) (c *command, err error) {
+ c = &command{
+ root: &cobra.Command{
+ Use: "bee",
+ Short: "Ethereum Swarm Bee",
+ SilenceErrors: true,
+ SilenceUsage: true,
+ PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+ return c.initConfig()
+ },
+ },
+ }
+
+ for _, o := range opts {
+ o(c)
+ }
+ if c.passwordReader == nil {
+ c.passwordReader = new(stdInPasswordReader)
+ }
+
+ // Find home directory.
+ if err := c.setHomeDir(); err != nil {
+ return nil, err
+ }
+
+ c.initGlobalFlags()
+
+ if err := c.initStartCmd(); err != nil {
+ return nil, err
+ }
+
+ if err := c.initInitCmd(); err != nil {
+ return nil, err
+ }
+
+ c.initVersionCmd()
+ c.initDBCmd()
+ if err := c.initSplitCmd(); err != nil {
+ return nil, err
+ }
+
+ return c, nil
+}
+
+func newLogger(cmd *cobra.Command, verbosity string) (log.Logger, error) {
+ var (
+ sink = cmd.OutOrStdout()
+ vLevel = log.VerbosityNone
+ )
+
+ switch verbosity {
+ case "0", "silent":
+ sink = io.Discard
+ case "1", "error":
+ vLevel = log.VerbosityError
+ case "2", "warn":
+ vLevel = log.VerbosityWarning
+ case "3", "info":
+ vLevel = log.VerbosityInfo
+ case "4", "debug":
+ vLevel = log.VerbosityDebug
+ case "5", "trace":
+ vLevel = log.VerbosityDebug + 1 // For backwards compatibility, just enable v1 debugging as trace.
+ default:
+ return nil, fmt.Errorf("unknown verbosity level %q", verbosity)
+ }
+
+ log.ModifyDefaults(
+ log.WithTimestamp(),
+ )
+
+ return log.NewLogger(
+ node.LoggerName,
+ log.WithSink(sink),
+ log.WithVerbosity(vLevel),
+ ).Register(), nil
+}
diff --git a/cmd/bee/cmd/cmd_shared.go b/cmd/bee/cmd/cmd_shared.go
new file mode 100644
index 00000000000..13352a53b50
--- /dev/null
+++ b/cmd/bee/cmd/cmd_shared.go
@@ -0,0 +1,234 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ chaincfg "github.com/ethersphere/bee/v2/pkg/config"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+const (
+ optionNameDataDir = "data-dir"
+ optionNameCacheCapacity = "cache-capacity"
+ optionNameDBOpenFilesLimit = "db-open-files-limit"
+ optionNameDBBlockCacheCapacity = "db-block-cache-capacity"
+ optionNameDBWriteBufferSize = "db-write-buffer-size"
+ optionNameDBDisableSeeksCompaction = "db-disable-seeks-compaction"
+ optionNamePassword = "password"
+ optionNamePasswordFile = "password-file"
+ optionNameAPIAddr = "api-addr"
+ optionNameP2PAddr = "p2p-addr"
+ optionNameNATAddr = "nat-addr"
+ optionNameP2PWSEnable = "p2p-ws-enable"
+ optionNameBootnodes = "bootnode"
+ optionNameNetworkID = "network-id"
+ optionWelcomeMessage = "welcome-message"
+ optionCORSAllowedOrigins = "cors-allowed-origins"
+ optionNameTracingEnabled = "tracing-enable"
+ optionNameTracingEndpoint = "tracing-endpoint"
+ optionNameTracingHost = "tracing-host"
+ optionNameTracingPort = "tracing-port"
+ optionNameTracingServiceName = "tracing-service-name"
+ optionNameVerbosity = "verbosity"
+ optionNamePaymentThreshold = "payment-threshold"
+ optionNamePaymentTolerance = "payment-tolerance-percent"
+ optionNamePaymentEarly = "payment-early-percent"
+ optionNameResolverEndpoints = "resolver-options"
+ optionNameBootnodeMode = "bootnode-mode"
+ optionNameBlockchainRpcEndpoint = "blockchain-rpc-endpoint"
+ optionNameSwapFactoryAddress = "swap-factory-address"
+ optionNameSwapInitialDeposit = "swap-initial-deposit"
+ optionNameSwapEnable = "swap-enable"
+ optionNameChequebookEnable = "chequebook-enable"
+ optionNameFullNode = "full-node"
+ optionNamePostageContractAddress = "postage-stamp-address"
+ optionNamePostageContractStartBlock = "postage-stamp-start-block"
+ optionNamePriceOracleAddress = "price-oracle-address"
+ optionNameRedistributionAddress = "redistribution-address"
+ optionNameStakingAddress = "staking-address"
+ optionNameBlockTime = "block-time"
+ optionWarmUpTime = "warmup-time"
+ optionNameMainNet = "mainnet"
+ optionNameRetrievalCaching = "cache-retrieval"
+ optionNameDevReserveCapacity = "dev-reserve-capacity"
+ optionNameResync = "resync"
+ optionNamePProfBlock = "pprof-profile"
+ optionNamePProfMutex = "pprof-mutex"
+ optionNameStaticNodes = "static-nodes"
+ optionNameAllowPrivateCIDRs = "allow-private-cidrs"
+ optionNameSleepAfter = "sleep-after"
+ optionNameUsePostageSnapshot = "use-postage-snapshot"
+ optionNameStorageIncentivesEnable = "storage-incentives-enable"
+ optionNameStateStoreCacheCapacity = "statestore-cache-capacity"
+ optionNameTargetNeighborhood = "target-neighborhood"
+ optionNameNeighborhoodSuggester = "neighborhood-suggester"
+ optionNameWhitelistedWithdrawalAddress = "withdrawal-addresses-whitelist"
+ optionNameTransactionDebugMode = "transaction-debug-mode"
+ optionMinimumStorageRadius = "minimum-storage-radius"
+ optionReserveCapacityDoubling = "reserve-capacity-doubling"
+)
+
+// nolint:gochecknoinits
+func init() {
+ cobra.EnableCommandSorting = false
+}
+
+type command struct {
+ root *cobra.Command
+ config *viper.Viper
+ passwordReader passwordReader
+ cfgFile string
+ homeDir string
+}
+
+type option func(*command)
+
+func (c *command) Execute() (err error) {
+ return c.root.Execute()
+}
+
+// Execute parses command line arguments and runs appropriate functions.
+func Execute() (err error) {
+ c, err := newCommand()
+ if err != nil {
+ return err
+ }
+ return c.Execute()
+}
+
+func (c *command) initGlobalFlags() {
+ globalFlags := c.root.PersistentFlags()
+ globalFlags.StringVar(&c.cfgFile, "config", "", "config file (default is $HOME/.bee.yaml)")
+}
+
+func (c *command) initConfig() (err error) {
+ config := viper.New()
+ configName := ".bee"
+ if c.cfgFile != "" {
+ // Use config file from the flag.
+ config.SetConfigFile(c.cfgFile)
+ } else {
+ // Search config in home directory with name ".bee" (without extension).
+ config.AddConfigPath(c.homeDir)
+ config.SetConfigName(configName)
+ }
+
+ // Environment
+ config.SetEnvPrefix("bee")
+ config.AutomaticEnv() // read in environment variables that match
+ config.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+
+ if c.homeDir != "" && c.cfgFile == "" {
+ c.cfgFile = filepath.Join(c.homeDir, configName+".yaml")
+ }
+
+ // If a config file is found, read it in.
+ if err := config.ReadInConfig(); err != nil {
+ var e viper.ConfigFileNotFoundError
+ if !errors.As(err, &e) {
+ return err
+ }
+ }
+ c.config = config
+ return nil
+}
+
+func (c *command) setHomeDir() (err error) {
+ if c.homeDir != "" {
+ return
+ }
+ dir, err := os.UserHomeDir()
+ if err != nil {
+ return err
+ }
+ c.homeDir = dir
+ return nil
+}
+
+func (c *command) setAllFlags(cmd *cobra.Command) {
+ cmd.Flags().String(optionNameDataDir, filepath.Join(c.homeDir, ".bee"), "data directory")
+ cmd.Flags().Uint64(optionNameCacheCapacity, 1_000_000, fmt.Sprintf("cache capacity in chunks, multiply by %d to get approximate capacity in bytes", swarm.ChunkSize))
+ cmd.Flags().Uint64(optionNameDBOpenFilesLimit, 200, "number of open files allowed by database")
+ cmd.Flags().Uint64(optionNameDBBlockCacheCapacity, 32*1024*1024, "size of block cache of the database in bytes")
+ cmd.Flags().Uint64(optionNameDBWriteBufferSize, 32*1024*1024, "size of the database write buffer in bytes")
+ cmd.Flags().Bool(optionNameDBDisableSeeksCompaction, true, "disables db compactions triggered by seeks")
+ cmd.Flags().String(optionNamePassword, "", "password for decrypting keys")
+ cmd.Flags().String(optionNamePasswordFile, "", "path to a file that contains password for decrypting keys")
+ cmd.Flags().String(optionNameAPIAddr, "127.0.0.1:1633", "HTTP API listen address")
+ cmd.Flags().String(optionNameP2PAddr, ":1634", "P2P listen address")
+ cmd.Flags().String(optionNameNATAddr, "", "NAT exposed address")
+ cmd.Flags().Bool(optionNameP2PWSEnable, true, "enable P2P WebSocket transport")
+ cmd.Flags().StringSlice(optionNameBootnodes, []string{"/dnsaddr/mainnet.ethswarm.org"}, "initial nodes to connect to")
+ cmd.Flags().Uint64(optionNameNetworkID, chaincfg.Mainnet.NetworkID, "ID of the Swarm network")
+ cmd.Flags().StringSlice(optionCORSAllowedOrigins, []string{}, "origins with CORS headers enabled")
+ cmd.Flags().Bool(optionNameTracingEnabled, false, "enable tracing")
+ cmd.Flags().String(optionNameTracingEndpoint, "127.0.0.1:6831", "endpoint to send tracing data")
+ cmd.Flags().String(optionNameTracingHost, "", "host to send tracing data")
+ cmd.Flags().String(optionNameTracingPort, "", "port to send tracing data")
+ cmd.Flags().String(optionNameTracingServiceName, "bee", "service name identifier for tracing")
+ cmd.Flags().String(optionNameVerbosity, "info", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
+ cmd.Flags().String(optionWelcomeMessage, "", "send a welcome message string during handshakes")
+ cmd.Flags().String(optionNamePaymentThreshold, "13500000", "threshold in BZZ where you expect to get paid from your peers")
+ cmd.Flags().Int64(optionNamePaymentTolerance, 25, "excess debt above payment threshold in percentages where you disconnect from your peer")
+ cmd.Flags().Int64(optionNamePaymentEarly, 50, "percentage below the peers payment threshold when we initiate settlement")
+ cmd.Flags().StringSlice(optionNameResolverEndpoints, []string{}, "ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url")
+ cmd.Flags().Bool(optionNameBootnodeMode, false, "cause the node to always accept incoming connections")
+ cmd.Flags().String(optionNameBlockchainRpcEndpoint, "", "rpc blockchain endpoint")
+ cmd.Flags().String(optionNameSwapFactoryAddress, "", "swap factory addresses")
+ cmd.Flags().String(optionNameSwapInitialDeposit, "0", "initial deposit if deploying a new chequebook")
+ cmd.Flags().Bool(optionNameSwapEnable, false, "enable swap")
+ cmd.Flags().Bool(optionNameChequebookEnable, true, "enable chequebook")
+ cmd.Flags().Bool(optionNameFullNode, false, "cause the node to start in full mode")
+ cmd.Flags().String(optionNamePostageContractAddress, "", "postage stamp contract address")
+ cmd.Flags().Uint64(optionNamePostageContractStartBlock, 0, "postage stamp contract start block number")
+ cmd.Flags().String(optionNamePriceOracleAddress, "", "price oracle contract address")
+ cmd.Flags().String(optionNameRedistributionAddress, "", "redistribution contract address")
+ cmd.Flags().String(optionNameStakingAddress, "", "staking contract address")
+ cmd.Flags().Uint64(optionNameBlockTime, 5, "chain block time")
+ cmd.Flags().Duration(optionWarmUpTime, time.Minute*5, "maximum node warmup duration; proceeds when stable or after this time")
+ cmd.Flags().Bool(optionNameMainNet, true, "triggers connect to main net bootnodes.")
+ cmd.Flags().Bool(optionNameRetrievalCaching, true, "enable forwarded content caching")
+ cmd.Flags().Bool(optionNameResync, false, "forces the node to resync postage contract data")
+ cmd.Flags().Bool(optionNamePProfBlock, false, "enable pprof block profile")
+ cmd.Flags().Bool(optionNamePProfMutex, false, "enable pprof mutex profile")
+ cmd.Flags().StringSlice(optionNameStaticNodes, []string{}, "protect nodes from getting kicked out on bootnode")
+ cmd.Flags().Bool(optionNameAllowPrivateCIDRs, false, "allow to advertise private CIDRs to the public network")
+ cmd.Flags().Bool(optionNameUsePostageSnapshot, false, "bootstrap node using postage snapshot from the network")
+ cmd.Flags().Bool(optionNameStorageIncentivesEnable, true, "enable storage incentives feature")
+ cmd.Flags().Uint64(optionNameStateStoreCacheCapacity, 100_000, "lru memory caching capacity in number of statestore entries")
+ cmd.Flags().String(optionNameTargetNeighborhood, "", "neighborhood to target in binary format (ex: 111111001) for mining the initial overlay")
+ cmd.Flags().String(optionNameNeighborhoodSuggester, "https://api.swarmscan.io/v1/network/neighborhoods/suggestion", "suggester for target neighborhood")
+ cmd.Flags().StringSlice(optionNameWhitelistedWithdrawalAddress, []string{}, "withdrawal target addresses")
+ cmd.Flags().Bool(optionNameTransactionDebugMode, false, "skips the gas estimate step for contract transactions")
+ cmd.Flags().Uint(optionMinimumStorageRadius, 0, "minimum radius storage threshold")
+ cmd.Flags().Int(optionReserveCapacityDoubling, 0, "reserve capacity doubling")
+}
+
+func (c *command) CheckUnknownParams(cmd *cobra.Command, args []string) error {
+ if err := c.initConfig(); err != nil {
+ return err
+ }
+ var unknownParams []string
+ for _, v := range c.config.AllKeys() {
+ if cmd.Flags().Lookup(v) == nil {
+ unknownParams = append(unknownParams, v)
+ }
+ }
+
+ if len(unknownParams) > 0 {
+ return fmt.Errorf("unknown parameters:\n\t%v", strings.Join(unknownParams, "\n\t"))
+ }
+
+ return nil
+}
diff --git a/cmd/bee/cmd/configurateoptions.go b/cmd/bee/cmd/configurateoptions.go
index b6433d06877..ff4c4a191e1 100644
--- a/cmd/bee/cmd/configurateoptions.go
+++ b/cmd/bee/cmd/configurateoptions.go
@@ -8,7 +8,7 @@ import (
"fmt"
"sort"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
"github.com/spf13/cobra"
)
diff --git a/cmd/bee/cmd/db.go b/cmd/bee/cmd/db.go
index 62eee2591c2..a10f00e285d 100644
--- a/cmd/bee/cmd/db.go
+++ b/cmd/bee/cmd/db.go
@@ -27,6 +27,8 @@ import (
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/util/ioutil"
"github.com/spf13/cobra"
+
+ fs "github.com/ethersphere/bee/v2/pkg/fs"
)
const (
@@ -615,7 +617,7 @@ func dbImportReserveCmd(cmd *cobra.Command) {
if args[0] == "-" {
in = os.Stdin
} else {
- f, err := os.Open(args[0])
+ f, err := fs.Open(args[0])
if err != nil {
return fmt.Errorf("opening input file: %w", err)
}
@@ -699,7 +701,7 @@ func dbImportPinningCmd(cmd *cobra.Command) {
if args[0] == "-" {
in = os.Stdin
} else {
- f, err := os.Open(args[0])
+ f, err := fs.Open(args[0])
if err != nil {
return fmt.Errorf("error opening input file: %w", err)
}
diff --git a/cmd/bee/cmd/split.go b/cmd/bee/cmd/split.go
index ebe798e878d..21b08823370 100644
--- a/cmd/bee/cmd/split.go
+++ b/cmd/bee/cmd/split.go
@@ -18,6 +18,8 @@ import (
"github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/spf13/cobra"
+
+ fs "github.com/ethersphere/bee/v2/pkg/fs"
)
// putter is a putter that stores all the split chunk addresses of a file
@@ -89,7 +91,7 @@ func splitRefs(cmd *cobra.Command) {
return fmt.Errorf("new logger: %w", err)
}
- reader, err := os.Open(inputFileName)
+ reader, err := fs.Open(inputFileName)
if err != nil {
return fmt.Errorf("open input file: %w", err)
}
@@ -179,7 +181,7 @@ func splitChunks(cmd *cobra.Command) {
if err != nil {
return fmt.Errorf("new logger: %w", err)
}
- reader, err := os.Open(inputFileName)
+ reader, err := fs.Open(inputFileName)
if err != nil {
return fmt.Errorf("open input file: %w", err)
}
diff --git a/cmd/bee/cmd/start.go b/cmd/bee/cmd/start.go
index 1b1baac9503..8461672d979 100644
--- a/cmd/bee/cmd/start.go
+++ b/cmd/bee/cmd/start.go
@@ -13,14 +13,10 @@ import (
"errors"
"fmt"
"os"
- "os/signal"
"path/filepath"
"strings"
- "sync/atomic"
- "syscall"
"time"
- "github.com/ethersphere/bee/v2"
"github.com/ethersphere/bee/v2/pkg/accesscontrol"
chaincfg "github.com/ethersphere/bee/v2/pkg/config"
"github.com/ethersphere/bee/v2/pkg/crypto"
@@ -43,143 +39,6 @@ const (
//go:embed bee-welcome-message.txt
var beeWelcomeMessage string
-func (c *command) initStartCmd() (err error) {
- cmd := &cobra.Command{
- Use: "start",
- Short: "Start a Swarm node",
- PersistentPreRunE: c.CheckUnknownParams,
- RunE: func(cmd *cobra.Command, args []string) (err error) {
- if len(args) > 0 {
- return cmd.Help()
- }
-
- v := strings.ToLower(c.config.GetString(optionNameVerbosity))
-
- logger, err := newLogger(cmd, v)
- if err != nil {
- return fmt.Errorf("new logger: %w", err)
- }
-
- if c.isWindowsService {
- logger, err = createWindowsEventLogger(serviceName, logger)
- if err != nil {
- return fmt.Errorf("failed to create windows logger %w", err)
- }
- }
-
- fmt.Print(beeWelcomeMessage)
- time.Sleep(5 * time.Second)
- fmt.Printf("\n\nversion: %v - planned to be supported until %v, please follow https://ethswarm.org/\n\n", bee.Version, endSupportDate())
- logger.Info("bee version", "version", bee.Version)
-
- go startTimeBomb(logger)
-
- // ctx is global context of bee node; which is canceled after interrupt signal is received.
- ctx, cancel := context.WithCancel(context.Background())
- sysInterruptChannel := make(chan os.Signal, 1)
- signal.Notify(sysInterruptChannel, syscall.SIGINT, syscall.SIGTERM)
-
- go func() {
- select {
- case <-sysInterruptChannel:
- logger.Info("received interrupt signal")
- cancel()
- case <-ctx.Done():
- }
- }()
-
- // Building bee node can take up some time (because node.NewBee(...) is compute have function )
- // Because of this we need to do it in background so that program could be terminated when interrupt signal is received
- // while bee node is being constructed.
- respC := buildBeeNodeAsync(ctx, c, cmd, logger)
- var beeNode atomic.Value
-
- p := &program{
- start: func() {
- // Wait for bee node to fully build and initialized
- select {
- case resp := <-respC:
- if resp.err != nil {
- logger.Error(resp.err, "failed to build bee node")
- return
- }
- beeNode.Store(resp.bee)
- case <-ctx.Done():
- return
- }
-
- // Bee has fully started at this point, from now on we
- // block main goroutine until it is interrupted or stopped
- select {
- case <-ctx.Done():
- case <-beeNode.Load().(*node.Bee).SyncingStopped():
- logger.Debug("syncing has stopped")
- }
-
- logger.Info("shutting down...")
- },
- stop: func() {
- // Whenever program is being stopped we need to cancel main context
- // beforehand so that node could be stopped via Shutdown method
- cancel()
-
- // Shutdown node (if node was fully started)
- val := beeNode.Load()
- if val == nil {
- return
- }
-
- done := make(chan struct{})
- go func(beeNode *node.Bee) {
- defer close(done)
-
- if err := beeNode.Shutdown(); err != nil {
- logger.Error(err, "shutdown failed")
- }
- }(val.(*node.Bee))
-
- // If shutdown function is blocking too long,
- // allow process termination by receiving another signal.
- select {
- case <-sysInterruptChannel:
- logger.Info("node shutdown terminated")
- case <-done:
- logger.Info("node shutdown")
- }
- },
- }
-
- if c.isWindowsService {
- s, err := service.New(p, &service.Config{
- Name: serviceName,
- DisplayName: "Bee",
- Description: "Bee, Swarm client.",
- })
- if err != nil {
- return err
- }
-
- if err = s.Run(); err != nil {
- return err
- }
- } else {
- // start blocks until some interrupt is received
- p.start()
- p.stop()
- }
-
- return nil
- },
- PreRunE: func(cmd *cobra.Command, args []string) error {
- return c.config.BindPFlags(cmd.Flags())
- },
- }
-
- c.setAllFlags(cmd)
- c.root.AddCommand(cmd)
- return nil
-}
-
type buildBeeNodeResp struct {
bee *node.Bee
err error
@@ -315,7 +174,6 @@ func buildBeeNode(ctx context.Context, c *command, cmd *cobra.Command, logger lo
ResolverConnectionCfgs: resolverCfgs,
Resync: c.config.GetBool(optionNameResync),
RetrievalCaching: c.config.GetBool(optionNameRetrievalCaching),
- SkipPostageSnapshot: c.config.GetBool(optionSkipPostageSnapshot),
StakingContractAddress: c.config.GetString(optionNameStakingAddress),
StatestoreCacheCapacity: c.config.GetUint64(optionNameStateStoreCacheCapacity),
StaticNodes: staticNodes,
diff --git a/cmd/bee/cmd/start_dev.go b/cmd/bee/cmd/start_dev.go
index 4440b3fe441..53b60e757fb 100644
--- a/cmd/bee/cmd/start_dev.go
+++ b/cmd/bee/cmd/start_dev.go
@@ -1,3 +1,6 @@
+//go:build !windows && !js
+// +build !windows,!js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -12,7 +15,6 @@ import (
"syscall"
"github.com/ethersphere/bee/v2/pkg/node"
- "github.com/kardianos/service"
"github.com/spf13/cobra"
)
@@ -33,14 +35,6 @@ func (c *command) initStartDevCmd() (err error) {
return fmt.Errorf("new logger: %w", err)
}
- if c.isWindowsService {
- var err error
- logger, err = createWindowsEventLogger(serviceName, logger)
- if err != nil {
- return fmt.Errorf("failed to create windows logger %w", err)
- }
- }
-
beeASCII := `
( * ) (
)\ ) ( * ( /( )\ )
@@ -105,24 +99,9 @@ func (c *command) initStartDevCmd() (err error) {
},
}
- if c.isWindowsService {
- s, err := service.New(p, &service.Config{
- Name: serviceName,
- DisplayName: "Bee",
- Description: "Bee, Swarm client.",
- })
- if err != nil {
- return err
- }
-
- if err = s.Run(); err != nil {
- return err
- }
- } else {
- // start blocks until some interrupt is received
- p.start()
- p.stop()
- }
+ // start blocks until some interrupt is received
+ p.start()
+ p.stop()
return nil
},
diff --git a/cmd/bee/cmd/start_dev_windows.go b/cmd/bee/cmd/start_dev_windows.go
new file mode 100644
index 00000000000..84c3dcb12bd
--- /dev/null
+++ b/cmd/bee/cmd/start_dev_windows.go
@@ -0,0 +1,135 @@
+//go:build windows
+// +build windows
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "strings"
+ "syscall"
+
+ "github.com/ethersphere/bee/v2/pkg/node"
+ "github.com/kardianos/service"
+ "github.com/spf13/cobra"
+)
+
+func (c *command) initStartDevCmd() (err error) {
+
+ cmd := &cobra.Command{
+ Use: "dev",
+ Short: "Start a Swarm node in development mode",
+ PersistentPreRunE: c.CheckUnknownParams,
+ RunE: func(cmd *cobra.Command, args []string) (err error) {
+ if len(args) > 0 {
+ return cmd.Help()
+ }
+
+ v := strings.ToLower(c.config.GetString(optionNameVerbosity))
+ logger, err := newLogger(cmd, v)
+ if err != nil {
+ return fmt.Errorf("new logger: %w", err)
+ }
+
+ logger, err = createWindowsEventLogger(serviceName, logger)
+ if err != nil {
+ return fmt.Errorf("failed to create windows logger %w", err)
+ }
+
+ beeASCII := `
+ ( * ) (
+ )\ ) ( * ( /( )\ )
+(()/( ( ( ( )\))( )\())(()/( (
+ /(_)) )\ )\ )\ ((_)()\ ((_)\ /(_)) )\
+(_))_ ((_) ((_)((_) (_()((_) ((_)(_))_ ((_)
+ | \ | __|\ \ / / | \/ | / _ \ | \ | __|
+ | |) || _| \ V / | |\/| || (_) || |) || _|
+ |___/ |___| \_/ |_| |_| \___/ |___/ |___|
+`
+
+ fmt.Println(beeASCII)
+ fmt.Println()
+ fmt.Println("Starting in development mode")
+ fmt.Println()
+
+ // generate signer in here
+ b, err := node.NewDevBee(logger, &node.DevOptions{
+ APIAddr: c.config.GetString(optionNameAPIAddr),
+ Logger: logger,
+ DBOpenFilesLimit: c.config.GetUint64(optionNameDBOpenFilesLimit),
+ DBBlockCacheCapacity: c.config.GetUint64(optionNameDBBlockCacheCapacity),
+ DBWriteBufferSize: c.config.GetUint64(optionNameDBWriteBufferSize),
+ DBDisableSeeksCompaction: c.config.GetBool(optionNameDBDisableSeeksCompaction),
+ CORSAllowedOrigins: c.config.GetStringSlice(optionCORSAllowedOrigins),
+ ReserveCapacity: c.config.GetUint64(optionNameDevReserveCapacity),
+ })
+ if err != nil {
+ return err
+ }
+
+ // Wait for termination or interrupt signals.
+ // We want to clean up things at the end.
+ interruptChannel := make(chan os.Signal, 1)
+ signal.Notify(interruptChannel, syscall.SIGINT, syscall.SIGTERM)
+
+ p := &program{
+ start: func() {
+ // Block main goroutine until it is interrupted
+ sig := <-interruptChannel
+
+ logger.Debug("signal received", "signal", sig)
+ logger.Info("shutting down")
+ },
+ stop: func() {
+ // Shutdown
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ if err := b.Shutdown(); err != nil {
+ logger.Error(err, "shutdown failed")
+ }
+ }()
+
+ // If shutdown function is blocking too long,
+ // allow process termination by receiving another signal.
+ select {
+ case sig := <-interruptChannel:
+ logger.Debug("signal received", "signal", sig)
+ case <-done:
+ }
+ },
+ }
+
+ s, err := service.New(p, &service.Config{
+ Name: serviceName,
+ DisplayName: "Bee",
+ Description: "Bee, Swarm client.",
+ })
+ if err != nil {
+ return err
+ }
+
+ if err = s.Run(); err != nil {
+ return err
+ }
+
+ return nil
+ },
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return c.config.BindPFlags(cmd.Flags())
+ },
+ }
+
+ cmd.Flags().String(optionNameAPIAddr, "127.0.0.1:1633", "HTTP API listen address")
+ cmd.Flags().String(optionNameVerbosity, "info", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
+ cmd.Flags().Uint64(optionNameDevReserveCapacity, 4194304, "cache reserve capacity")
+ cmd.Flags().StringSlice(optionCORSAllowedOrigins, []string{}, "origins with CORS headers enabled")
+ cmd.Flags().Uint64(optionNameDBOpenFilesLimit, 200, "number of open files allowed by database")
+ cmd.Flags().Uint64(optionNameDBBlockCacheCapacity, 32*1024*1024, "size of block cache of the database in bytes")
+ cmd.Flags().Uint64(optionNameDBWriteBufferSize, 32*1024*1024, "size of the database write buffer in bytes")
+ cmd.Flags().Bool(optionNameDBDisableSeeksCompaction, false, "disables db compactions triggered by seeks")
+
+ c.root.AddCommand(cmd)
+ return nil
+}
diff --git a/cmd/bee/cmd/start_unix.go b/cmd/bee/cmd/start_unix.go
index 19e3d5b7807..44f13d2641a 100644
--- a/cmd/bee/cmd/start_unix.go
+++ b/cmd/bee/cmd/start_unix.go
@@ -7,15 +7,131 @@
package cmd
import (
- "errors"
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
- "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2"
+ "github.com/ethersphere/bee/v2/pkg/node"
+ "github.com/spf13/cobra"
)
-func isWindowsService() (bool, error) {
- return false, nil
-}
+func (c *command) initStartCmd() (err error) {
+ cmd := &cobra.Command{
+ Use: "start",
+ Short: "Start a Swarm node",
+ PersistentPreRunE: c.CheckUnknownParams,
+ RunE: func(cmd *cobra.Command, args []string) (err error) {
+ if len(args) > 0 {
+ return cmd.Help()
+ }
+
+ v := strings.ToLower(c.config.GetString(optionNameVerbosity))
+
+ logger, err := newLogger(cmd, v)
+ if err != nil {
+ return fmt.Errorf("new logger: %w", err)
+ }
+
+ fmt.Print(beeWelcomeMessage)
+ time.Sleep(5 * time.Second)
+ fmt.Printf("\n\nversion: %v - planned to be supported until %v, please follow https://ethswarm.org/\n\n", bee.Version, endSupportDate())
+ logger.Info("bee version", "version", bee.Version)
+
+ go startTimeBomb(logger)
+
+ // ctx is the global context of the bee node; it is canceled once an interrupt signal is received.
+ ctx, cancel := context.WithCancel(context.Background())
+ sysInterruptChannel := make(chan os.Signal, 1)
+ signal.Notify(sysInterruptChannel, syscall.SIGINT, syscall.SIGTERM)
+
+ go func() {
+ select {
+ case <-sysInterruptChannel:
+ logger.Info("received interrupt signal")
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+
+ // Building the bee node can take some time (node.NewBee(...) is compute-heavy),
+ // so we build it in the background so that the program can still be terminated
+ // when an interrupt signal is received while the node is being constructed.
+ respC := buildBeeNodeAsync(ctx, c, cmd, logger)
+ var beeNode atomic.Value
+
+ p := &program{
+ start: func() {
+ // Wait for the bee node to be fully built and initialized
+ select {
+ case resp := <-respC:
+ if resp.err != nil {
+ logger.Error(resp.err, "failed to build bee node")
+ return
+ }
+ beeNode.Store(resp.bee)
+ case <-ctx.Done():
+ return
+ }
+
+ // Bee has fully started at this point; from now on we block the
+ // main goroutine until it is interrupted or stopped
+ select {
+ case <-ctx.Done():
+ case <-beeNode.Load().(*node.Bee).SyncingStopped():
+ logger.Debug("syncing has stopped")
+ }
+
+ logger.Info("shutting down...")
+ },
+ stop: func() {
+ // Whenever the program is being stopped, cancel the main context first
+ // so that the node can be stopped via its Shutdown method
+ cancel()
+
+ // Shutdown node (if node was fully started)
+ val := beeNode.Load()
+ if val == nil {
+ return
+ }
+
+ done := make(chan struct{})
+ go func(beeNode *node.Bee) {
+ defer close(done)
+
+ if err := beeNode.Shutdown(); err != nil {
+ logger.Error(err, "shutdown failed")
+ }
+ }(val.(*node.Bee))
+
+ // If shutdown function is blocking too long,
+ // allow process termination by receiving another signal.
+ select {
+ case <-sysInterruptChannel:
+ logger.Info("node shutdown terminated")
+ case <-done:
+ logger.Info("node shutdown")
+ }
+ },
+ }
+
+ // start blocks until some interrupt is received
+ p.start()
+ p.stop()
+
+ return nil
+ },
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return c.config.BindPFlags(cmd.Flags())
+ },
+ }
-func createWindowsEventLogger(_ string, _ log.Logger) (log.Logger, error) {
- return nil, errors.New("cannot create Windows event logger")
+ c.setAllFlags(cmd)
+ c.root.AddCommand(cmd)
+ return nil
}
diff --git a/cmd/bee/cmd/start_windows.go b/cmd/bee/cmd/start_windows.go
index 3153e80041c..1ff03926a81 100644
--- a/cmd/bee/cmd/start_windows.go
+++ b/cmd/bee/cmd/start_windows.go
@@ -7,13 +7,24 @@
package cmd
import (
+ "context"
"fmt"
+ "os"
+ "os/signal"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/debug"
"golang.org/x/sys/windows/svc/eventlog"
+ "github.com/ethersphere/bee/v2"
"github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/node"
+ "github.com/kardianos/service"
+ "github.com/spf13/cobra"
)
func isWindowsService() (bool, error) {
@@ -55,3 +66,132 @@ func (l windowsEventLogger) Error(err error, msg string, keysAndValues ...interf
}
_ = l.winlog.Error(1633, fmt.Sprintf("%s %s", msg, fmt.Sprintln(keysAndValues...)))
}
+
+func (c *command) initStartCmd() (err error) {
+ cmd := &cobra.Command{
+ Use: "start",
+ Short: "Start a Swarm node",
+ PersistentPreRunE: c.CheckUnknownParams,
+ RunE: func(cmd *cobra.Command, args []string) (err error) {
+ if len(args) > 0 {
+ return cmd.Help()
+ }
+
+ v := strings.ToLower(c.config.GetString(optionNameVerbosity))
+
+ logger, err := newLogger(cmd, v)
+ if err != nil {
+ return fmt.Errorf("new logger: %w", err)
+ }
+
+ logger, err = createWindowsEventLogger(serviceName, logger)
+ if err != nil {
+ return fmt.Errorf("failed to create windows logger %w", err)
+ }
+
+ fmt.Print(beeWelcomeMessage)
+ time.Sleep(5 * time.Second)
+ fmt.Printf("\n\nversion: %v - planned to be supported until %v, please follow https://ethswarm.org/\n\n", bee.Version, endSupportDate())
+ logger.Info("bee version", "version", bee.Version)
+
+ go startTimeBomb(logger)
+
+ // ctx is the global context of the bee node; it is canceled once an interrupt signal is received.
+ ctx, cancel := context.WithCancel(context.Background())
+ sysInterruptChannel := make(chan os.Signal, 1)
+ signal.Notify(sysInterruptChannel, syscall.SIGINT, syscall.SIGTERM)
+
+ go func() {
+ select {
+ case <-sysInterruptChannel:
+ logger.Info("received interrupt signal")
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+
+ // Building the bee node can take some time (node.NewBee(...) is compute-heavy),
+ // so we build it in the background so that the program can still be terminated
+ // when an interrupt signal is received while the node is being constructed.
+ respC := buildBeeNodeAsync(ctx, c, cmd, logger)
+ var beeNode atomic.Value
+
+ p := &program{
+ start: func() {
+ // Wait for the bee node to be fully built and initialized
+ select {
+ case resp := <-respC:
+ if resp.err != nil {
+ logger.Error(resp.err, "failed to build bee node")
+ return
+ }
+ beeNode.Store(resp.bee)
+ case <-ctx.Done():
+ return
+ }
+
+ // Bee has fully started at this point; from now on we block the
+ // main goroutine until it is interrupted or stopped
+ select {
+ case <-ctx.Done():
+ case <-beeNode.Load().(*node.Bee).SyncingStopped():
+ logger.Debug("syncing has stopped")
+ }
+
+ logger.Info("shutting down...")
+ },
+ stop: func() {
+ // Whenever the program is being stopped, cancel the main context first
+ // so that the node can be stopped via its Shutdown method
+ cancel()
+
+ // Shutdown node (if node was fully started)
+ val := beeNode.Load()
+ if val == nil {
+ return
+ }
+
+ done := make(chan struct{})
+ go func(beeNode *node.Bee) {
+ defer close(done)
+
+ if err := beeNode.Shutdown(); err != nil {
+ logger.Error(err, "shutdown failed")
+ }
+ }(val.(*node.Bee))
+
+ // If shutdown function is blocking too long,
+ // allow process termination by receiving another signal.
+ select {
+ case <-sysInterruptChannel:
+ logger.Info("node shutdown terminated")
+ case <-done:
+ logger.Info("node shutdown")
+ }
+ },
+ }
+
+ s, err := service.New(p, &service.Config{
+ Name: serviceName,
+ DisplayName: "Bee",
+ Description: "Bee, Swarm client.",
+ })
+ if err != nil {
+ return err
+ }
+
+ if err = s.Run(); err != nil {
+ return err
+ }
+
+ return nil
+ },
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ return c.config.BindPFlags(cmd.Flags())
+ },
+ }
+
+ c.setAllFlags(cmd)
+ c.root.AddCommand(cmd)
+ return nil
+}
diff --git a/dist/.gitignore b/dist/.gitignore
new file mode 100644
index 00000000000..a14702c409d
--- /dev/null
+++ b/dist/.gitignore
@@ -0,0 +1,34 @@
+# dependencies (bun install)
+node_modules
+
+# output
+out
+dist
+*.tgz
+
+# code coverage
+coverage
+*.lcov
+
+# logs
+logs
+*.log
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# dotenv environment variable files
+.env
+.env.development.local
+.env.test.local
+.env.production.local
+.env.local
+
+# caches
+.eslintcache
+.cache
+*.tsbuildinfo
+
+# IntelliJ based IDEs
+.idea
+
+# Finder (MacOS) folder config
+.DS_Store
diff --git a/dist/README.md b/dist/README.md
new file mode 100644
index 00000000000..7cb8c1fe50e
--- /dev/null
+++ b/dist/README.md
@@ -0,0 +1,16 @@
+# dist
+
+To install dependencies:
+
+```bash
+bun install
+```
+
+To build the service worker bundle:
+
+```bash
+bun run build
+```
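+
+This bundles `sw.js` and its dependencies into `sw_bundle.js`, which
+`main.js` registers as a service worker. Serve this directory over HTTP
+(service workers are not available from `file://`); for example:
+
+```bash
+# any static file server works; bunx fetches the `serve` package on demand
+bunx serve .
+```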
+
+This project was created using `bun init` in bun v1.2.11. [Bun](https://bun.sh)
+is a fast all-in-one JavaScript runtime.
diff --git a/dist/bun.lock b/dist/bun.lock
new file mode 100644
index 00000000000..0a459743828
--- /dev/null
+++ b/dist/bun.lock
@@ -0,0 +1,113 @@
+{
+ "lockfileVersion": 1,
+ "workspaces": {
+ "": {
+ "name": "dist",
+ "dependencies": {
+ "@zenfs/core": "^2.2.0",
+ },
+ "devDependencies": {
+ "@types/bun": "latest",
+ "esbuild": "^0.25.3",
+ },
+ "peerDependencies": {
+ "typescript": "^5",
+ },
+ },
+ },
+ "packages": {
+ "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.3", "", { "os": "aix", "cpu": "ppc64" }, "sha512-W8bFfPA8DowP8l//sxjJLSLkD8iEjMc7cBVyP+u4cEv9sM7mdUCkgsj+t0n/BWPFtv7WWCN5Yzj0N6FJNUUqBQ=="],
+
+ "@esbuild/android-arm": ["@esbuild/android-arm@0.25.3", "", { "os": "android", "cpu": "arm" }, "sha512-PuwVXbnP87Tcff5I9ngV0lmiSu40xw1At6i3GsU77U7cjDDB4s0X2cyFuBiDa1SBk9DnvWwnGvVaGBqoFWPb7A=="],
+
+ "@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.3", "", { "os": "android", "cpu": "arm64" }, "sha512-XelR6MzjlZuBM4f5z2IQHK6LkK34Cvv6Rj2EntER3lwCBFdg6h2lKbtRjpTTsdEjD/WSe1q8UyPBXP1x3i/wYQ=="],
+
+ "@esbuild/android-x64": ["@esbuild/android-x64@0.25.3", "", { "os": "android", "cpu": "x64" }, "sha512-ogtTpYHT/g1GWS/zKM0cc/tIebFjm1F9Aw1boQ2Y0eUQ+J89d0jFY//s9ei9jVIlkYi8AfOjiixcLJSGNSOAdQ=="],
+
+ "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-eESK5yfPNTqpAmDfFWNsOhmIOaQA59tAcF/EfYvo5/QWQCzXn5iUSOnqt3ra3UdzBv073ykTtmeLJZGt3HhA+w=="],
+
+ "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-Kd8glo7sIZtwOLcPbW0yLpKmBNWMANZhrC1r6K++uDR2zyzb6AeOYtI6udbtabmQpFaxJ8uduXMAo1gs5ozz8A=="],
+
+ "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-EJiyS70BYybOBpJth3M0KLOus0n+RRMKTYzhYhFeMwp7e/RaajXvP+BWlmEXNk6uk+KAu46j/kaQzr6au+JcIw=="],
+
+ "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.3", "", { "os": "freebsd", "cpu": "x64" }, "sha512-Q+wSjaLpGxYf7zC0kL0nDlhsfuFkoN+EXrx2KSB33RhinWzejOd6AvgmP5JbkgXKmjhmpfgKZq24pneodYqE8Q=="],
+
+ "@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.3", "", { "os": "linux", "cpu": "arm" }, "sha512-dUOVmAUzuHy2ZOKIHIKHCm58HKzFqd+puLaS424h6I85GlSDRZIA5ycBixb3mFgM0Jdh+ZOSB6KptX30DD8YOQ=="],
+
+ "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-xCUgnNYhRD5bb1C1nqrDV1PfkwgbswTTBRbAd8aH5PhYzikdf/ddtsYyMXFfGSsb/6t6QaPSzxtbfAZr9uox4A=="],
+
+ "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.3", "", { "os": "linux", "cpu": "ia32" }, "sha512-yplPOpczHOO4jTYKmuYuANI3WhvIPSVANGcNUeMlxH4twz/TeXuzEP41tGKNGWJjuMhotpGabeFYGAOU2ummBw=="],
+
+ "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.3", "", { "os": "linux", "cpu": "none" }, "sha512-P4BLP5/fjyihmXCELRGrLd793q/lBtKMQl8ARGpDxgzgIKJDRJ/u4r1A/HgpBpKpKZelGct2PGI4T+axcedf6g=="],
+
+ "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.3", "", { "os": "linux", "cpu": "none" }, "sha512-eRAOV2ODpu6P5divMEMa26RRqb2yUoYsuQQOuFUexUoQndm4MdpXXDBbUoKIc0iPa4aCO7gIhtnYomkn2x+bag=="],
+
+ "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-ZC4jV2p7VbzTlnl8nZKLcBkfzIf4Yad1SJM4ZMKYnJqZFD4rTI+pBG65u8ev4jk3/MPwY9DvGn50wi3uhdaghg=="],
+
+ "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.3", "", { "os": "linux", "cpu": "none" }, "sha512-LDDODcFzNtECTrUUbVCs6j9/bDVqy7DDRsuIXJg6so+mFksgwG7ZVnTruYi5V+z3eE5y+BJZw7VvUadkbfg7QA=="],
+
+ "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-s+w/NOY2k0yC2p9SLen+ymflgcpRkvwwa02fqmAwhBRI3SC12uiS10edHHXlVWwfAagYSY5UpmT/zISXPMW3tQ=="],
+
+ "@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.3", "", { "os": "linux", "cpu": "x64" }, "sha512-nQHDz4pXjSDC6UfOE1Fw9Q8d6GCAd9KdvMZpfVGWSJztYCarRgSDfOVBY5xwhQXseiyxapkiSJi/5/ja8mRFFA=="],
+
+ "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.3", "", { "os": "none", "cpu": "arm64" }, "sha512-1QaLtOWq0mzK6tzzp0jRN3eccmN3hezey7mhLnzC6oNlJoUJz4nym5ZD7mDnS/LZQgkrhEbEiTn515lPeLpgWA=="],
+
+ "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.3", "", { "os": "none", "cpu": "x64" }, "sha512-i5Hm68HXHdgv8wkrt+10Bc50zM0/eonPb/a/OFVfB6Qvpiirco5gBA5bz7S2SHuU+Y4LWn/zehzNX14Sp4r27g=="],
+
+ "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.3", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-zGAVApJEYTbOC6H/3QBr2mq3upG/LBEXr85/pTtKiv2IXcgKV0RT0QA/hSXZqSvLEpXeIxah7LczB4lkiYhTAQ=="],
+
+ "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.3", "", { "os": "openbsd", "cpu": "x64" }, "sha512-fpqctI45NnCIDKBH5AXQBsD0NDPbEFczK98hk/aa6HJxbl+UtLkJV2+Bvy5hLSLk3LHmqt0NTkKNso1A9y1a4w=="],
+
+ "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.3", "", { "os": "sunos", "cpu": "x64" }, "sha512-ROJhm7d8bk9dMCUZjkS8fgzsPAZEjtRJqCAmVgB0gMrvG7hfmPmz9k1rwO4jSiblFjYmNvbECL9uhaPzONMfgA=="],
+
+ "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-YWcow8peiHpNBiIXHwaswPnAXLsLVygFwCB3A7Bh5jRkIBFWHGmNQ48AlX4xDvQNoMZlPYzjVOQDYEzWCqufMQ=="],
+
+ "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-qspTZOIGoXVS4DpNqUYUs9UxVb04khS1Degaw/MnfMe7goQ3lTfQ13Vw4qY/Nj0979BGvMRpAYbs/BAxEvU8ew=="],
+
+ "@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.3", "", { "os": "win32", "cpu": "x64" }, "sha512-ICgUR+kPimx0vvRzf+N/7L7tVSQeE3BYY+NhHRHXS1kBuPO7z2+7ea2HbhDyZdTephgvNvKrlDDKUexuCVBVvg=="],
+
+ "@types/bun": ["@types/bun@1.2.12", "", { "dependencies": { "bun-types": "1.2.12" } }, "sha512-lY/GQTXDGsolT/TiH72p1tuyUORuRrdV7VwOTOjDOt8uTBJQOJc5zz3ufwwDl0VBaoxotSk4LdP0hhjLJ6ypIQ=="],
+
+ "@types/node": ["@types/node@22.15.3", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-lX7HFZeHf4QG/J7tBZqrCAXwz9J5RD56Y6MpP0eJkka8p+K0RY/yBTW7CYFJ4VGCclxqOLKmiGP5juQc6MKgcw=="],
+
+ "@xterm/xterm": ["@xterm/xterm@5.5.0", "", {}, "sha512-hqJHYaQb5OptNunnyAnkHyM8aCjZ1MEIDTQu1iIbbTD/xops91NB5yq1ZK/dC2JDbVWtF23zUtl9JE2NqwT87A=="],
+
+ "@zenfs/core": ["@zenfs/core@2.2.0", "", { "dependencies": { "@types/node": "^22.15.2", "buffer": "^6.0.3", "eventemitter3": "^5.0.1", "kerium": "^1.3.4", "memium": "^0.2.0", "readable-stream": "^4.5.2", "utilium": "^2.3.3" }, "bin": { "make-index": "scripts/make-index.js", "zenfs-test": "scripts/test.js", "zci": "scripts/ci-cli.js" } }, "sha512-OF/Y+Tbj3kuTowiahBtVw13+wHgAsqhL1spkPDnWxFlTrnWEgWhBaDbAeC/0pbH1zQBQUBrvU2bKmguYdMVl4g=="],
+
+ "abort-controller": ["abort-controller@3.0.0", "", { "dependencies": { "event-target-shim": "^5.0.0" } }, "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg=="],
+
+ "base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="],
+
+ "buffer": ["buffer@6.0.3", "", { "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.2.1" } }, "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA=="],
+
+ "bun-types": ["bun-types@1.2.12", "", { "dependencies": { "@types/node": "*" } }, "sha512-tvWMx5vPqbRXgE8WUZI94iS1xAYs8bkqESR9cxBB1Wi+urvfTrF1uzuDgBHFAdO0+d2lmsbG3HmeKMvUyj6pWA=="],
+
+ "esbuild": ["esbuild@0.25.3", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.3", "@esbuild/android-arm": "0.25.3", "@esbuild/android-arm64": "0.25.3", "@esbuild/android-x64": "0.25.3", "@esbuild/darwin-arm64": "0.25.3", "@esbuild/darwin-x64": "0.25.3", "@esbuild/freebsd-arm64": "0.25.3", "@esbuild/freebsd-x64": "0.25.3", "@esbuild/linux-arm": "0.25.3", "@esbuild/linux-arm64": "0.25.3", "@esbuild/linux-ia32": "0.25.3", "@esbuild/linux-loong64": "0.25.3", "@esbuild/linux-mips64el": "0.25.3", "@esbuild/linux-ppc64": "0.25.3", "@esbuild/linux-riscv64": "0.25.3", "@esbuild/linux-s390x": "0.25.3", "@esbuild/linux-x64": "0.25.3", "@esbuild/netbsd-arm64": "0.25.3", "@esbuild/netbsd-x64": "0.25.3", "@esbuild/openbsd-arm64": "0.25.3", "@esbuild/openbsd-x64": "0.25.3", "@esbuild/sunos-x64": "0.25.3", "@esbuild/win32-arm64": "0.25.3", "@esbuild/win32-ia32": "0.25.3", "@esbuild/win32-x64": "0.25.3" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-qKA6Pvai73+M2FtftpNKRxJ78GIjmFXFxd/1DVBqGo/qNhLSfv+G12n9pNoWdytJC8U00TrViOwpjT0zgqQS8Q=="],
+
+ "event-target-shim": ["event-target-shim@5.0.1", "", {}, "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="],
+
+ "eventemitter3": ["eventemitter3@5.0.1", "", {}, "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA=="],
+
+ "events": ["events@3.3.0", "", {}, "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q=="],
+
+ "ieee754": ["ieee754@1.2.1", "", {}, "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="],
+
+ "kerium": ["kerium@1.3.5", "", { "dependencies": { "utilium": "^2.0.0" } }, "sha512-V4hITm/a5l9SyPCPg//ENmbs24hHWVtEOQlyQaZX9bH/WBVY4kXwXaWPSobWGpNNOMOwe1FU/WgTd5dC/bIDKA=="],
+
+ "memium": ["memium@0.2.0", "", { "dependencies": { "kerium": "^1.3.2", "utilium": "^2.0.0" } }, "sha512-BFNZHfk+zIFWmZ3zMr50S3KXVvw53E/kzlPy48aw9c493XyH8u13c3P6vufKj150P/8Qtre5sxbwfNWXkLUXYA=="],
+
+ "process": ["process@0.11.10", "", {}, "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A=="],
+
+ "readable-stream": ["readable-stream@4.7.0", "", { "dependencies": { "abort-controller": "^3.0.0", "buffer": "^6.0.3", "events": "^3.3.0", "process": "^0.11.10", "string_decoder": "^1.3.0" } }, "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg=="],
+
+ "safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="],
+
+ "string_decoder": ["string_decoder@1.3.0", "", { "dependencies": { "safe-buffer": "~5.2.0" } }, "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA=="],
+
+ "typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="],
+
+ "undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="],
+
+ "utilium": ["utilium@2.3.3", "", { "dependencies": { "eventemitter3": "^5.0.1" }, "optionalDependencies": { "@xterm/xterm": "^5.5.0" } }, "sha512-uk/9WFeSzSSNoKIVtBJvc+VhAX8MTyhp4HTM7L4DuW31D+DqPE8T+nPkIxG/Ae/sMplCn8X7CC3r5WxfdMaoFw=="],
+ }
+}
diff --git a/dist/index.html b/dist/index.html
new file mode 100644
index 00000000000..efa3e787bc8
--- /dev/null
+++ b/dist/index.html
@@ -0,0 +1,12 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="utf-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1">
+  <title>Bee is flying</title>
+  <link rel="stylesheet" href="style.css">
+</head>
+<body>
+  <script src="main.js"></script>
+</body>
+</html>
diff --git a/dist/main.js b/dist/main.js
new file mode 100644
index 00000000000..ebac80502b6
--- /dev/null
+++ b/dist/main.js
@@ -0,0 +1,11 @@
+if ('serviceWorker' in navigator) {
+ navigator.serviceWorker.register('/sw_bundle.js', { scope: '/' })
+ .then((registration) => {
+ console.log('Service Worker registered successfully:', registration)
+ })
+ .catch((error) => {
+ console.error('Service Worker registration failed:', error)
+ })
+} else {
+ console.log('Service Worker is not supported in this browser.')
+}
diff --git a/dist/package.json b/dist/package.json
new file mode 100644
index 00000000000..3cf1d4fe3e8
--- /dev/null
+++ b/dist/package.json
@@ -0,0 +1,17 @@
+{
+ "name": "dist",
+ "private": true,
+ "devDependencies": {
+ "@types/bun": "latest",
+ "esbuild": "^0.25.3"
+ },
+ "peerDependencies": {
+ "typescript": "^5"
+ },
+ "dependencies": {
+ "@zenfs/core": "^2.2.0"
+ },
+ "scripts": {
+ "build": "bunx esbuild sw.js --bundle --platform=browser > sw_bundle.js"
+ }
+}
diff --git a/dist/style.css b/dist/style.css
new file mode 100644
index 00000000000..9c0cd695523
--- /dev/null
+++ b/dist/style.css
@@ -0,0 +1,3 @@
+body {
+ background-color: rgb(71, 55, 24);
+}
diff --git a/dist/sw.js b/dist/sw.js
new file mode 100644
index 00000000000..b537e9a17ea
--- /dev/null
+++ b/dist/sw.js
@@ -0,0 +1,177 @@
+import './wasm_exec.js'
+
+import { configure, fs, InMemory } from '@zenfs/core'
+
+const WASM_PATH = 'bee.wasm'
+
+self.addEventListener('activate', (event) => {
+ event.waitUntil(
+ clients.claim().catch((error) => {
+ console.error('Error during service worker activate:', error)
+ }),
+ )
+})
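+// clients.claim() lets the worker take control of already-open pages without
+// a reload, so requests start flowing through the node immediately.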
+
+// Route fetch events under the registration scope to the HTTP handler that
+// the WASM node installs once it has started.
+const path = new URL(self.registration.scope).pathname
+const handlerPromise = new Promise((setHandler) => {
+ self.wasmhttp = { path, setHandler }
+})
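+// Requests that arrive before bee.wasm calls setHandler simply wait on
+// handlerPromise; self.wasmhttp is the bridge consumed by
+// go-wasm-http-server inside the Go binary.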
+
+self.addEventListener('fetch', (e) => {
+ const { pathname } = new URL(e.request.url)
+ if (!pathname.startsWith(path)) return
+
+ e.respondWith(handlerPromise.then((handler) => handler(e.request)))
+})
+
+async function main() {
+ // Set up ZenFS in-memory filesystem
+ await configure({
+ mounts: {
+ '/tmp': InMemory,
+ '/home/user': InMemory,
+ },
+ })
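+  // Note: InMemory mounts do not persist, so all node state is lost whenever
+  // the browser restarts the service worker.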
+
+ // Create necessary directories
+ await fs.promises.mkdir('/home/user/.bee/keys', {
+ recursive: true,
+ mode: 0o700,
+ })
+
+ // Write key files
+ await fs.promises.writeFile(
+ '/home/user/.bee/keys/libp2p_v2.key',
+ JSON.stringify({
+ address:
+ '049886e5793c6261f59e7b047a91c27226cdbc2ba5af60c9e26705c15441ec9e3f7daa7085a2a7665c338171eb2bf1b65a173636137405d825d0385bc4defacaf4',
+ crypto: {
+ cipher: 'aes-128-ctr',
+ ciphertext:
+ 'e35f6f83893bc6186119b85244b43d42b08f92891b6cb7c81f695c0a94ea2536c84fb84e3410618ddee7c814acdf35f1facc79597540e6fa3d460278ffa414311880676ef5fad8b06362b422c139ffb5cdbad530d371e645dc8e496b7b04f93c2ae23554cfc1452a414bf0c1324d326d45980d190ff784ebd9',
+ cipherparams: { iv: 'f917c56ec7e2aa36fd592c63894aa18a' },
+ kdf: 'scrypt',
+ kdfparams: {
+ n: 32768,
+ r: 8,
+ p: 1,
+ dklen: 32,
+ salt:
+ 'dcbc48279045788f9b12ffa7989880290b190e50506e2d9596b4d476528cedd0',
+ },
+ mac: '1482a352544e9cc13c1954acf9c313c9e25901c530262c7e198d4f221b76027a',
+ },
+ version: 3,
+ id: '5117e84d-0a2b-4c4c-808d-1e9676903c8a',
+ }),
+ { mode: 0o600 },
+ )
+
+ await fs.promises.writeFile(
+ '/home/user/.bee/keys/swarm.key',
+ JSON.stringify({
+ address: 'ed48f21d97fd09d08584f42c97f737bc549c49bf',
+ crypto: {
+ cipher: 'aes-128-ctr',
+ ciphertext:
+ '85221a9ec6ff8328f80686ddaa6afe9c1da4b74e8494515cc34e1ff2b9567285',
+ cipherparams: { iv: 'e01d72acdfb68338adcf99ae44f7aeb0' },
+ kdf: 'scrypt',
+ kdfparams: {
+ n: 32768,
+ r: 8,
+ p: 1,
+ dklen: 32,
+ salt:
+ '7ac4dd27cfe9b796793270a6b4c84e9f717533161105a6425576da20aff0f554',
+ },
+ mac: 'ccaa689b4f09bab5580b515dfcb1a6fcbc7ced6d769a5b8232b1508eaa9c6dc3',
+ },
+ version: 3,
+ id: '2f567b5f-122d-4625-a9f5-25c3285550e1',
+ }),
+ { mode: 0o600 },
+ )
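+  // Both keystores above are disposable test keys (scrypt-encrypted V3
+  // JSON); bee decrypts them with the --password flag passed below.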
+
+ // Expose ZenFS for debugging purposes
+ self.ZenFS = fs
+
+ // Initialize Go runtime and set environment variables
+ const go = new Go()
+
+ go.env = {
+ HOME: '/home/user',
+ PATH: '/usr/bin:/usr/local/bin',
+ }
+
+ // Bootstrap addresses
+ const bootstrapMultiaddrs = [
+ '/ip4/127.0.0.1/tcp/1634/ws/p2p/QmbPUii1SGZ6hKiSQjgWV9WuGCmSGSA2L2RBpFQBHPHrTF',
+ // '/ip4/172.26.20.131/tcp/1634/ws/p2p/QmVQhnaTT84UBfW5EPivTd6SP2Rq62pCLKawtugPtd3yXH',
+ // '/ip4/188.245.222.246/tcp/1634/ws/p2p/QmccBpjXGFUS8ZydhUnZYLcMvTNcNSjF5ntyMS8sCfz3o2'
+ ]
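+  // Browsers cannot open raw TCP sockets, so only /ws (WebSocket)
+  // multiaddrs are dialable from inside a service worker.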
+
+ go.argv = [
+ 'bee.wasm',
+ 'start',
+ '--password',
+ 'testing',
+ '--bootnode',
+ bootstrapMultiaddrs[0],
+ '--data-dir',
+ '/home/user/.bee/sepolia',
+ '--verbosity',
+ 'debug',
+ // '--blockchain-rpc-endpoint',
+ // 'https://ethereum-sepolia-rpc.publicnode.com/ac5b7f52aabd778861c2588f872f15c5fc34f0b343ec3d18ac2e91f5526e9c2b',
+ '--mainnet=false',
+ '--network-id=5',
+ ]
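+  // --mainnet=false with --network-id=5 targets a test network; the data
+  // dir lives on the ZenFS mounts configured above.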
+
+  // Extend Go's import object with an fs namespace whose readFile is backed
+  // by ZenFS (wasm_exec.js resolves file syscalls via globalThis.fs).
+ const goImportObject = {
+ ...go.importObject,
+ fs: {
+ ...fs,
+      readFile: async (path) => {
+        try {
+          // Read raw bytes: decoding as utf8 and re-encoding would corrupt
+          // any non-text file.
+          return await fs.promises.readFile(path)
+        } catch (error) {
+          console.error('Error reading file:', path, error)
+          throw error
+        }
+      },
+ },
+ }
+
+  // Load and run the WASM binary. instantiateStreaming compiles bee.wasm
+  // while it streams in; awaiting go.run routes Go runtime errors to the
+  // catch handler below.
+  const { instance } = await WebAssembly.instantiateStreaming(
+    fetch(WASM_PATH),
+    goImportObject,
+  )
+  await go.run(instance)
+}
+
+main().catch((error) => {
+ console.error('Error running worker:', error)
+})
diff --git a/dist/tsconfig.json b/dist/tsconfig.json
new file mode 100644
index 00000000000..9c62f74b91e
--- /dev/null
+++ b/dist/tsconfig.json
@@ -0,0 +1,28 @@
+{
+ "compilerOptions": {
+ // Environment setup & latest features
+ "lib": ["ESNext"],
+ "target": "ESNext",
+ "module": "ESNext",
+ "moduleDetection": "force",
+ "jsx": "react-jsx",
+ "allowJs": true,
+
+ // Bundler mode
+ "moduleResolution": "bundler",
+ "allowImportingTsExtensions": true,
+ "verbatimModuleSyntax": true,
+ "noEmit": true,
+
+ // Best practices
+ "strict": true,
+ "skipLibCheck": true,
+ "noFallthroughCasesInSwitch": true,
+ "noUncheckedIndexedAccess": true,
+
+ // Some stricter flags (disabled by default)
+ "noUnusedLocals": false,
+ "noUnusedParameters": false,
+ "noPropertyAccessFromIndexSignature": false
+ }
+}
diff --git a/dist/wasm_exec.js b/dist/wasm_exec.js
new file mode 100644
index 00000000000..d71af9e97e8
--- /dev/null
+++ b/dist/wasm_exec.js
@@ -0,0 +1,575 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+"use strict";
+
+(() => {
+ const enosys = () => {
+ const err = new Error("not implemented");
+ err.code = "ENOSYS";
+ return err;
+ };
+
+ if (!globalThis.fs) {
+ let outputBuf = "";
+ globalThis.fs = {
+ constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1, O_DIRECTORY: -1 }, // unused
+ writeSync(fd, buf) {
+ outputBuf += decoder.decode(buf);
+ const nl = outputBuf.lastIndexOf("\n");
+ if (nl != -1) {
+ console.log(outputBuf.substring(0, nl));
+ outputBuf = outputBuf.substring(nl + 1);
+ }
+ return buf.length;
+ },
+ write(fd, buf, offset, length, position, callback) {
+ if (offset !== 0 || length !== buf.length || position !== null) {
+ callback(enosys());
+ return;
+ }
+ const n = this.writeSync(fd, buf);
+ callback(null, n);
+ },
+ chmod(path, mode, callback) { callback(enosys()); },
+ chown(path, uid, gid, callback) { callback(enosys()); },
+ close(fd, callback) { callback(enosys()); },
+ fchmod(fd, mode, callback) { callback(enosys()); },
+ fchown(fd, uid, gid, callback) { callback(enosys()); },
+ fstat(fd, callback) { callback(enosys()); },
+ fsync(fd, callback) { callback(null); },
+ ftruncate(fd, length, callback) { callback(enosys()); },
+ lchown(path, uid, gid, callback) { callback(enosys()); },
+ link(path, link, callback) { callback(enosys()); },
+ lstat(path, callback) { callback(enosys()); },
+ mkdir(path, perm, callback) { callback(enosys()); },
+ open(path, flags, mode, callback) { callback(enosys()); },
+ read(fd, buffer, offset, length, position, callback) { callback(enosys()); },
+ readdir(path, callback) { callback(enosys()); },
+ readlink(path, callback) { callback(enosys()); },
+ rename(from, to, callback) { callback(enosys()); },
+ rmdir(path, callback) { callback(enosys()); },
+ stat(path, callback) { callback(enosys()); },
+ symlink(path, link, callback) { callback(enosys()); },
+ truncate(path, length, callback) { callback(enosys()); },
+ unlink(path, callback) { callback(enosys()); },
+ utimes(path, atime, mtime, callback) { callback(enosys()); },
+ };
+ }
+
+ if (!globalThis.process) {
+ globalThis.process = {
+ getuid() { return -1; },
+ getgid() { return -1; },
+ geteuid() { return -1; },
+ getegid() { return -1; },
+ getgroups() { throw enosys(); },
+ pid: -1,
+ ppid: -1,
+ umask() { throw enosys(); },
+ cwd() { throw enosys(); },
+ chdir() { throw enosys(); },
+ }
+ }
+
+ if (!globalThis.path) {
+ globalThis.path = {
+ resolve(...pathSegments) {
+ return pathSegments.join("/");
+ }
+ }
+ }
+
+ if (!globalThis.crypto) {
+ throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
+ }
+
+ if (!globalThis.performance) {
+ throw new Error("globalThis.performance is not available, polyfill required (performance.now only)");
+ }
+
+ if (!globalThis.TextEncoder) {
+ throw new Error("globalThis.TextEncoder is not available, polyfill required");
+ }
+
+ if (!globalThis.TextDecoder) {
+ throw new Error("globalThis.TextDecoder is not available, polyfill required");
+ }
+
+ const encoder = new TextEncoder("utf-8");
+ const decoder = new TextDecoder("utf-8");
+
+ globalThis.Go = class {
+ constructor() {
+ this.argv = ["js"];
+ this.env = {};
+ this.exit = (code) => {
+ if (code !== 0) {
+ console.warn("exit code:", code);
+ }
+ };
+ this._exitPromise = new Promise((resolve) => {
+ this._resolveExitPromise = resolve;
+ });
+ this._pendingEvent = null;
+ this._scheduledTimeouts = new Map();
+ this._nextCallbackTimeoutID = 1;
+
+ const setInt64 = (addr, v) => {
+ this.mem.setUint32(addr + 0, v, true);
+ this.mem.setUint32(addr + 4, Math.floor(v / 4294967296), true);
+ }
+
+ const setInt32 = (addr, v) => {
+ this.mem.setUint32(addr + 0, v, true);
+ }
+
+ const getInt64 = (addr) => {
+ const low = this.mem.getUint32(addr + 0, true);
+ const high = this.mem.getInt32(addr + 4, true);
+ return low + high * 4294967296;
+ }
+
+ const loadValue = (addr) => {
+ const f = this.mem.getFloat64(addr, true);
+ if (f === 0) {
+ return undefined;
+ }
+ if (!isNaN(f)) {
+ return f;
+ }
+
+ const id = this.mem.getUint32(addr, true);
+ return this._values[id];
+ }
+
+ const storeValue = (addr, v) => {
+ const nanHead = 0x7FF80000;
+
+ if (typeof v === "number" && v !== 0) {
+ if (isNaN(v)) {
+ this.mem.setUint32(addr + 4, nanHead, true);
+ this.mem.setUint32(addr, 0, true);
+ return;
+ }
+ this.mem.setFloat64(addr, v, true);
+ return;
+ }
+
+ if (v === undefined) {
+ this.mem.setFloat64(addr, 0, true);
+ return;
+ }
+
+ let id = this._ids.get(v);
+ if (id === undefined) {
+ id = this._idPool.pop();
+ if (id === undefined) {
+ id = this._values.length;
+ }
+ this._values[id] = v;
+ this._goRefCounts[id] = 0;
+ this._ids.set(v, id);
+ }
+ this._goRefCounts[id]++;
+ let typeFlag = 0;
+ switch (typeof v) {
+ case "object":
+ if (v !== null) {
+ typeFlag = 1;
+ }
+ break;
+ case "string":
+ typeFlag = 2;
+ break;
+ case "symbol":
+ typeFlag = 3;
+ break;
+ case "function":
+ typeFlag = 4;
+ break;
+ }
+ this.mem.setUint32(addr + 4, nanHead | typeFlag, true);
+ this.mem.setUint32(addr, id, true);
+ }
+
+ const loadSlice = (addr) => {
+ const array = getInt64(addr + 0);
+ const len = getInt64(addr + 8);
+ return new Uint8Array(this._inst.exports.mem.buffer, array, len);
+ }
+
+ const loadSliceOfValues = (addr) => {
+ const array = getInt64(addr + 0);
+ const len = getInt64(addr + 8);
+ const a = new Array(len);
+ for (let i = 0; i < len; i++) {
+ a[i] = loadValue(array + i * 8);
+ }
+ return a;
+ }
+
+ const loadString = (addr) => {
+ const saddr = getInt64(addr + 0);
+ const len = getInt64(addr + 8);
+ return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
+ }
+
+ const testCallExport = (a, b) => {
+ this._inst.exports.testExport0();
+ return this._inst.exports.testExport(a, b);
+ }
+
+ const timeOrigin = Date.now() - performance.now();
+ this.importObject = {
+ _gotest: {
+ add: (a, b) => a + b,
+ callExport: testCallExport,
+ },
+ gojs: {
+ // Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
+ // may synchronously trigger a Go event handler. This makes Go code get executed in the middle of the imported
+ // function. A goroutine can switch to a new stack if the current stack is too small (see morestack function).
+ // This changes the SP, thus we have to update the SP used by the imported function.
+
+ // func wasmExit(code int32)
+ "runtime.wasmExit": (sp) => {
+ sp >>>= 0;
+ const code = this.mem.getInt32(sp + 8, true);
+ this.exited = true;
+ delete this._inst;
+ delete this._values;
+ delete this._goRefCounts;
+ delete this._ids;
+ delete this._idPool;
+ this.exit(code);
+ },
+
+ // func wasmWrite(fd uintptr, p unsafe.Pointer, n int32)
+ "runtime.wasmWrite": (sp) => {
+ sp >>>= 0;
+ const fd = getInt64(sp + 8);
+ const p = getInt64(sp + 16);
+ const n = this.mem.getInt32(sp + 24, true);
+ fs.writeSync(fd, new Uint8Array(this._inst.exports.mem.buffer, p, n));
+ },
+
+ // func resetMemoryDataView()
+ "runtime.resetMemoryDataView": (sp) => {
+ sp >>>= 0;
+ this.mem = new DataView(this._inst.exports.mem.buffer);
+ },
+
+ // func nanotime1() int64
+ "runtime.nanotime1": (sp) => {
+ sp >>>= 0;
+ setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000);
+ },
+
+ // func walltime() (sec int64, nsec int32)
+ "runtime.walltime": (sp) => {
+ sp >>>= 0;
+ const msec = (new Date).getTime();
+ setInt64(sp + 8, msec / 1000);
+ this.mem.setInt32(sp + 16, (msec % 1000) * 1000000, true);
+ },
+
+ // func scheduleTimeoutEvent(delay int64) int32
+ "runtime.scheduleTimeoutEvent": (sp) => {
+ sp >>>= 0;
+ const id = this._nextCallbackTimeoutID;
+ this._nextCallbackTimeoutID++;
+ this._scheduledTimeouts.set(id, setTimeout(
+ () => {
+ this._resume();
+ while (this._scheduledTimeouts.has(id)) {
+ // for some reason Go failed to register the timeout event, log and try again
+ // (temporary workaround for https://github.com/golang/go/issues/28975)
+ console.warn("scheduleTimeoutEvent: missed timeout event");
+ this._resume();
+ }
+ },
+ getInt64(sp + 8),
+ ));
+ this.mem.setInt32(sp + 16, id, true);
+ },
+
+ // func clearTimeoutEvent(id int32)
+ "runtime.clearTimeoutEvent": (sp) => {
+ sp >>>= 0;
+ const id = this.mem.getInt32(sp + 8, true);
+ clearTimeout(this._scheduledTimeouts.get(id));
+ this._scheduledTimeouts.delete(id);
+ },
+
+ // func getRandomData(r []byte)
+ "runtime.getRandomData": (sp) => {
+ sp >>>= 0;
+ crypto.getRandomValues(loadSlice(sp + 8));
+ },
+
+ // func finalizeRef(v ref)
+ "syscall/js.finalizeRef": (sp) => {
+ sp >>>= 0;
+ const id = this.mem.getUint32(sp + 8, true);
+ this._goRefCounts[id]--;
+ if (this._goRefCounts[id] === 0) {
+ const v = this._values[id];
+ this._values[id] = null;
+ this._ids.delete(v);
+ this._idPool.push(id);
+ }
+ },
+
+ // func stringVal(value string) ref
+ "syscall/js.stringVal": (sp) => {
+ sp >>>= 0;
+ storeValue(sp + 24, loadString(sp + 8));
+ },
+
+ // func valueGet(v ref, p string) ref
+ "syscall/js.valueGet": (sp) => {
+ sp >>>= 0;
+ const result = Reflect.get(loadValue(sp + 8), loadString(sp + 16));
+ sp = this._inst.exports.getsp() >>> 0; // see comment above
+ storeValue(sp + 32, result);
+ },
+
+ // func valueSet(v ref, p string, x ref)
+ "syscall/js.valueSet": (sp) => {
+ sp >>>= 0;
+ Reflect.set(loadValue(sp + 8), loadString(sp + 16), loadValue(sp + 32));
+ },
+
+ // func valueDelete(v ref, p string)
+ "syscall/js.valueDelete": (sp) => {
+ sp >>>= 0;
+ Reflect.deleteProperty(loadValue(sp + 8), loadString(sp + 16));
+ },
+
+ // func valueIndex(v ref, i int) ref
+ "syscall/js.valueIndex": (sp) => {
+ sp >>>= 0;
+ storeValue(sp + 24, Reflect.get(loadValue(sp + 8), getInt64(sp + 16)));
+ },
+
+ // valueSetIndex(v ref, i int, x ref)
+ "syscall/js.valueSetIndex": (sp) => {
+ sp >>>= 0;
+ Reflect.set(loadValue(sp + 8), getInt64(sp + 16), loadValue(sp + 24));
+ },
+
+ // func valueCall(v ref, m string, args []ref) (ref, bool)
+ "syscall/js.valueCall": (sp) => {
+ sp >>>= 0;
+ try {
+ const v = loadValue(sp + 8);
+ const m = Reflect.get(v, loadString(sp + 16));
+ const args = loadSliceOfValues(sp + 32);
+ const result = Reflect.apply(m, v, args);
+ sp = this._inst.exports.getsp() >>> 0; // see comment above
+ storeValue(sp + 56, result);
+ this.mem.setUint8(sp + 64, 1);
+ } catch (err) {
+ sp = this._inst.exports.getsp() >>> 0; // see comment above
+ storeValue(sp + 56, err);
+ this.mem.setUint8(sp + 64, 0);
+ }
+ },
+
+ // func valueInvoke(v ref, args []ref) (ref, bool)
+ "syscall/js.valueInvoke": (sp) => {
+ sp >>>= 0;
+ try {
+ const v = loadValue(sp + 8);
+ const args = loadSliceOfValues(sp + 16);
+ const result = Reflect.apply(v, undefined, args);
+ sp = this._inst.exports.getsp() >>> 0; // see comment above
+ storeValue(sp + 40, result);
+ this.mem.setUint8(sp + 48, 1);
+ } catch (err) {
+ sp = this._inst.exports.getsp() >>> 0; // see comment above
+ storeValue(sp + 40, err);
+ this.mem.setUint8(sp + 48, 0);
+ }
+ },
+
+ // func valueNew(v ref, args []ref) (ref, bool)
+ "syscall/js.valueNew": (sp) => {
+ sp >>>= 0;
+ try {
+ const v = loadValue(sp + 8);
+ const args = loadSliceOfValues(sp + 16);
+ const result = Reflect.construct(v, args);
+ sp = this._inst.exports.getsp() >>> 0; // see comment above
+ storeValue(sp + 40, result);
+ this.mem.setUint8(sp + 48, 1);
+ } catch (err) {
+ sp = this._inst.exports.getsp() >>> 0; // see comment above
+ storeValue(sp + 40, err);
+ this.mem.setUint8(sp + 48, 0);
+ }
+ },
+
+ // func valueLength(v ref) int
+ "syscall/js.valueLength": (sp) => {
+ sp >>>= 0;
+ setInt64(sp + 16, parseInt(loadValue(sp + 8).length));
+ },
+
+ // valuePrepareString(v ref) (ref, int)
+ "syscall/js.valuePrepareString": (sp) => {
+ sp >>>= 0;
+ const str = encoder.encode(String(loadValue(sp + 8)));
+ storeValue(sp + 16, str);
+ setInt64(sp + 24, str.length);
+ },
+
+ // valueLoadString(v ref, b []byte)
+ "syscall/js.valueLoadString": (sp) => {
+ sp >>>= 0;
+ const str = loadValue(sp + 8);
+ loadSlice(sp + 16).set(str);
+ },
+
+ // func valueInstanceOf(v ref, t ref) bool
+ "syscall/js.valueInstanceOf": (sp) => {
+ sp >>>= 0;
+ this.mem.setUint8(sp + 24, (loadValue(sp + 8) instanceof loadValue(sp + 16)) ? 1 : 0);
+ },
+
+ // func copyBytesToGo(dst []byte, src ref) (int, bool)
+ "syscall/js.copyBytesToGo": (sp) => {
+ sp >>>= 0;
+ const dst = loadSlice(sp + 8);
+ const src = loadValue(sp + 32);
+ if (!(src instanceof Uint8Array || src instanceof Uint8ClampedArray)) {
+ this.mem.setUint8(sp + 48, 0);
+ return;
+ }
+ const toCopy = src.subarray(0, dst.length);
+ dst.set(toCopy);
+ setInt64(sp + 40, toCopy.length);
+ this.mem.setUint8(sp + 48, 1);
+ },
+
+ // func copyBytesToJS(dst ref, src []byte) (int, bool)
+ "syscall/js.copyBytesToJS": (sp) => {
+ sp >>>= 0;
+ const dst = loadValue(sp + 8);
+ const src = loadSlice(sp + 16);
+ if (!(dst instanceof Uint8Array || dst instanceof Uint8ClampedArray)) {
+ this.mem.setUint8(sp + 48, 0);
+ return;
+ }
+ const toCopy = src.subarray(0, dst.length);
+ dst.set(toCopy);
+ setInt64(sp + 40, toCopy.length);
+ this.mem.setUint8(sp + 48, 1);
+ },
+
+ "debug": (value) => {
+ console.log(value);
+ },
+ }
+ };
+ }
+
+ async run(instance) {
+ if (!(instance instanceof WebAssembly.Instance)) {
+ throw new Error("Go.run: WebAssembly.Instance expected");
+ }
+ this._inst = instance;
+ this.mem = new DataView(this._inst.exports.mem.buffer);
+ this._values = [ // JS values that Go currently has references to, indexed by reference id
+ NaN,
+ 0,
+ null,
+ true,
+ false,
+ globalThis,
+ this,
+ ];
+ this._goRefCounts = new Array(this._values.length).fill(Infinity); // number of references that Go has to a JS value, indexed by reference id
+ this._ids = new Map([ // mapping from JS values to reference ids
+ [0, 1],
+ [null, 2],
+ [true, 3],
+ [false, 4],
+ [globalThis, 5],
+ [this, 6],
+ ]);
+ this._idPool = []; // unused ids that have been garbage collected
+ this.exited = false; // whether the Go program has exited
+
+ // Pass command line arguments and environment variables to WebAssembly by writing them to the linear memory.
+ let offset = 4096;
+
+ const strPtr = (str) => {
+ const ptr = offset;
+ const bytes = encoder.encode(str + "\0");
+ new Uint8Array(this.mem.buffer, offset, bytes.length).set(bytes);
+ offset += bytes.length;
+ if (offset % 8 !== 0) {
+ offset += 8 - (offset % 8);
+ }
+ return ptr;
+ };
+
+ const argc = this.argv.length;
+
+ const argvPtrs = [];
+ this.argv.forEach((arg) => {
+ argvPtrs.push(strPtr(arg));
+ });
+ argvPtrs.push(0);
+
+ const keys = Object.keys(this.env).sort();
+ keys.forEach((key) => {
+ argvPtrs.push(strPtr(`${key}=${this.env[key]}`));
+ });
+ argvPtrs.push(0);
+
+ const argv = offset;
+ argvPtrs.forEach((ptr) => {
+ this.mem.setUint32(offset, ptr, true);
+ this.mem.setUint32(offset + 4, 0, true);
+ offset += 8;
+ });
+
+ // The linker guarantees global data starts from at least wasmMinDataAddr.
+ // Keep in sync with cmd/link/internal/ld/data.go:wasmMinDataAddr.
+ const wasmMinDataAddr = 4096 + 8192;
+ if (offset >= wasmMinDataAddr) {
+ throw new Error("total length of command line and environment variables exceeds limit");
+ }
+
+ this._inst.exports.run(argc, argv);
+ if (this.exited) {
+ this._resolveExitPromise();
+ }
+ await this._exitPromise;
+ }
+
+ _resume() {
+ if (this.exited) {
+ throw new Error("Go program has already exited");
+ }
+ this._inst.exports.resume();
+ if (this.exited) {
+ this._resolveExitPromise();
+ }
+ }
+
+ _makeFuncWrapper(id) {
+ const go = this;
+ return function () {
+ const event = { id: id, this: this, args: arguments };
+ go._pendingEvent = event;
+ go._resume();
+ return event.result;
+ };
+ }
+ }
+})();
diff --git a/go.mod b/go.mod
index 508115eb1b7..194c6ca4316 100644
--- a/go.mod
+++ b/go.mod
@@ -2,181 +2,198 @@ module github.com/ethersphere/bee/v2
go 1.24.0
-toolchain go1.24.2
-
require (
contrib.go.opencensus.io/exporter/prometheus v0.4.2
github.com/armon/go-radix v1.0.0
- github.com/btcsuite/btcd/btcec/v2 v2.3.2
- github.com/coreos/go-semver v0.3.0
- github.com/ethereum/go-ethereum v1.14.13
- github.com/ethersphere/batch-archive v0.0.3
+ github.com/btcsuite/btcd/btcec/v2 v2.3.5
+ github.com/coreos/go-semver v0.3.1
+ github.com/ethereum/go-ethereum v1.16.1
github.com/ethersphere/go-price-oracle-abi v0.6.9
- github.com/ethersphere/go-storage-incentives-abi v0.9.3-rc3
+ github.com/ethersphere/go-storage-incentives-abi v0.9.4
github.com/ethersphere/go-sw3-abi v0.6.9
github.com/ethersphere/langos v1.0.0
- github.com/go-playground/validator/v10 v10.11.1
+ github.com/go-playground/validator/v10 v10.27.0
github.com/gogo/protobuf v1.3.2
- github.com/google/go-cmp v0.6.0
- github.com/google/uuid v1.4.0
- github.com/gorilla/handlers v1.4.2
- github.com/gorilla/mux v1.8.0
- github.com/gorilla/websocket v1.5.1
+ github.com/google/go-cmp v0.7.0
+ github.com/google/uuid v1.6.0
+ github.com/gorilla/handlers v1.5.2
+ github.com/gorilla/mux v1.8.1
+ github.com/gorilla/websocket v1.5.3
github.com/hashicorp/go-multierror v1.1.1
- github.com/hashicorp/golang-lru/v2 v2.0.5
- github.com/ipfs/go-cid v0.4.1
- github.com/kardianos/service v1.2.2
- github.com/klauspost/reedsolomon v1.11.8
- github.com/libp2p/go-libp2p v0.33.2
- github.com/multiformats/go-multiaddr v0.12.3
- github.com/multiformats/go-multiaddr-dns v0.3.1
+ github.com/hashicorp/golang-lru/v2 v2.0.7
+ github.com/ipfs/go-cid v0.5.0
+ github.com/kardianos/service v1.2.4
+ github.com/klauspost/reedsolomon v1.12.5
+ github.com/libp2p/go-libp2p v0.42.1
+ github.com/multiformats/go-multiaddr v0.16.0
+ github.com/multiformats/go-multiaddr-dns v0.4.1
github.com/multiformats/go-multihash v0.2.3
- github.com/multiformats/go-multistream v0.5.0
+ github.com/multiformats/go-multistream v0.6.1
+ github.com/nlepage/go-wasm-http-server/v2 v2.2.1
github.com/opentracing/opentracing-go v1.2.0
- github.com/prometheus/client_golang v1.21.1
- github.com/spf13/afero v1.6.0
- github.com/spf13/cobra v1.5.0
- github.com/spf13/viper v1.7.0
+ github.com/prometheus/client_golang v1.23.0
+ github.com/spf13/afero v1.14.0
+ github.com/spf13/cobra v1.9.1
+ github.com/spf13/viper v1.20.1
github.com/stretchr/testify v1.10.0
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
- github.com/uber/jaeger-client-go v2.24.0+incompatible
- github.com/vmihailenco/msgpack/v5 v5.3.4
- github.com/wealdtech/go-ens/v3 v3.5.1
+ github.com/talentlessguy/go-libp2p-wasmws v0.0.0-20250511171608-cdbf28c9b557
+ github.com/uber/jaeger-client-go v2.30.0+incompatible
+ github.com/vmihailenco/msgpack/v5 v5.4.1
+ github.com/wealdtech/go-ens/v3 v3.6.0
gitlab.com/nolash/go-mockbytes v0.0.7
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.3.0
- golang.org/x/crypto v0.36.0
- golang.org/x/net v0.38.0
- golang.org/x/sync v0.12.0
- golang.org/x/sys v0.31.0
- golang.org/x/term v0.30.0
- golang.org/x/time v0.5.0
- gopkg.in/yaml.v2 v2.4.0
- resenje.org/feed v0.1.2
- resenje.org/multex v0.1.0
- resenje.org/singleflight v0.4.0
- resenje.org/web v0.4.3
+ golang.org/x/crypto v0.40.0
+ golang.org/x/net v0.42.0
+ golang.org/x/sync v0.16.0
+ golang.org/x/sys v0.34.0
+ golang.org/x/term v0.33.0
+ golang.org/x/time v0.12.0
+ resenje.org/feed v0.1.3
+ resenje.org/multex v0.2.0
+ resenje.org/singleflight v0.4.3
+ resenje.org/web v0.9.7
+)
+
+require (
+ github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect
+ github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect
+ github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect
+ github.com/ethereum/c-kzg-4844/v2 v2.1.1 // indirect
+ github.com/ethereum/go-verkle v0.2.2 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/filecoin-project/go-clock v0.1.0 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.9 // indirect
+ github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+ github.com/hack-pad/safejs v0.1.1 // indirect
+ github.com/libp2p/go-yamux/v5 v5.1.0 // indirect
+ github.com/nlepage/go-js-promise v1.1.0 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.4 // indirect
+ github.com/pion/dtls/v3 v3.0.6 // indirect
+ github.com/pion/ice/v4 v4.0.10 // indirect
+ github.com/pion/mdns/v2 v2.0.7 // indirect
+ github.com/pion/srtp/v3 v3.0.6 // indirect
+ github.com/pion/stun/v3 v3.0.0 // indirect
+ github.com/pion/turn/v4 v4.0.2 // indirect
+ github.com/pion/webrtc/v4 v4.1.3 // indirect
+ github.com/sagikazarmark/locafero v0.10.0 // indirect
+ github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
+ github.com/yusufpapurcu/wmi v1.2.4 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
)
require (
- github.com/BurntSushi/toml v1.1.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
- github.com/StackExchange/wmi v1.2.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/bits-and-blooms/bitset v1.13.0 // indirect
- github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
+ github.com/bits-and-blooms/bitset v1.22.0 // indirect
+ github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/codahale/hdrhistogram v0.0.0-00010101000000-000000000000 // indirect
- github.com/consensys/bavard v0.1.13 // indirect
- github.com/consensys/gnark-crypto v0.12.1 // indirect
- github.com/containerd/cgroups v1.1.0 // indirect
- github.com/coreos/go-systemd/v22 v22.5.0 // indirect
- github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect
- github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
+ github.com/coder/websocket v1.8.13 // indirect
+ github.com/consensys/bavard v0.2.1 // indirect
+ github.com/consensys/gnark-crypto v0.18.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
- github.com/deckarep/golang-set/v2 v2.6.0 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
- github.com/docker/go-units v0.5.0 // indirect
- github.com/elastic/gosigar v0.14.2 // indirect
- github.com/ethereum/c-kzg-4844 v1.0.0 // indirect
- github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 // indirect
+ github.com/deckarep/golang-set/v2 v2.8.0 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/felixge/fgprof v0.9.5
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
- github.com/fsnotify/fsnotify v1.6.0 // indirect
- github.com/go-kit/log v0.2.1 // indirect
- github.com/go-logfmt/logfmt v0.5.1 // indirect
+ github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
- github.com/go-playground/locales v0.14.0 // indirect
- github.com/go-playground/universal-translator v0.18.0 // indirect
- github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
- github.com/godbus/dbus/v5 v5.1.0 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
+ github.com/go-playground/locales v0.14.1 // indirect
+ github.com/go-playground/universal-translator v0.18.1 // indirect
+ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
+ github.com/golang/snappy v1.0.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
- github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
- github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
- github.com/hashicorp/errwrap v1.0.0 // indirect
- github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/holiman/uint256 v1.3.1 // indirect
+ github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/holiman/uint256 v1.3.2 // indirect
github.com/huin/goupnp v1.3.0 // indirect
- github.com/inconshreveable/mousetrap v1.0.0 // indirect
- github.com/ipfs/go-log/v2 v2.5.1 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/ipfs/go-log/v2 v2.8.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
- github.com/klauspost/compress v1.17.11 // indirect
- github.com/klauspost/cpuid/v2 v2.2.7 // indirect
- github.com/koron/go-ssdp v0.0.4 // indirect
- github.com/leodido/go-urn v1.2.1 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
+ github.com/klauspost/cpuid/v2 v2.3.0 // indirect
+ github.com/koron/go-ssdp v0.0.6 // indirect
+ github.com/leodido/go-urn v1.4.0 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
- github.com/libp2p/go-flow-metrics v0.1.0 // indirect
+ github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
- github.com/libp2p/go-nat v0.2.0 // indirect
- github.com/libp2p/go-netroute v0.2.1 // indirect
+ github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
- github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
- github.com/magiconair/properties v1.8.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/miekg/dns v1.1.58 // indirect
+ github.com/miekg/dns v1.1.68 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
- github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
- github.com/multiformats/go-multicodec v0.9.0 // indirect
+ github.com/multiformats/go-multicodec v0.9.2 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/onsi/ginkgo/v2 v2.15.0 // indirect
- github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
- github.com/pelletier/go-toml v1.8.0 // indirect
+ github.com/pion/datachannel v1.5.10 // indirect
+ github.com/pion/dtls/v2 v2.2.12 // indirect
+ github.com/pion/interceptor v0.1.40 // indirect
+ github.com/pion/logging v0.2.4 // indirect
+ github.com/pion/randutil v0.1.0 // indirect
+ github.com/pion/rtcp v1.2.15 // indirect
+ github.com/pion/rtp v1.8.21 // indirect
+ github.com/pion/sctp v1.8.39 // indirect
+ github.com/pion/sdp/v3 v3.0.15 // indirect
+ github.com/pion/stun v0.6.1 // indirect
+ github.com/pion/transport/v2 v2.2.10 // indirect
+ github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.62.0
- github.com/prometheus/procfs v0.15.1 // indirect
- github.com/prometheus/statsd_exporter v0.22.7 // indirect
- github.com/quic-go/qpack v0.4.0 // indirect
- github.com/quic-go/quic-go v0.42.0 // indirect
- github.com/quic-go/webtransport-go v0.6.0 // indirect
- github.com/raulk/go-watchdog v1.3.0 // indirect
- github.com/shirou/gopsutil v3.21.5+incompatible // indirect
- github.com/smartystreets/assertions v1.1.1 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.65.0
+ github.com/prometheus/procfs v0.17.0 // indirect
+ github.com/prometheus/statsd_exporter v0.28.0 // indirect
+ github.com/quic-go/qpack v0.5.1 // indirect
+ github.com/quic-go/quic-go v0.54.0 // indirect
+ github.com/quic-go/webtransport-go v0.9.0 // indirect
+ github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
- github.com/spf13/cast v1.3.0 // indirect
- github.com/spf13/jwalterweatherman v1.0.0 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
- github.com/subosito/gotenv v1.2.0 // indirect
- github.com/supranational/blst v0.3.13 // indirect
- github.com/tklauser/go-sysconf v0.3.12 // indirect
- github.com/tklauser/numcpus v0.6.1 // indirect
- github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
+ github.com/spf13/cast v1.9.2 // indirect
+ github.com/spf13/pflag v1.0.7 // indirect
+ github.com/subosito/gotenv v1.6.0 // indirect
+ github.com/supranational/blst v0.3.15 // indirect
+ github.com/tklauser/go-sysconf v0.3.15 // indirect
+ github.com/tklauser/numcpus v0.10.0 // indirect
+ github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/wealdtech/go-multicodec v1.4.0 // indirect
+ github.com/wlynxg/anet v0.0.5 // indirect
go.opencensus.io v0.24.0 // indirect
- go.uber.org/dig v1.17.1 // indirect
- go.uber.org/fx v1.20.1 // indirect
- go.uber.org/mock v0.4.0 // indirect
+ go.uber.org/dig v1.19.0 // indirect
+ go.uber.org/fx v1.24.0 // indirect
+ go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
- golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
- golang.org/x/mod v0.17.0 // indirect
- golang.org/x/text v0.23.0 // indirect
- golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
- google.golang.org/protobuf v1.36.1 // indirect
- gopkg.in/ini.v1 v1.57.0 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
- lukechampine.com/blake3 v1.2.1 // indirect
+ golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
+ golang.org/x/mod v0.26.0 // indirect
+ golang.org/x/text v0.27.0 // indirect
+ golang.org/x/tools v0.35.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
+ gopkg.in/yaml.v3 v3.0.1
+ lukechampine.com/blake3 v1.4.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
replace github.com/codahale/hdrhistogram => github.com/HdrHistogram/hdrhistogram-go v0.0.0-20200919145931-8dac23c8dac1
+
+replace github.com/syndtr/goleveldb => ../goleveldb
+
+replace github.com/libp2p/go-libp2p => ../go-libp2p
+
+// replace github.com/ethereum/go-ethereum => ../go-ethereum
diff --git a/go.sum b/go.sum
index 700d78dfb2f..d47debea131 100644
--- a/go.sum
+++ b/go.sum
@@ -3,13 +3,11 @@ cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
@@ -23,10 +21,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -36,7 +32,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
@@ -45,110 +40,49 @@ dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBr
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
-github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
-github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/HdrHistogram/hdrhistogram-go v0.0.0-20200919145931-8dac23c8dac1 h1:nEjGZtKHMK92888VT6XkzKwyiW14v5FFRGeWq2uV7N0=
-github.com/HdrHistogram/hdrhistogram-go v0.0.0-20200919145931-8dac23c8dac1/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
+github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
+github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
-github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
-github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
-github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
-github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
-github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
-github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo=
-github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y=
-github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8=
-github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4=
-github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0=
-github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM=
-github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
-github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
-github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4=
+github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
-github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
-github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
-github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
-github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
-github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
-github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
-github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
-github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
-github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
+github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU=
+github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
-github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
-github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
-github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
@@ -158,9 +92,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
@@ -174,108 +106,95 @@ github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwP
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
-github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
-github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
-github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
-github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
-github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
-github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
-github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
+github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
+github.com/consensys/bavard v0.1.30 h1:wwAj9lSnMLFXjEclKwyhf7Oslg8EoaFz9u1QGgt0bsk=
+github.com/consensys/bavard v0.1.30/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/bavard v0.2.1 h1:i2/ZeLXpp7eblPWzUIWf+dtfBocKQIxuiqy9XZlNSfQ=
+github.com/consensys/bavard v0.2.1/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/gnark-crypto v0.17.0 h1:vKDhZMOrySbpZDCvGMOELrHFv/A9mJ7+9I8HEfRZSkI=
+github.com/consensys/gnark-crypto v0.17.0/go.mod h1:A2URlMHUT81ifJ0UlLzSlm7TmnE3t7VxEThApdMukJw=
+github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0=
+github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I=
-github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
-github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
-github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
+github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI=
+github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
+github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
+github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM=
+github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
+github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
-github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
-github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
-github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
-github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
-github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
-github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
-github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
-github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
-github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
+github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ=
+github.com/deckarep/golang-set/v2 v2.8.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
+github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
+github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
+github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0=
+github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
-github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
-github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA=
-github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
-github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg=
-github.com/ethereum/go-ethereum v1.14.13 h1:L81Wmv0OUP6cf4CW6wtXsr23RUrDhKs2+Y9Qto+OgHU=
-github.com/ethereum/go-ethereum v1.14.13/go.mod h1:RAC2gVMWJ6FkxSPESfbshrcKpIokgQKsVKmAuqdekDY=
-github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 h1:8NfxH2iXvJ60YRB8ChToFTUzl8awsc3cJ8CbLjGIl/A=
-github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
-github.com/ethersphere/batch-archive v0.0.3 h1:rAzvixdDkxLV5A6XdbG3uxts8ciJ+1ShgZUKE2+qsqI=
-github.com/ethersphere/batch-archive v0.0.3/go.mod h1:41BPb192NoK9CYjNB8BAE1J2MtiI/5aq0Wtas5O7A7Q=
+github.com/ethereum/c-kzg-4844/v2 v2.1.1 h1:KhzBVjmURsfr1+S3k/VE35T02+AW2qU9t9gr4R6YpSo=
+github.com/ethereum/c-kzg-4844/v2 v2.1.1/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E=
+github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU=
+github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI=
+github.com/ethereum/go-ethereum v1.16.1 h1:7684NfKCb1+IChudzdKyZJ12l1Tq4ybPZOITiCDXqCk=
+github.com/ethereum/go-ethereum v1.16.1/go.mod h1:ngYIvmMAYdo4sGW9cGzLvSsPGhDOOzL0jK5S5iXpj0g=
+github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
+github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
+github.com/ethersphere/go-price-oracle-abi v0.6.8 h1:23Y0msO4ZRvB9o1NRdFDd0eewlnx37XxQm2DKbL6Qk8=
+github.com/ethersphere/go-price-oracle-abi v0.6.8/go.mod h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk=
github.com/ethersphere/go-price-oracle-abi v0.6.9 h1:bseen6he3PZv5GHOm+KD6s4awaFmVSD9LFx+HpB6rCU=
github.com/ethersphere/go-price-oracle-abi v0.6.9/go.mod h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk=
github.com/ethersphere/go-storage-incentives-abi v0.9.3-rc3 h1:uuowc0ekcipVwYkn1Rud9LySZ094hrDq2/YfRKyjbbQ=
github.com/ethersphere/go-storage-incentives-abi v0.9.3-rc3/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc=
+github.com/ethersphere/go-storage-incentives-abi v0.9.4 h1:mSIWXQXg5OQmH10QvXMV5w0vbSibFMaRlBL37gPLTM0=
+github.com/ethersphere/go-storage-incentives-abi v0.9.4/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc=
+github.com/ethersphere/go-sw3-abi v0.6.5 h1:M5dcIe1zQYvGpY2K07UNkNU9Obc4U+A1fz68Ho/Q+XE=
+github.com/ethersphere/go-sw3-abi v0.6.5/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU=
github.com/ethersphere/go-sw3-abi v0.6.9 h1:TnWLnYkWE5UvC17mQBdUmdkzhPhO8GcqvWy4wvd1QJQ=
github.com/ethersphere/go-sw3-abi v0.6.9/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU=
github.com/ethersphere/langos v1.0.0 h1:NBtNKzXTTRSue95uOlzPN4py7Aofs0xWPzyj4AI1Vcc=
github.com/ethersphere/langos v1.0.0/go.mod h1:dlcN2j4O8sQ+BlCaxeBu43bgr4RQ+inJ+pHwLeZg5Tw=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
-github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk=
+github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
+github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU=
+github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
+github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
+github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
-github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
-github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -284,58 +203,52 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
-github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
-github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
-github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
-github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
-github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
-github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
-github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
+github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
+github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
+github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
-github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
-github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -360,17 +273,10 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
+github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -378,17 +284,15 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
@@ -402,153 +306,107 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
+github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
-github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
-github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
-github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
+github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hack-pad/safejs v0.1.1 h1:d5qPO0iQ7h2oVtpzGnLExE+Wn9AtytxIfltcS2b9KD8=
+github.com/hack-pad/safejs v0.1.1/go.mod h1:HdS+bKF1NrE72VoXZeWzxFOVQVUSqZJAG0xNCnb+Tio=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
-github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
-github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
-github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs=
-github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo=
+github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
+github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
-github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
-github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
-github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
-github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
-github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
-github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
-github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
-github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
-github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
-github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
-github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
-github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
-github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
-github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k=
+github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
+github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs=
+github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU=
+github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
+github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
+github.com/ipfs/go-log/v2 v2.8.0 h1:SptNTPJQV3s5EF4FdrTu/yVdOKfGbDgn1EBZx4til2o=
+github.com/ipfs/go-log/v2 v2.8.0/go.mod h1:2LEEhdv8BGubPeSFTyzbqhCqrwqxCbuTNTLWqgNAipo=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
-github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
-github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
-github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
-github.com/karalabe/usb v0.0.0-20210518091819-4ea20957c210/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
-github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kardianos/service v1.2.4 h1:XNlGtZOYNx2u91urOdg/Kfmc+gfmuIo1Dd3rEi2OgBk=
+github.com/kardianos/service v1.2.4/go.mod h1:E4V9ufUuY82F7Ztlu1eN9VXWIQxg8NoLQlmFe0MtrXc=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
-github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
-github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
-github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
-github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY=
-github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
+github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA=
+github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU=
+github.com/klauspost/reedsolomon v1.12.5 h1:4cJuyH926If33BeDgiZpI5OU0pE+wUHZvMSyNGqN73Y=
+github.com/klauspost/reedsolomon v1.12.5/go.mod h1:LkXRjLYGM8K/iQfujYnaPeDmhZLqkrGUyG9p7zs5L68=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
-github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
+github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -558,71 +416,48 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
-github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
+github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
-github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
-github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
-github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
-github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.33.2 h1:vCdwnFxoGOXMKmaGHlDSnL4bM3fQeW8pgIa9DECnb40=
-github.com/libp2p/go-libp2p v0.33.2/go.mod h1:zTeppLuCvUIkT118pFVzA8xzP/p2dJYOMApCkFh0Yww=
+github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
+github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
+github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784=
+github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
-github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
-github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
-github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
-github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
-github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
+github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
+github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
-github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
-github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
-github.com/lucas-clemente/quic-go v0.15.2/go.mod h1:qxmO5Y4ZMhdNkunGfxuZnZXnJwYpW9vjQkyrZ7BsgUI=
+github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
+github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
+github.com/libp2p/go-yamux/v5 v5.1.0 h1:8Qlxj4E9JGJAQVW6+uj2o7mqkqsIVlSUGmTWhlXzoHE=
+github.com/libp2p/go-yamux/v5 v5.1.0/go.mod h1:tgIQ07ObtRR/I0IWsFOyQIL9/dR5UXgc2s8xKmNZv1o=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
-github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI=
-github.com/marten-seemann/qtls v0.8.0/go.mod h1:Lao6jDqlCfxyLKYFmZXGm2LSHBgVn+P+ROOex6YkT+k=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
-github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
-github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
+github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
+github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
+github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
+github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -631,16 +466,8 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdn
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
@@ -653,187 +480,186 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
-github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
-github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
-github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
-github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
-github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
-github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y=
-github.com/multiformats/go-multiaddr v0.3.2/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
-github.com/multiformats/go-multiaddr v0.12.3 h1:hVBXvPRcKG0w80VinQ23P5t7czWgg65BmIvQKjDydU8=
-github.com/multiformats/go-multiaddr v0.12.3/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
-github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
-github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
+github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
+github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
+github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
-github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
-github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
-github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
+github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
+github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
+github.com/multiformats/go-multicodec v0.9.2 h1:YrlXCuqxjqm3bXl+vBq5LKz5pz4mvAsugdqy78k0pXQ=
+github.com/multiformats/go-multicodec v0.9.2/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
-github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
-github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
-github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
-github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
-github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
-github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
-github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
-github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
+github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
-github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nlepage/go-js-promise v1.1.0 h1:BfvywsIMo4cpNOKyoReBWkxEW8f9HMwXqGc45wEKPRs=
+github.com/nlepage/go-js-promise v1.1.0/go.mod h1:bdOP0wObXu34euibyK39K1hoBCtlgTKXGc56AGflaRo=
+github.com/nlepage/go-wasm-http-server/v2 v2.2.1 h1:4tzhSb3HKQ3Ykt2TPfqEnmcPfw8n1E8agv4OzAyckr8=
+github.com/nlepage/go-wasm-http-server/v2 v2.2.1/go.mod h1:r8j7cEOeUqNp+c+C52sNuWaFTvvT/cNqIwBuEtA36HA=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
-github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
-github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
-github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
+github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
+github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw=
-github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
-github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
+github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
-github.com/peterh/liner v1.2.1/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
-github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
+github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
+github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
+github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
+github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
+github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
+github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
+github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
+github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
+github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
+github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
+github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=
+github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=
+github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
+github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
+github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
+github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
+github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
+github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
+github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
+github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
+github.com/pion/rtp v1.8.21 h1:3yrOwmZFyUpcIosNcWRpQaU+UXIJ6yxLuJ8Bx0mw37Y=
+github.com/pion/rtp v1.8.21/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
+github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
+github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
+github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
+github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
+github.com/pion/sdp/v3 v3.0.15 h1:F0I1zds+K/+37ZrzdADmx2Q44OFDOPRLhPnNTaUX9hk=
+github.com/pion/sdp/v3 v3.0.15/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
+github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
+github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
+github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
+github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
+github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0=
+github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ=
+github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
+github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
+github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
+github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
+github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
+github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
+github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
+github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
+github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
+github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
+github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
+github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
+github.com/pion/webrtc/v4 v4.1.3 h1:YZ67Boj9X/hk190jJZ8+HFGQ6DqSZ/fYP3sLAZv7c3c=
+github.com/pion/webrtc/v4 v4.1.3/go.mod h1:rsq+zQ82ryfR9vbb0L1umPJ6Ogq7zm8mcn9fcGnxomM=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
-github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
+github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
+github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
-github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
-github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
-github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM=
-github.com/quic-go/quic-go v0.42.0/go.mod h1:132kz4kL3F9vxhW3CtQJLDVwcFe5wdWeJXXijhsO57M=
-github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY=
-github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc=
-github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
-github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
-github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
+github.com/prometheus/statsd_exporter v0.28.0 h1:S3ZLyLm/hOKHYZFOF0h4zYmd0EeKyPF9R1pFBYXUgYY=
+github.com/prometheus/statsd_exporter v0.28.0/go.mod h1:Lq41vNkMLfiPANmI+uHb5/rpFFUTxPXiiNpmsAYLvDI=
+github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
+github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
+github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
+github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
+github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
+github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
-github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
-github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
+github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
+github.com/sagikazarmark/locafero v0.10.0 h1:FM8Cv6j2KqIhM2ZK7HZjm4mpj9NBktLgowT1aN9q5Cc=
+github.com/sagikazarmark/locafero v0.10.0/go.mod h1:Ieo3EUsjifvQu4NZwV5sPd4dwvu0OCgEQV7vjc9yDjw=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc=
-github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
+github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
@@ -854,46 +680,39 @@ github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.1.1 h1:T/YLemO5Yp7KPzS+lVtu+WsHn8yoSwTfItdAd1r3cck=
-github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
-github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
-github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
-github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
+github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
+github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
+github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
+github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE=
+github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
+github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
+github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -902,65 +721,56 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
-github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
-github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
-github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo=
+github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/talentlessguy/go-libp2p-wasmws v0.0.0-20250511171608-cdbf28c9b557 h1:g8/2rrNAnmIb3My8fo+d6Nk7YB8vKFfL0UP+rziEzuU=
+github.com/talentlessguy/go-libp2p-wasmws v0.0.0-20250511171608-cdbf28c9b557/go.mod h1:VMDhyipoXH+ISg5oyGayEJlu4Ecgy2N5YJFjjFfT9Uw=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
-github.com/tdewolff/minify/v2 v2.7.3/go.mod h1:BkDSm8aMMT0ALGmpt7j3Ra7nLUgZL0qhyrAHXwxcy5w=
-github.com/tdewolff/parse/v2 v2.4.2/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho=
-github.com/tdewolff/test v1.0.6/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
-github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
-github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
-github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
-github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
-github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
-github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
-github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
-github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
-github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
-github.com/uber/jaeger-client-go v2.24.0+incompatible h1:CGchgJcHsDd2jWnaL4XngByMrXoGHh3n8oCqAKx0uMo=
-github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
-github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
-github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
-github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
+github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
+github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
+github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
+github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
+github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
+github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
+github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
+github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
-github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2ellBfvnqc=
-github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
+github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
-github.com/wealdtech/go-ens/v3 v3.5.1 h1:0VqkCjIGfIVdwHIf2QqYWWt3bbR1UE7RwBGx7YPpufQ=
-github.com/wealdtech/go-ens/v3 v3.5.1/go.mod h1:bVuYoWYEEeEu7Zy95rIMjPR34QFJarxt8p84ywSo0YM=
+github.com/wealdtech/go-ens/v3 v3.6.0 h1:EAByZlHRQ3vxqzzwNi0GvEq1AjVozfWO4DMldHcoVg8=
+github.com/wealdtech/go-ens/v3 v3.6.0/go.mod h1:hcmMr9qPoEgVSEXU2Bwzrn/9NczTWZ1rE53jIlqUpzw=
github.com/wealdtech/go-multicodec v1.4.0 h1:iq5PgxwssxnXGGPTIK1srvt6U5bJwIp7k6kBrudIWxg=
github.com/wealdtech/go-multicodec v1.4.0/go.mod h1:aedGMaTeYkIqi/KCPre1ho5rTb3hGpu/snBOS3GQLw4=
-github.com/wealdtech/go-string2eth v1.1.0 h1:USJQmysUrBYYmZs7d45pMb90hRSyEwizP7lZaOZLDAw=
-github.com/wealdtech/go-string2eth v1.1.0/go.mod h1:RUzsLjJtbZaJ/3UKn9kY19a/vCCUHtEWoUW3uiK6yGU=
-github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
-github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
-github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
+github.com/wealdtech/go-string2eth v1.2.1 h1:u9sofvGFkp+uvTg4Nvsvy5xBaiw8AibGLLngfC4F76g=
+github.com/wealdtech/go-string2eth v1.2.1/go.mod h1:9uwxm18zKZfrReXrGIbdiRYJtbE91iGcj6TezKKEx80=
+github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
+github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
+github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
gitlab.com/nolash/go-mockbytes v0.0.7 h1:9XVFpEfY67kGBVJve3uV19kzqORdlo7V+q09OE6Yo54=
gitlab.com/nolash/go-mockbytes v0.0.7/go.mod h1:KKOpNTT39j2Eo+P6uUTOncntfeKY6AFh/2CxuD5MpgE=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -970,56 +780,44 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
-go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
-go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk=
-go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
+go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
+go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
+go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
-go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
+go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
-golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200320181102-891825fb96df/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
+golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
+golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1033,8 +831,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
-golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
+golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
+golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1057,23 +857,20 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
+golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -1090,30 +887,31 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
+golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
+golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1136,21 +934,18 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
-golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181031143558-9b800f95dbbc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1159,21 +954,14 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1183,79 +971,77 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
+golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
+golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
+golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
+golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
+golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -1265,7 +1051,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1273,7 +1058,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1294,18 +1078,18 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
+golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
-gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
@@ -1344,7 +1128,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -1352,7 +1135,6 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
@@ -1400,45 +1182,29 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
-google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
-gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
-gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
-gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1448,24 +1214,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
-lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
-lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
-resenje.org/daemon v0.1.2/go.mod h1:mF5JRpH3EbrxI9WoeKY78e6PqSsbBtX9jAQL5vj/GBA=
-resenje.org/email v0.1.3/go.mod h1:OhAVLRG3vqd9NSgayN3pAgzxTmc2B6mAefgShZvEgf0=
+lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
+lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
resenje.org/feed v0.1.2 h1:3OianQkoI4EalWx1SlzHtGjUMsoB4XTJQbeehWiyeFI=
resenje.org/feed v0.1.2/go.mod h1:ABlv4P3svuZY3dkZq3un+XIEoX+TDwbGEkjLcSP8TnM=
-resenje.org/jsonhttp v0.2.0/go.mod h1:EDyeguyTWj2fU3D3SCE0qNTgthzyEkHYLM1uu0uikHU=
-resenje.org/logging v0.1.5/go.mod h1:1IdoCm3+UwYfsplxDGV2pHCkUrLlQzlWwp4r28XfPx4=
-resenje.org/marshal v0.1.1/go.mod h1:P7Cla6Ju5CFvW4Y8JbRgWX1Hcy4L1w4qcCsyadO7G94=
-resenje.org/multex v0.1.0 h1:am9Ndt8dIAeGVaztD8ClsSX+e0EP3mj6UdsvjukKZig=
-resenje.org/multex v0.1.0/go.mod h1:3rHOoMrzqLNzgGWPcl/1GfzN52g7iaPXhbvTQ8TjGaM=
-resenje.org/recovery v0.1.1/go.mod h1:3S6aCVKMJEWsSAb61oZTteaiqkIfQPTr1RdiWnRbhME=
-resenje.org/singleflight v0.4.0 h1:NdOEhCxEikK2S2WxGjZV9EGSsItolQKslOOi6pE1tJc=
-resenje.org/singleflight v0.4.0/go.mod h1:lAgQK7VfjG6/pgredbQfmV0RvG/uVhKo6vSuZ0vCWfk=
-resenje.org/web v0.4.3 h1:G9vceKKGvsVg0WpyafJEEMHfstoxSO8rG/1Bo7fOkhw=
-resenje.org/web v0.4.3/go.mod h1:GZw/Jt7IGIYlytsyGdAV5CytZnaQu7GV2u1LLuViihc=
-resenje.org/x v0.2.4/go.mod h1:1b2Xpo29FRc3IMvg/u46/IyjySl5IjvtuSjXTA/AOnk=
+resenje.org/feed v0.1.3 h1:xKJ4wjw7FUl/KVnUfYcGJbDH2aHnaIPTM8NFRaWOIww=
+resenje.org/feed v0.1.3/go.mod h1:ABlv4P3svuZY3dkZq3un+XIEoX+TDwbGEkjLcSP8TnM=
+resenje.org/multex v0.2.0 h1:y1S8+bItGZo0lberxtQi9IhbWTpvRezhCWIFvt12VmU=
+resenje.org/multex v0.2.0/go.mod h1:z+E+cUHGTgpqYn+P3yFOnC92i3X7rStzSur4rjOZM9s=
+resenje.org/singleflight v0.4.3 h1:l7foFYg8X/VEHPxWs1K/Pw77807RMVzvXgWGb0J1sdM=
+resenje.org/singleflight v0.4.3/go.mod h1:lAgQK7VfjG6/pgredbQfmV0RvG/uVhKo6vSuZ0vCWfk=
+resenje.org/web v0.9.7 h1:a/bBtJk67ahwrfjkJZtUKlE1us49lPlxgtOociRZtaQ=
+resenje.org/web v0.9.7/go.mod h1:sg/qZ4KJcBbqbj7VkRhbE7rXOCugk/l8hpqgFr2WKCE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
diff --git a/packaging/bee.yaml b/packaging/bee.yaml
index 8176dfb47df..9c3bd436f29 100644
--- a/packaging/bee.yaml
+++ b/packaging/bee.yaml
@@ -49,7 +49,7 @@ data-dir: "/var/lib/bee"
## P2P listen address
# p2p-addr: :1634
## enable P2P WebSocket transport
-# p2p-ws-enable: false
+# p2p-ws-enable: true
## password for decrypting keys
# password: ""
## path to a file that contains password for decrypting keys
@@ -78,8 +78,6 @@ password-file: "/var/lib/bee/password"
# resolver-options: []
## forces the node to resync postage contract data
# resync: false
-## skip postage snapshot
-# skip-postage-snapshot: false
## staking contract address
# staking-address: ""
## lru memory caching capacity in number of statestore entries
@@ -117,4 +115,4 @@ password-file: "/var/lib/bee/password"
## send a welcome message string during handshakes
# welcome-message: ""
## withdrawal target addresses
-# withdrawal-addresses-whitelist: []
+# withdrawal-addresses-whitelist: []
\ No newline at end of file
diff --git a/packaging/homebrew-amd64/bee.yaml b/packaging/homebrew-amd64/bee.yaml
index c771e5d72c2..0ebaf0a72f3 100644
--- a/packaging/homebrew-amd64/bee.yaml
+++ b/packaging/homebrew-amd64/bee.yaml
@@ -49,7 +49,7 @@ data-dir: "/usr/local/var/lib/swarm-bee"
## P2P listen address
# p2p-addr: :1634
## enable P2P WebSocket transport
-# p2p-ws-enable: false
+# p2p-ws-enable: true
## password for decrypting keys
# password: ""
## path to a file that contains password for decrypting keys
@@ -78,8 +78,6 @@ password-file: "/usr/local/var/lib/swarm-bee/password"
# resolver-options: []
## forces the node to resync postage contract data
# resync: false
-## skip postage snapshot
-# skip-postage-snapshot: false
## staking contract address
# staking-address: ""
## lru memory caching capacity in number of statestore entries
@@ -117,4 +115,4 @@ password-file: "/usr/local/var/lib/swarm-bee/password"
## send a welcome message string during handshakes
# welcome-message: ""
## withdrawal target addresses
-# withdrawal-addresses-whitelist: []
+# withdrawal-addresses-whitelist: []
\ No newline at end of file
diff --git a/packaging/homebrew-arm64/bee.yaml b/packaging/homebrew-arm64/bee.yaml
index 057d6c54d32..2a66f4ebd71 100644
--- a/packaging/homebrew-arm64/bee.yaml
+++ b/packaging/homebrew-arm64/bee.yaml
@@ -49,7 +49,7 @@ data-dir: "/opt/homebrew/var/lib/swarm-bee"
## P2P listen address
# p2p-addr: :1634
## enable P2P WebSocket transport
-# p2p-ws-enable: false
+# p2p-ws-enable: true
## password for decrypting keys
# password: ""
## path to a file that contains password for decrypting keys
@@ -78,8 +78,6 @@ password-file: "/opt/homebrew/var/lib/swarm-bee/password"
# resolver-options: []
## forces the node to resync postage contract data
# resync: false
-## skip postage snapshot
-# skip-postage-snapshot: false
## staking contract address
# staking-address: ""
## lru memory caching capacity in number of statestore entries
@@ -117,4 +115,4 @@ password-file: "/opt/homebrew/var/lib/swarm-bee/password"
## send a welcome message string during handshakes
# welcome-message: ""
## withdrawal target addresses
-# withdrawal-addresses-whitelist: []
+# withdrawal-addresses-whitelist: []
\ No newline at end of file
diff --git a/packaging/scoop/bee.yaml b/packaging/scoop/bee.yaml
index d2f97fedd33..c6d16a3676e 100644
--- a/packaging/scoop/bee.yaml
+++ b/packaging/scoop/bee.yaml
@@ -49,7 +49,7 @@ data-dir: "./data"
## P2P listen address
# p2p-addr: :1634
## enable P2P WebSocket transport
-# p2p-ws-enable: false
+# p2p-ws-enable: true
## password for decrypting keys
# password: ""
## path to a file that contains password for decrypting keys
@@ -78,8 +78,6 @@ password-file: "./password"
# resolver-options: []
## forces the node to resync postage contract data
# resync: false
-## skip postage snapshot
-# skip-postage-snapshot: false
## staking contract address
# staking-address: ""
## lru memory caching capacity in number of statestore entries
@@ -117,4 +115,4 @@ password-file: "./password"
## send a welcome message string during handshakes
# welcome-message: ""
## withdrawal target addresses
-# withdrawal-addresses-whitelist: []
+# withdrawal-addresses-whitelist: []
\ No newline at end of file
diff --git a/pkg/accounting/accounting.go b/pkg/accounting/accounting.go
index 35316e3a73b..622a5c562d7 100644
--- a/pkg/accounting/accounting.go
+++ b/pkg/accounting/accounting.go
@@ -1,9 +1,6 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package accounting provides functionalities needed
-// to do per-peer accounting.
package accounting
import (
@@ -11,7 +8,6 @@ import (
"errors"
"fmt"
"math/big"
- "strings"
"sync"
"time"
@@ -23,131 +19,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/swarm"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "accounting"
-
-const (
- linearCheckpointNumber = 1800
- linearCheckpointStep = 100
-)
-
-var (
- _ Interface = (*Accounting)(nil)
- balancesPrefix = "accounting_balance_"
- balancesSurplusPrefix = "accounting_surplusbalance_"
- balancesOriginatedPrefix = "accounting_originatedbalance_"
- // fraction of the refresh rate that is the minimum amount for monetary settlement;
- // chosen to prevent tiny payments while still allowing small payments in environments with lower payment thresholds
- minimumPaymentDivisor = int64(5)
- failedSettlementInterval = int64(10) // seconds
-)
-
-// Interface is the Accounting interface.
-type Interface interface {
- // PrepareCredit returns an accounting Action that reserves the price, to prevent overspending in case of concurrent requests.
- PrepareCredit(ctx context.Context, peer swarm.Address, price uint64, originated bool) (Action, error)
- // PrepareDebit returns an accounting Action for the later debit to be executed on and to implement shadowing a possibly credited part of reserve on the other side.
- PrepareDebit(ctx context.Context, peer swarm.Address, price uint64) (Action, error)
- // Balance returns the current balance for the given peer.
- Balance(peer swarm.Address) (*big.Int, error)
- // SurplusBalance returns the current surplus balance for the given peer.
- SurplusBalance(peer swarm.Address) (*big.Int, error)
- // Balances returns balances for all known peers.
- Balances() (map[string]*big.Int, error)
- // CompensatedBalance returns the current balance decreased by the current surplus balance for the given peer.
- CompensatedBalance(peer swarm.Address) (*big.Int, error)
- // CompensatedBalances returns the compensated balances for all known peers.
- CompensatedBalances() (map[string]*big.Int, error)
- // PeerAccounting returns the associated values for all known peers
- PeerAccounting() (map[string]PeerInfo, error)
-}
-
-// Action represents an accounting action that can be applied
-type Action interface {
- // Cleanup cleans up an action. Must be called whether it was applied or not.
- Cleanup()
- // Apply applies an action
- Apply() error
-}
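
For orientation, the credit flow removed above follows a prepare/apply pattern: PrepareCredit reserves the price, Apply commits it, and Cleanup must always run so an unapplied reservation is released. A minimal caller-side sketch, assuming an Accounting instance acc and a hypothetical doRequest helper standing in for the actual peer interaction:

action, err := acc.PrepareCredit(ctx, peer, price, true)
if err != nil {
	return err // e.g. ErrOverdraft if the reserve would exceed the payment threshold
}
defer action.Cleanup() // must run whether or not Apply was called

if err := doRequest(ctx, peer); err != nil {
	return err // Cleanup releases the reservation
}
return action.Apply() // credit the peer for the served request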
-
-// debitAction represents a future debit
-type debitAction struct {
- accounting *Accounting
- price *big.Int
- peer swarm.Address
- accountingPeer *accountingPeer
- applied bool
-}
-
-// creditAction represents a future credit
-type creditAction struct {
- accounting *Accounting
- price *big.Int
- peer swarm.Address
- accountingPeer *accountingPeer
- originated bool
- applied bool
-}
-
-// PayFunc is the function used for async monetary settlement
-type PayFunc func(context.Context, swarm.Address, *big.Int)
-
-// RefreshFunc is the function used for sync time-based settlement
-type RefreshFunc func(context.Context, swarm.Address, *big.Int)
-
-// Mutex is a drop-in replacement for sync.Mutex;
-// it will not lock if the context is expired.
-type Mutex struct {
- mu chan struct{}
-}
-
-func NewMutex() *Mutex {
- return &Mutex{
- mu: make(chan struct{}, 1), // unlocked by default
- }
-}
-
-var ErrFailToLock = errors.New("failed to lock")
-
-func (m *Mutex) TryLock(ctx context.Context) error {
- select {
- case m.mu <- struct{}{}:
- return nil // locked
- case <-ctx.Done():
- return fmt.Errorf("%w: %w", ctx.Err(), ErrFailToLock)
- }
-}
-
-func (m *Mutex) Lock() {
- m.mu <- struct{}{}
-}
-
-func (m *Mutex) Unlock() {
- <-m.mu
-}
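
The Mutex above encodes the lock state in a buffered channel of capacity one: a send acquires the lock, a receive releases it, and TryLock selects against ctx.Done() so a caller can give up waiting. A usage sketch with assumed surrounding code (context and time imports implied, illustrative only):

mu := NewMutex()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()

if err := mu.TryLock(ctx); err != nil {
	return err // context expired before the lock was acquired
}
defer mu.Unlock()
// ... mutate the accountingPeer state under the lock ...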
-
-// accountingPeer holds all in-memory accounting information for one peer.
-type accountingPeer struct {
- lock *Mutex // lock to be held during any accounting action for this peer
- reservedBalance *big.Int // amount currently reserved for active peer interaction
- shadowReservedBalance *big.Int // amount potentially to be debited for active peer interaction
- refreshReservedBalance *big.Int // amount debt potentially decreased during an ongoing refreshment
- ghostBalance *big.Int // amount potentially could have been debited for but was not
- paymentThreshold *big.Int // the threshold at which the peer expects us to pay
- earlyPayment *big.Int // individual early payment threshold calculated from payment threshold and early payment percentage
- paymentThresholdForPeer *big.Int // individual payment threshold at which the peer is expected to pay
- disconnectLimit *big.Int // individual disconnect threshold calculated from tolerance and payment threshold for peer
- refreshTimestampMilliseconds int64 // last time we attempted and succeeded time-based settlement
- refreshReceivedTimestamp int64 // last time we accepted time-based settlement
- paymentOngoing bool // indicate if we are currently settling with the peer
- refreshOngoing bool // indicates if we are currently refreshing with the peer
- lastSettlementFailureTimestamp int64 // time of last unsuccessful attempt to issue a cheque
- connected bool // indicates whether the peer is currently connected
- fullNode bool // the peer connected as full node or light node
- totalDebtRepay *big.Int // since being connected, amount of cumulative debt settled by the peer
- thresholdGrowAt *big.Int // cumulative debt to be settled by the peer in order to give threshold upgrade
-}
-
// Accounting is the main implementation of the accounting interface.
type Accounting struct {
// Mutex for accessing the accountingPeers map.
@@ -187,21 +58,6 @@ type Accounting struct {
lightThresholdGrowChange *big.Int
}
-var (
- // ErrOverdraft denotes the expected debt in Reserve would exceed the payment thresholds.
- ErrOverdraft = errors.New("attempted overdraft")
- // ErrDisconnectThresholdExceeded denotes a peer has exceeded the disconnect threshold.
- ErrDisconnectThresholdExceeded = errors.New("disconnect threshold exceeded")
- // ErrPeerNoBalance is the error returned if no balance in store exists for a peer
- ErrPeerNoBalance = errors.New("no balance for peer")
- // ErrInvalidValue denotes an invalid value read from store
- ErrInvalidValue = errors.New("invalid value")
- // ErrOverRelease denotes attempting to release more balance than was reserved for a peer.
- ErrOverRelease = errors.New("attempting to release more balance than was reserved for peer")
- // ErrEnforceRefresh denotes a refused allowance expectation.
- ErrEnforceRefresh = errors.New("allowance expectation refused")
-)
-
// NewAccounting creates a new Accounting instance with the provided options.
func NewAccounting(
PaymentThreshold *big.Int,
@@ -241,31 +97,6 @@ func NewAccounting(
}, nil
}
-func (a *Accounting) getIncreasedExpectedDebt(peer swarm.Address, accountingPeer *accountingPeer, bigPrice *big.Int) (*big.Int, *big.Int, error) {
- nextReserved := new(big.Int).Add(accountingPeer.reservedBalance, bigPrice)
-
- currentBalance, err := a.Balance(peer)
- if err != nil && !errors.Is(err, ErrPeerNoBalance) {
- return nil, nil, fmt.Errorf("failed to load balance: %w", err)
- }
- currentDebt := new(big.Int).Neg(currentBalance)
- if currentDebt.Cmp(big.NewInt(0)) < 0 {
- currentDebt.SetInt64(0)
- }
-
- // debt if all reserved operations are successfully credited excluding debt created by surplus balance
- expectedDebt := new(big.Int).Add(currentDebt, nextReserved)
-
- // additionalDebt is debt created by incoming payments which we don't consider debt for monetary settlement purposes
- additionalDebt, err := a.SurplusBalance(peer)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to load surplus balance: %w", err)
- }
-
- // debt if all reserved operations are successfully credited including debt created by surplus balance
- return new(big.Int).Add(expectedDebt, additionalDebt), currentBalance, nil
-}
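
In plain arithmetic, the helper above computes expectedDebt = max(0, -balance) + reservedBalance + price, then adds the surplus balance on top. A worked example with purely illustrative numbers (math/big implied):

func exampleExpectedDebt() *big.Int {
	balance := big.NewInt(-300) // we owe the peer 300
	reserved := big.NewInt(100) // already reserved for in-flight requests
	price := big.NewInt(50)     // the new request being prepared
	surplus := big.NewInt(20)   // debt created by incoming payments

	debt := new(big.Int).Neg(balance) // 300
	if debt.Sign() < 0 {
		debt.SetInt64(0) // a positive balance means no debt
	}
	expected := new(big.Int).Add(debt, new(big.Int).Add(reserved, price)) // 450
	return expected.Add(expected, surplus)                                // 470
}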
-
func (a *Accounting) PrepareCredit(ctx context.Context, peer swarm.Address, price uint64, originated bool) (Action, error) {
accountingPeer := a.getAccountingPeer(peer)
@@ -429,539 +260,6 @@ func (c *creditAction) Apply() error {
return nil
}
-func (c *creditAction) Cleanup() {
- if c.applied {
- return
- }
-
- c.accountingPeer.lock.Lock()
- defer c.accountingPeer.lock.Unlock()
-
- if c.price.Cmp(c.accountingPeer.reservedBalance) > 0 {
- c.accounting.logger.Error(nil, "attempting to release more balance than was reserved for peer", "peer_address", c.peer)
- c.accountingPeer.reservedBalance.SetUint64(0)
- } else {
- c.accountingPeer.reservedBalance.Sub(c.accountingPeer.reservedBalance, c.price)
- }
-}
-
-// Settle all debt with a peer. The lock on the accountingPeer must be held when
-// called.
-func (a *Accounting) settle(peer swarm.Address, balance *accountingPeer) error {
- now := a.timeNow()
- timeElapsedInMilliseconds := now.UnixMilli() - balance.refreshTimestampMilliseconds
-
- // get debt towards peer decreased by any amount that is to be debited soon
- paymentAmount, err := a.shadowBalance(peer, balance)
- if err != nil {
- return err
- }
- // Don't do anything if there is not enough actual debt
- // This might be the case if the peer owes us and the total reserve for a peer exceeds the payment threshold.
- // The minimum amount that triggers settlement is one refresh rate, to avoid ineffective use of refreshments
- if paymentAmount.Cmp(a.refreshRate) >= 0 {
- // Only trigger refreshment if last refreshment finished at least 1000 milliseconds ago
- // This is to avoid a peer refusing refreshment because not enough time passed since last refreshment
- if timeElapsedInMilliseconds > 999 {
- if !balance.refreshOngoing {
- balance.refreshOngoing = true
- go a.refreshFunction(context.Background(), peer, paymentAmount)
- }
- }
-
- if a.payFunction != nil && !balance.paymentOngoing {
- // if a settlement failed recently, wait until failedSettlementInterval before trying again
- differenceInSeconds := now.Unix() - balance.lastSettlementFailureTimestamp
- if differenceInSeconds > failedSettlementInterval {
- // if there is no monetary settlement happening, check if there is something to settle
- // compute debt excluding debt created by incoming payments
- originatedBalance, err := a.OriginatedBalance(peer)
- if err != nil {
- if !errors.Is(err, ErrPeerNoBalance) {
- return fmt.Errorf("failed to load originated balance to settle: %w", err)
- }
- }
-
- paymentAmount := new(big.Int).Neg(originatedBalance)
-
- if paymentAmount.Cmp(a.minimumPayment) >= 0 {
- timeElapsedInSeconds := (a.timeNow().UnixMilli() - balance.refreshTimestampMilliseconds) / 1000
- refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate)
- currentBalance, err := a.Balance(peer)
- if err != nil && !errors.Is(err, ErrPeerNoBalance) {
- return fmt.Errorf("failed to load balance: %w", err)
- }
-
- debt := new(big.Int).Neg(currentBalance)
- decreasedDebt := new(big.Int).Sub(debt, refreshDue)
- expectedDecreasedDebt := new(big.Int).Sub(decreasedDebt, balance.shadowReservedBalance)
-
- if paymentAmount.Cmp(expectedDecreasedDebt) > 0 {
- paymentAmount.Set(expectedDecreasedDebt)
- }
-
- // if the remaining debt is still larger than some minimum amount, trigger monetary settlement
- if paymentAmount.Cmp(a.minimumPayment) >= 0 {
- balance.paymentOngoing = true
- // add settled amount to shadow reserve before sending it
- balance.shadowReservedBalance.Add(balance.shadowReservedBalance, paymentAmount)
- // if a refreshment is ongoing, add this amount sent to cumulative potential debt decrease during refreshment
- if balance.refreshOngoing {
- balance.refreshReservedBalance = new(big.Int).Add(balance.refreshReservedBalance, paymentAmount)
- }
- a.wg.Add(1)
- go a.payFunction(context.Background(), peer, paymentAmount)
- }
- }
- }
- }
- }
- return nil
-}
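
The control flow of settle() is easier to see stripped of persistence: refreshment fires once the shadow-adjusted debt reaches one refresh rate and at least a second has passed since the last refreshment, while a cheque is only issued for originated debt above the minimum payment, capped by the debt that refreshment is not expected to cover. A condensed sketch; every identifier here is a stand-in for the real fields, not the actual implementation:

if paymentAmount.Cmp(refreshRate) >= 0 { // at least one refresh rate of debt
	if msSinceRefresh > 999 && !refreshOngoing {
		refreshOngoing = true
		go refresh(ctx, peer, paymentAmount) // time-based settlement
	}
	if payEnabled && !paymentOngoing && secsSinceFailure > failedSettlementInterval {
		cheque := new(big.Int).Neg(originatedBalance) // settle only originated debt
		if cheque.Cmp(expectedDecreasedDebt) > 0 {
			cheque.Set(expectedDecreasedDebt) // cap by debt left after refreshment and shadow reserve
		}
		if cheque.Cmp(minimumPayment) >= 0 {
			paymentOngoing = true
			go pay(ctx, peer, cheque) // monetary settlement
		}
	}
}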
-
-// Balance returns the current balance for the given peer.
-func (a *Accounting) Balance(peer swarm.Address) (balance *big.Int, err error) {
- err = a.store.Get(peerBalanceKey(peer), &balance)
-
- if err != nil {
- if errors.Is(err, storage.ErrNotFound) {
- return big.NewInt(0), ErrPeerNoBalance
- }
- return nil, err
- }
-
- return balance, nil
-}
-
-// OriginatedBalance returns the current originated balance for the given peer.
-func (a *Accounting) OriginatedBalance(peer swarm.Address) (balance *big.Int, err error) {
- err = a.store.Get(originatedBalanceKey(peer), &balance)
-
- if err != nil {
- if errors.Is(err, storage.ErrNotFound) {
- return big.NewInt(0), ErrPeerNoBalance
- }
- return nil, err
- }
-
- return balance, nil
-}
-
-// SurplusBalance returns the current surplus balance for the given peer.
-func (a *Accounting) SurplusBalance(peer swarm.Address) (balance *big.Int, err error) {
- err = a.store.Get(peerSurplusBalanceKey(peer), &balance)
-
- if err != nil {
- if errors.Is(err, storage.ErrNotFound) {
- return big.NewInt(0), nil
- }
- return nil, err
- }
-
- if balance.Cmp(big.NewInt(0)) < 0 {
- return nil, ErrInvalidValue
- }
-
- return balance, nil
-}
-
-// CompensatedBalance returns balance decreased by surplus balance
-func (a *Accounting) CompensatedBalance(peer swarm.Address) (compensated *big.Int, err error) {
- surplus, err := a.SurplusBalance(peer)
- if err != nil {
- return nil, err
- }
-
- balance, err := a.Balance(peer)
- if err != nil {
- if !errors.Is(err, ErrPeerNoBalance) {
- return nil, err
- }
- }
-
- // if surplus is 0 and peer has no balance, propagate ErrPeerNoBalance
- if surplus.Cmp(big.NewInt(0)) == 0 && errors.Is(err, ErrPeerNoBalance) {
- return nil, err
- }
- // Compensated balance is balance decreased by surplus balance
- compensated = new(big.Int).Sub(balance, surplus)
-
- return compensated, nil
-}
-
-// peerBalanceKey returns the balance storage key for the given peer.
-func peerBalanceKey(peer swarm.Address) string {
- return fmt.Sprintf("%s%s", balancesPrefix, peer.String())
-}
-
-// peerSurplusBalanceKey returns the surplus balance storage key for the given peer
-func peerSurplusBalanceKey(peer swarm.Address) string {
- return fmt.Sprintf("%s%s", balancesSurplusPrefix, peer.String())
-}
-
-func originatedBalanceKey(peer swarm.Address) string {
- return fmt.Sprintf("%s%s", balancesOriginatedPrefix, peer.String())
-}
-
-// getAccountingPeer returns the accountingPeer for a given swarm address.
-// If not found in memory it will initialize it.
-func (a *Accounting) getAccountingPeer(peer swarm.Address) *accountingPeer {
- a.accountingPeersMu.Lock()
- defer a.accountingPeersMu.Unlock()
-
- peerData, ok := a.accountingPeers[peer.String()]
- if !ok {
- peerData = &accountingPeer{
- lock: NewMutex(),
- reservedBalance: big.NewInt(0),
- refreshReservedBalance: big.NewInt(0),
- shadowReservedBalance: big.NewInt(0),
- ghostBalance: big.NewInt(0),
- totalDebtRepay: big.NewInt(0),
- paymentThreshold: new(big.Int).Set(a.paymentThreshold),
- paymentThresholdForPeer: new(big.Int).Set(a.paymentThreshold),
- disconnectLimit: new(big.Int).Set(a.disconnectLimit),
- thresholdGrowAt: new(big.Int).Set(a.thresholdGrowStep),
- // initially assume the peer has the same threshold as us
- earlyPayment: percentOf(100-a.earlyPayment, a.paymentThreshold),
- connected: false,
- }
- a.accountingPeers[peer.String()] = peerData
- }
-
- return peerData
-}
-
-// notifyPaymentThresholdUpgrade is called when the cumulative debt settled by a peer reaches the current checkpoint,
-// to set the next checkpoint and increase the payment threshold given to the peer by one refresh rate.
-// Must be called under the accountingPeer lock.
-func (a *Accounting) notifyPaymentThresholdUpgrade(peer swarm.Address, accountingPeer *accountingPeer) {
-
- // get appropriate linear growth limit based on whether the peer is a full node or a light node
- thresholdGrowChange := new(big.Int).Set(a.thresholdGrowChange)
- if !accountingPeer.fullNode {
- thresholdGrowChange.Set(a.lightThresholdGrowChange)
- }
-
- // if current checkpoint already passed linear growth limit, set next checkpoint exponentially
- if accountingPeer.thresholdGrowAt.Cmp(thresholdGrowChange) >= 0 {
- accountingPeer.thresholdGrowAt = new(big.Int).Mul(accountingPeer.thresholdGrowAt, big.NewInt(2))
- } else {
- // otherwise set next linear checkpoint
- if accountingPeer.fullNode {
- accountingPeer.thresholdGrowAt = new(big.Int).Add(accountingPeer.thresholdGrowAt, a.thresholdGrowStep)
- } else {
- accountingPeer.thresholdGrowAt = new(big.Int).Add(accountingPeer.thresholdGrowAt, a.lightThresholdGrowStep)
- }
- }
-
- // get appropriate refresh rate
- refreshRate := new(big.Int).Set(a.refreshRate)
- if !accountingPeer.fullNode {
- refreshRate = new(big.Int).Set(a.lightRefreshRate)
- }
-
- // increase given threshold by refresh rate
- accountingPeer.paymentThresholdForPeer = new(big.Int).Add(accountingPeer.paymentThresholdForPeer, refreshRate)
- // recalculate disconnectLimit for peer
- accountingPeer.disconnectLimit = percentOf(100+a.paymentTolerance, accountingPeer.paymentThresholdForPeer)
-
- // announce new payment threshold to peer
- err := a.pricing.AnnouncePaymentThreshold(context.Background(), peer, accountingPeer.paymentThresholdForPeer)
- if err != nil {
- a.logger.Error(err, "announcing increased payment threshold", "value", accountingPeer.paymentThresholdForPeer, "peer_address", peer)
- }
-}
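
percentOf is referenced above but defined outside this hunk; a plausible integer-math sketch of such a helper follows (an assumption, the package's real definition may differ):

// percentOf returns p percent of amount using integer division.
func percentOf(p int64, amount *big.Int) *big.Int {
	res := new(big.Int).Mul(big.NewInt(p), amount)
	return res.Div(res, big.NewInt(100))
}

For instance, with a payment tolerance of 25 and a threshold of 10000, the disconnect limit above would come out to percentOf(125, 10000) = 12500.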
-
-// Balances gets balances for all peers from store.
-func (a *Accounting) Balances() (map[string]*big.Int, error) {
- s := make(map[string]*big.Int)
-
- err := a.store.Iterate(balancesPrefix, func(key, val []byte) (stop bool, err error) {
- addr, err := balanceKeyPeer(key)
- if err != nil {
- return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
- }
-
- if _, ok := s[addr.String()]; !ok {
- var storevalue *big.Int
- err = a.store.Get(peerBalanceKey(addr), &storevalue)
- if err != nil {
- return false, fmt.Errorf("get peer %s balance: %w", addr.String(), err)
- }
-
- s[addr.String()] = storevalue
- }
-
- return false, nil
- })
-
- if err != nil {
- return nil, err
- }
-
- return s, nil
-}
-
-type PeerInfo struct {
- Balance *big.Int
- ConsumedBalance *big.Int
- ThresholdReceived *big.Int
- ThresholdGiven *big.Int
- CurrentThresholdReceived *big.Int
- CurrentThresholdGiven *big.Int
- SurplusBalance *big.Int
- ReservedBalance *big.Int
- ShadowReservedBalance *big.Int
- GhostBalance *big.Int
-}
-
-func (a *Accounting) PeerAccounting() (map[string]PeerInfo, error) {
- s := make(map[string]PeerInfo)
-
- a.accountingPeersMu.Lock()
- accountingPeersList := make(map[string]*accountingPeer)
- for peer, accountingPeer := range a.accountingPeers {
- accountingPeersList[peer] = accountingPeer
- }
- a.accountingPeersMu.Unlock()
-
- for peer, accountingPeer := range accountingPeersList {
-
- peerAddress := swarm.MustParseHexAddress(peer)
-
- balance, err := a.Balance(peerAddress)
- if errors.Is(err, ErrPeerNoBalance) {
- balance = big.NewInt(0)
- } else if err != nil {
- return nil, err
- }
-
- surplusBalance, err := a.SurplusBalance(peerAddress)
- if err != nil {
- return nil, err
- }
-
- accountingPeer.lock.Lock()
-
- t := a.timeNow()
-
- timeElapsedInSeconds := t.Unix() - accountingPeer.refreshReceivedTimestamp
- if timeElapsedInSeconds > 1 {
- timeElapsedInSeconds = 1
- }
-
- // get appropriate refresh rate
- refreshRate := new(big.Int).Set(a.refreshRate)
- if !accountingPeer.fullNode {
- refreshRate = new(big.Int).Set(a.lightRefreshRate)
- }
-
- refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), refreshRate)
- currentThresholdGiven := new(big.Int).Add(accountingPeer.disconnectLimit, refreshDue)
-
- timeElapsedInSeconds = (t.UnixMilli() - accountingPeer.refreshTimestampMilliseconds) / 1000
- if timeElapsedInSeconds > 1 {
- timeElapsedInSeconds = 1
- }
-
- // compute the refreshment due at our own refresh rate
- refreshDue = new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate)
- currentThresholdReceived := new(big.Int).Add(accountingPeer.paymentThreshold, refreshDue)
-
- s[peer] = PeerInfo{
- Balance: new(big.Int).Sub(balance, surplusBalance),
- ConsumedBalance: new(big.Int).Set(balance),
- ThresholdReceived: new(big.Int).Set(accountingPeer.paymentThreshold),
- CurrentThresholdReceived: currentThresholdReceived,
- CurrentThresholdGiven: currentThresholdGiven,
- ThresholdGiven: new(big.Int).Set(accountingPeer.paymentThresholdForPeer),
- SurplusBalance: new(big.Int).Set(surplusBalance),
- ReservedBalance: new(big.Int).Set(accountingPeer.reservedBalance),
- ShadowReservedBalance: new(big.Int).Set(accountingPeer.shadowReservedBalance),
- GhostBalance: new(big.Int).Set(accountingPeer.ghostBalance),
- }
- accountingPeer.lock.Unlock()
- }
-
- return s, nil
-}
-
-// CompensatedBalances gets compensated balances for all peers from the store.
-func (a *Accounting) CompensatedBalances() (map[string]*big.Int, error) {
- s := make(map[string]*big.Int)
-
- err := a.store.Iterate(balancesPrefix, func(key, val []byte) (stop bool, err error) {
- addr, err := balanceKeyPeer(key)
- if err != nil {
- return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
- }
- if _, ok := s[addr.String()]; !ok {
- value, err := a.CompensatedBalance(addr)
- if err != nil {
- return false, fmt.Errorf("get peer %s balance: %w", addr.String(), err)
- }
-
- s[addr.String()] = value
- }
-
- return false, nil
- })
-
- if err != nil {
- return nil, err
- }
-
- err = a.store.Iterate(balancesSurplusPrefix, func(key, val []byte) (stop bool, err error) {
- addr, err := surplusBalanceKeyPeer(key)
- if err != nil {
- return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
- }
- if _, ok := s[addr.String()]; !ok {
- value, err := a.CompensatedBalance(addr)
- if err != nil {
- return false, fmt.Errorf("get peer %s balance: %w", addr.String(), err)
- }
-
- s[addr.String()] = value
- }
-
- return false, nil
- })
-
- if err != nil {
- return nil, err
- }
-
- return s, nil
-}
-
-// balanceKeyPeer returns the embedded peer from the balance storage key.
-func balanceKeyPeer(key []byte) (swarm.Address, error) {
- k := string(key)
-
- split := strings.SplitAfter(k, balancesPrefix)
- if len(split) != 2 {
- return swarm.ZeroAddress, errors.New("no peer in key")
- }
-
- addr, err := swarm.ParseHexAddress(split[1])
- if err != nil {
- return swarm.ZeroAddress, err
- }
-
- return addr, nil
-}
-
-func surplusBalanceKeyPeer(key []byte) (swarm.Address, error) {
- k := string(key)
-
- split := strings.SplitAfter(k, balancesSurplusPrefix)
- if len(split) != 2 {
- return swarm.ZeroAddress, errors.New("no peer in key")
- }
-
- addr, err := swarm.ParseHexAddress(split[1])
- if err != nil {
- return swarm.ZeroAddress, err
- }
-
- return addr, nil
-}
-
-// PeerDebt returns the positive part of the sum of the outstanding balance and the shadow reserve
-func (a *Accounting) PeerDebt(peer swarm.Address) (*big.Int, error) {
- accountingPeer := a.getAccountingPeer(peer)
-
- accountingPeer.lock.Lock()
- defer accountingPeer.lock.Unlock()
-
- balance := new(big.Int)
- zero := big.NewInt(0)
-
- err := a.store.Get(peerBalanceKey(peer), &balance)
- if err != nil {
- if !errors.Is(err, storage.ErrNotFound) {
- return nil, err
- }
- balance = big.NewInt(0)
- }
-
- peerDebt := new(big.Int).Add(balance, accountingPeer.shadowReservedBalance)
-
- if peerDebt.Cmp(zero) < 0 {
- return zero, nil
- }
-
- return peerDebt, nil
-}
-
-// peerLatentDebt returns the sum of the positive part of the outstanding balance, shadow reserve and the ghost balance
-func (a *Accounting) peerLatentDebt(peer swarm.Address) (*big.Int, error) {
-
- accountingPeer := a.getAccountingPeer(peer)
-
- balance := new(big.Int)
- zero := big.NewInt(0)
-
- err := a.store.Get(peerBalanceKey(peer), &balance)
- if err != nil {
- if !errors.Is(err, storage.ErrNotFound) {
- return nil, err
- }
- balance = big.NewInt(0)
- }
-
- if balance.Cmp(zero) < 0 {
- balance.Set(zero)
- }
-
- peerDebt := new(big.Int).Add(balance, accountingPeer.shadowReservedBalance)
- peerLatentDebt := new(big.Int).Add(peerDebt, accountingPeer.ghostBalance)
-
- if peerLatentDebt.Cmp(zero) < 0 {
- return zero, nil
- }
-
- return peerLatentDebt, nil
-}
-
-// shadowBalance returns the current debt reduced by any potentially debitable amount held in shadowReservedBalance;
-// this is how much smaller our debt could appear to the other party if it is ahead of us in processing the credits corresponding to our shadow reserve
-func (a *Accounting) shadowBalance(peer swarm.Address, accountingPeer *accountingPeer) (shadowBalance *big.Int, err error) {
- balance := new(big.Int)
- zero := big.NewInt(0)
-
- err = a.store.Get(peerBalanceKey(peer), &balance)
- if err != nil {
- if errors.Is(err, storage.ErrNotFound) {
- return zero, nil
- }
- return nil, err
- }
-
- if balance.Cmp(zero) >= 0 {
- return zero, nil
- }
-
- negativeBalance := new(big.Int).Neg(balance)
-
- surplusBalance, err := a.SurplusBalance(peer)
- if err != nil {
- return nil, err
- }
-
- debt := new(big.Int).Add(negativeBalance, surplusBalance)
-
- if debt.Cmp(accountingPeer.shadowReservedBalance) < 0 {
- return zero, nil
- }
-
- shadowBalance = new(big.Int).Sub(negativeBalance, accountingPeer.shadowReservedBalance)
-
- return shadowBalance, nil
-}
-
// NotifyPaymentSent is triggered by async monetary settlement to update our balance and remove it's price from the shadow reserve
func (a *Accounting) NotifyPaymentSent(peer swarm.Address, amount *big.Int, receivedError error) {
loggerV2 := a.logger.V(2).Register()
@@ -1006,100 +304,6 @@ func (a *Accounting) NotifyPaymentSent(peer swarm.Address, amount *big.Int, rece
if err != nil {
a.logger.Warning("notify payment sent; failed to decrease originated balance", "error", err)
}
-
-}
-
-// NotifyPaymentThreshold should be called to notify accounting of changes in the payment threshold
-func (a *Accounting) NotifyPaymentThreshold(peer swarm.Address, paymentThreshold *big.Int) error {
- accountingPeer := a.getAccountingPeer(peer)
-
- accountingPeer.lock.Lock()
- defer accountingPeer.lock.Unlock()
-
- accountingPeer.paymentThreshold.Set(paymentThreshold)
- accountingPeer.earlyPayment.Set(percentOf(100-a.earlyPayment, paymentThreshold))
- return nil
-}
-
-// NotifyPaymentReceived is called by Settlement when we receive a payment.
-func (a *Accounting) NotifyPaymentReceived(peer swarm.Address, amount *big.Int) error {
- loggerV2 := a.logger.V(2).Register()
-
- accountingPeer := a.getAccountingPeer(peer)
-
- accountingPeer.lock.Lock()
- defer accountingPeer.lock.Unlock()
-
- accountingPeer.totalDebtRepay = new(big.Int).Add(accountingPeer.totalDebtRepay, amount)
-
- if accountingPeer.totalDebtRepay.Cmp(accountingPeer.thresholdGrowAt) > 0 {
- a.notifyPaymentThresholdUpgrade(peer, accountingPeer)
- }
-
- currentBalance, err := a.Balance(peer)
- if err != nil {
- if !errors.Is(err, ErrPeerNoBalance) {
- return err
- }
- }
-
- // if balance is already negative or zero, we credit full amount received to surplus balance and terminate early
- if currentBalance.Cmp(big.NewInt(0)) <= 0 {
- surplus, err := a.SurplusBalance(peer)
- if err != nil {
- return fmt.Errorf("failed to get surplus balance: %w", err)
- }
- increasedSurplus := new(big.Int).Add(surplus, amount)
-
- loggerV2.Debug("surplus crediting peer", "peer_address", peer, "amount", amount, "new_balance", increasedSurplus)
-
- err = a.store.Put(peerSurplusBalanceKey(peer), increasedSurplus)
- if err != nil {
- return fmt.Errorf("failed to persist surplus balance: %w", err)
- }
-
- return nil
- }
-
- // if current balance is positive, let's make a partial credit to
- newBalance := new(big.Int).Sub(currentBalance, amount)
-
- // Don't allow a payment to put us into debt
- // This is to prevent another node tricking us into settling by settling
- // first (e.g. send a bouncing cheque to trigger an honest cheque in swap).
- nextBalance := newBalance
- if newBalance.Cmp(big.NewInt(0)) < 0 {
- nextBalance = big.NewInt(0)
- }
-
- loggerV2.Debug("crediting peer", "peer_address", peer, "amount", amount, "new_balance", nextBalance)
-
- err = a.store.Put(peerBalanceKey(peer), nextBalance)
- if err != nil {
- return fmt.Errorf("failed to persist balance: %w", err)
- }
-
- // If payment would have put us into debt, rather, let's add to surplusBalance,
- // so as that an oversettlement attempt creates balance for future forwarding services
- // charges to be deducted of
- if newBalance.Cmp(big.NewInt(0)) < 0 {
- surplusGrowth := new(big.Int).Sub(amount, currentBalance)
-
- surplus, err := a.SurplusBalance(peer)
- if err != nil {
- return fmt.Errorf("failed to get surplus balance: %w", err)
- }
- increasedSurplus := new(big.Int).Add(surplus, surplusGrowth)
-
- loggerV2.Debug("surplus crediting peer due to refreshment", "peer_address", peer, "amount", surplusGrowth, "new_balance", increasedSurplus)
-
- err = a.store.Put(peerSurplusBalanceKey(peer), increasedSurplus)
- if err != nil {
- return fmt.Errorf("failed to persist surplus balance: %w", err)
- }
- }
-
- return nil
}
// NotifyRefreshmentSent is called by pseudosettle when refreshment is done or failed
@@ -1189,148 +393,6 @@ func (a *Accounting) NotifyRefreshmentSent(peer swarm.Address, attemptedAmount,
}
-// NotifyRefreshmentReceived is called by pseudosettle when we receive a time based settlement.
-func (a *Accounting) NotifyRefreshmentReceived(peer swarm.Address, amount *big.Int, timestamp int64) error {
- loggerV2 := a.logger.V(2).Register()
-
- accountingPeer := a.getAccountingPeer(peer)
-
- accountingPeer.lock.Lock()
- defer accountingPeer.lock.Unlock()
-
- accountingPeer.totalDebtRepay = new(big.Int).Add(accountingPeer.totalDebtRepay, amount)
-
- if accountingPeer.totalDebtRepay.Cmp(accountingPeer.thresholdGrowAt) > 0 {
- a.notifyPaymentThresholdUpgrade(peer, accountingPeer)
- }
-
- currentBalance, err := a.Balance(peer)
- if err != nil {
- if !errors.Is(err, ErrPeerNoBalance) {
- return err
- }
- }
-
- // Get nextBalance by increasing current balance with amount
- nextBalance := new(big.Int).Sub(currentBalance, amount)
-
- // We allow a refreshment to potentially put us into debt as it was previously negotiated and be limited to the peer's outstanding debt plus shadow reserve
- loggerV2.Debug("crediting peer", "peer_address", peer, "amount", amount, "new_balance", nextBalance)
- err = a.store.Put(peerBalanceKey(peer), nextBalance)
- if err != nil {
- return fmt.Errorf("failed to persist balance: %w", err)
- }
-
- accountingPeer.refreshReceivedTimestamp = timestamp
-
- return nil
-}
-
-// PrepareDebit prepares a debit operation by increasing the shadowReservedBalance
-func (a *Accounting) PrepareDebit(ctx context.Context, peer swarm.Address, price uint64) (Action, error) {
- loggerV2 := a.logger.V(2).Register()
-
- accountingPeer := a.getAccountingPeer(peer)
-
- if err := accountingPeer.lock.TryLock(ctx); err != nil {
- loggerV2.Debug("prepare debit; failed to acquire lock", "error", err)
- return nil, err
- }
-
- defer accountingPeer.lock.Unlock()
-
- if !accountingPeer.connected {
- return nil, errors.New("connection not initialized yet")
- }
-
- bigPrice := new(big.Int).SetUint64(price)
-
- accountingPeer.shadowReservedBalance = new(big.Int).Add(accountingPeer.shadowReservedBalance, bigPrice)
- // if a refreshment is ongoing, add this amount to the potential debt decrease during an ongoing refreshment
- if accountingPeer.refreshOngoing {
- accountingPeer.refreshReservedBalance = new(big.Int).Add(accountingPeer.refreshReservedBalance, bigPrice)
- }
-
- return &debitAction{
- accounting: a,
- price: bigPrice,
- peer: peer,
- accountingPeer: accountingPeer,
- applied: false,
- }, nil
-}
-
-func (a *Accounting) increaseBalance(peer swarm.Address, _ *accountingPeer, price *big.Int) (*big.Int, error) {
- loggerV2 := a.logger.V(2).Register()
-
- cost := new(big.Int).Set(price)
- // see if peer has surplus balance to deduct this transaction of
-
- surplusBalance, err := a.SurplusBalance(peer)
- if err != nil {
- return nil, fmt.Errorf("failed to get surplus balance: %w", err)
- }
-
- if surplusBalance.Cmp(big.NewInt(0)) > 0 {
- // get new surplus balance after deduct
- newSurplusBalance := new(big.Int).Sub(surplusBalance, cost)
-
- // if nothing left for debiting, store new surplus balance and return from debit
- if newSurplusBalance.Cmp(big.NewInt(0)) >= 0 {
- loggerV2.Debug("surplus debiting peer", "peer_address", peer, "price", price, "new_balance", newSurplusBalance)
-
- err = a.store.Put(peerSurplusBalanceKey(peer), newSurplusBalance)
- if err != nil {
- return nil, fmt.Errorf("failed to persist surplus balance: %w", err)
- }
-
- return a.Balance(peer)
- }
-
- // if surplus balance didn't cover full transaction, let's continue with leftover part as cost
- debitIncrease := new(big.Int).Sub(price, surplusBalance)
-
- // a sanity check
- if debitIncrease.Cmp(big.NewInt(0)) <= 0 {
- return nil, errors.New("sanity check failed for partial debit after surplus balance drawn")
- }
- cost.Set(debitIncrease)
-
- // if we still have something to debit, than have run out of surplus balance,
- // let's store 0 as surplus balance
- loggerV2.Debug("surplus debiting peer", "peer_address", peer, "amount", debitIncrease, "new_balance", 0)
-
- err = a.store.Put(peerSurplusBalanceKey(peer), big.NewInt(0))
- if err != nil {
- return nil, fmt.Errorf("failed to persist surplus balance: %w", err)
- }
- }
-
- currentBalance, err := a.Balance(peer)
- if err != nil {
- if !errors.Is(err, ErrPeerNoBalance) {
- return nil, fmt.Errorf("failed to load balance: %w", err)
- }
- }
-
- // Get nextBalance by increasing current balance with price
- nextBalance := new(big.Int).Add(currentBalance, cost)
-
- loggerV2.Debug("debiting peer", "peer_address", peer, "price", price, "new_balance", nextBalance)
-
- err = a.store.Put(peerBalanceKey(peer), nextBalance)
- if err != nil {
- return nil, fmt.Errorf("failed to persist balance: %w", err)
- }
-
- err = a.decreaseOriginatedBalanceTo(peer, nextBalance)
- if err != nil {
- a.logger.Warning("increase balance; failed to decrease originated balance", "error", err)
- }
-
- return nextBalance, nil
-}
-
// Apply applies the debit operation and decreases the shadowReservedBalance
func (d *debitAction) Apply() error {
d.accountingPeer.lock.Lock()
@@ -1382,143 +444,6 @@ func (d *debitAction) Apply() error {
return nil
}
-// Cleanup reduces shadow reserve if and only if debitaction have not been applied
-func (d *debitAction) Cleanup() {
- if d.applied {
- return
- }
-
- d.accountingPeer.lock.Lock()
- defer d.accountingPeer.lock.Unlock()
-
- a := d.accounting
- d.accountingPeer.shadowReservedBalance = new(big.Int).Sub(d.accountingPeer.shadowReservedBalance, d.price)
- d.accountingPeer.ghostBalance = new(big.Int).Add(d.accountingPeer.ghostBalance, d.price)
- if d.accountingPeer.ghostBalance.Cmp(d.accountingPeer.disconnectLimit) > 0 {
- a.metrics.AccountingDisconnectsGhostOverdrawCount.Inc()
- _ = a.blocklist(d.peer, 1, "ghost overdraw")
- }
-}
-
-func (a *Accounting) blocklistUntil(peer swarm.Address, multiplier int64) (int64, error) {
-
- debt, err := a.peerLatentDebt(peer)
- if err != nil {
- return 0, err
- }
-
- if debt.Cmp(a.refreshRate) < 0 {
- debt.Set(a.refreshRate)
- }
-
- additionalDebt := new(big.Int).Add(debt, a.paymentThreshold)
-
- multiplyDebt := new(big.Int).Mul(additionalDebt, big.NewInt(multiplier))
-
- k := new(big.Int).Div(multiplyDebt, a.refreshRate)
-
- kInt := k.Int64()
-
- return kInt, nil
-}
-
-func (a *Accounting) blocklist(peer swarm.Address, multiplier int64, reason string) error {
- disconnectFor, err := a.blocklistUntil(peer, multiplier)
- if err != nil {
- return a.p2p.Blocklist(peer, 1*time.Minute, reason)
- }
-
- return a.p2p.Blocklist(peer, time.Duration(disconnectFor)*time.Second, reason)
-}
-
-func (a *Accounting) Connect(peer swarm.Address, fullNode bool) {
- accountingPeer := a.getAccountingPeer(peer)
- zero := big.NewInt(0)
-
- accountingPeer.lock.Lock()
- defer accountingPeer.lock.Unlock()
-
- paymentThreshold := new(big.Int).Set(a.paymentThreshold)
- thresholdGrowStep := new(big.Int).Set(a.thresholdGrowStep)
- disconnectLimit := new(big.Int).Set(a.disconnectLimit)
-
- if !fullNode {
- paymentThreshold.Set(a.lightPaymentThreshold)
- thresholdGrowStep.Set(a.lightThresholdGrowStep)
- disconnectLimit.Set(a.lightDisconnectLimit)
- }
-
- accountingPeer.connected = true
- accountingPeer.fullNode = fullNode
- accountingPeer.shadowReservedBalance.Set(zero)
- accountingPeer.ghostBalance.Set(zero)
- accountingPeer.reservedBalance.Set(zero)
- accountingPeer.refreshReservedBalance.Set(zero)
- accountingPeer.paymentThresholdForPeer.Set(paymentThreshold)
- accountingPeer.thresholdGrowAt.Set(thresholdGrowStep)
- accountingPeer.disconnectLimit.Set(disconnectLimit)
-
- err := a.store.Put(peerBalanceKey(peer), zero)
- if err != nil {
- a.logger.Error(err, "failed to persist balance")
- }
-
- err = a.store.Put(peerSurplusBalanceKey(peer), zero)
- if err != nil {
- a.logger.Error(err, "failed to persist surplus balance")
- }
-}
-
-// decreaseOriginatedBalanceTo decreases the originated balance to provided limit or 0 if limit is positive
-func (a *Accounting) decreaseOriginatedBalanceTo(peer swarm.Address, limit *big.Int) error {
- loggerV2 := a.logger.V(2).Register()
-
- zero := big.NewInt(0)
-
- toSet := new(big.Int).Set(limit)
-
- originatedBalance, err := a.OriginatedBalance(peer)
- if err != nil && !errors.Is(err, ErrPeerNoBalance) {
- return fmt.Errorf("failed to load originated balance: %w", err)
- }
-
- if toSet.Cmp(zero) > 0 {
- toSet.Set(zero)
- }
-
- // If originated balance is more into the negative domain, set it to limit
- if originatedBalance.Cmp(toSet) < 0 {
- err = a.store.Put(originatedBalanceKey(peer), toSet)
- if err != nil {
- return fmt.Errorf("failed to persist originated balance: %w", err)
- }
- loggerV2.Debug("decreasing originated balance of peer", "peer_address", peer, "new_balance", toSet)
- }
-
- return nil
-}
-
-// decreaseOriginatedBalanceBy decreases the originated balance by provided amount even below 0
-func (a *Accounting) decreaseOriginatedBalanceBy(peer swarm.Address, amount *big.Int) error {
- loggerV2 := a.logger.V(2).Register()
-
- originatedBalance, err := a.OriginatedBalance(peer)
- if err != nil && !errors.Is(err, ErrPeerNoBalance) {
- return fmt.Errorf("failed to load balance: %w", err)
- }
-
- // Move originated balance into the positive domain by amount
- newOriginatedBalance := new(big.Int).Add(originatedBalance, amount)
-
- err = a.store.Put(originatedBalanceKey(peer), newOriginatedBalance)
- if err != nil {
- return fmt.Errorf("failed to persist originated balance: %w", err)
- }
- loggerV2.Debug("decreasing originated balance of peer", "peer_address", peer, "amount", amount, "new_balance", newOriginatedBalance)
-
- return nil
-}
-
func (a *Accounting) Disconnect(peer swarm.Address) {
accountingPeer := a.getAccountingPeer(peer)
@@ -1535,21 +460,3 @@ func (a *Accounting) Disconnect(peer swarm.Address) {
a.metrics.AccountingDisconnectsReconnectCount.Inc()
}
}
-
-func (a *Accounting) SetRefreshFunc(f RefreshFunc) {
- a.refreshFunction = f
-}
-
-func (a *Accounting) SetPayFunc(f PayFunc) {
- a.payFunction = f
-}
-
-// Close hangs up running websockets on shutdown.
-func (a *Accounting) Close() error {
- a.wg.Wait()
- return nil
-}
-
-func percentOf(percent int64, of *big.Int) *big.Int {
- return new(big.Int).Div(new(big.Int).Mul(of, big.NewInt(percent)), big.NewInt(100))
-}
diff --git a/pkg/accounting/accounting_js.go b/pkg/accounting/accounting_js.go
new file mode 100644
index 00000000000..88a536b8087
--- /dev/null
+++ b/pkg/accounting/accounting_js.go
@@ -0,0 +1,432 @@
+//go:build js
+// +build js
+
+package accounting
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/pricing"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// Accounting is the main implementation of the accounting interface.
+type Accounting struct {
+ // Mutex for accessing the accountingPeers map.
+ accountingPeersMu sync.Mutex
+ accountingPeers map[string]*accountingPeer
+ logger log.Logger
+ store storage.StateStorer
+ // The payment threshold in BZZ we communicate to our peers.
+ paymentThreshold *big.Int
+ // The amount in percent we let peers exceed the payment threshold before we
+ // disconnect them.
+ paymentTolerance int64
+ // Start settling when reserve plus debt reaches this close to threshold in percent.
+ earlyPayment int64
+ // Limit to disconnect peer after going in debt over
+ disconnectLimit *big.Int
+ // function used for monetary settlement
+ payFunction PayFunc
+ // function used for time settlement
+ refreshFunction RefreshFunc
+ // allowance based on time used in pseudo settle
+ refreshRate *big.Int
+ lightRefreshRate *big.Int
+ // lower bound for the value of issued cheques
+ minimumPayment *big.Int
+ pricing pricing.Interface
+ wg sync.WaitGroup
+ p2p p2p.Service
+ timeNow func() time.Time
+ thresholdGrowStep *big.Int
+ thresholdGrowChange *big.Int
+ // light node counterparts
+ lightPaymentThreshold *big.Int
+ lightDisconnectLimit *big.Int
+ lightThresholdGrowStep *big.Int
+ lightThresholdGrowChange *big.Int
+}
+
+// NewAccounting creates a new Accounting instance with the provided options.
+func NewAccounting(
+ PaymentThreshold *big.Int,
+ PaymentTolerance,
+ EarlyPayment int64,
+ logger log.Logger,
+ Store storage.StateStorer,
+ Pricing pricing.Interface,
+ refreshRate *big.Int,
+ lightFactor int64,
+ p2pService p2p.Service,
+) (*Accounting, error) {
+
+ lightPaymentThreshold := new(big.Int).Div(PaymentThreshold, big.NewInt(lightFactor))
+ lightRefreshRate := new(big.Int).Div(refreshRate, big.NewInt(lightFactor))
+ return &Accounting{
+ accountingPeers: make(map[string]*accountingPeer),
+ paymentThreshold: new(big.Int).Set(PaymentThreshold),
+ paymentTolerance: PaymentTolerance,
+ earlyPayment: EarlyPayment,
+ disconnectLimit: percentOf(100+PaymentTolerance, PaymentThreshold),
+ logger: logger.WithName(loggerName).Register(),
+ store: Store,
+ pricing: Pricing,
+ refreshRate: new(big.Int).Set(refreshRate),
+ lightRefreshRate: new(big.Int).Set(lightRefreshRate),
+ timeNow: time.Now,
+ minimumPayment: new(big.Int).Div(refreshRate, big.NewInt(minimumPaymentDivisor)),
+ p2p: p2pService,
+ thresholdGrowChange: new(big.Int).Mul(refreshRate, big.NewInt(linearCheckpointNumber)),
+ thresholdGrowStep: new(big.Int).Mul(refreshRate, big.NewInt(linearCheckpointStep)),
+ lightPaymentThreshold: new(big.Int).Set(lightPaymentThreshold),
+ lightDisconnectLimit: percentOf(100+PaymentTolerance, lightPaymentThreshold),
+ lightThresholdGrowChange: new(big.Int).Mul(lightRefreshRate, big.NewInt(linearCheckpointNumber)),
+ lightThresholdGrowStep: new(big.Int).Mul(lightRefreshRate, big.NewInt(linearCheckpointStep)),
+ }, nil
+}
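+
+// Worked example (illustrative numbers only): with PaymentThreshold = 10000
+// and PaymentTolerance = 25, the disconnect limit computed above is
+//
+//	percentOf(100+25, 10000) = 10000 * 125 / 100 = 12500
+//
+// i.e. a peer may exceed the threshold by the tolerance percentage before we
+// consider disconnecting it; the light-node limits are derived the same way
+// from the threshold divided by lightFactor.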
+
+func (a *Accounting) PrepareCredit(ctx context.Context, peer swarm.Address, price uint64, originated bool) (Action, error) {
+
+ accountingPeer := a.getAccountingPeer(peer)
+
+ if err := accountingPeer.lock.TryLock(ctx); err != nil {
+ a.logger.Debug("failed to acquire lock when preparing credit", "error", err)
+ return nil, err
+ }
+ defer accountingPeer.lock.Unlock()
+
+ if !accountingPeer.connected {
+ return nil, errors.New("connection not initialized yet")
+ }
+
+ bigPrice := new(big.Int).SetUint64(price)
+
+ threshold := accountingPeer.earlyPayment
+
+ // debt if all reserved operations are successfully credited including debt created by surplus balance
+ increasedExpectedDebt, currentBalance, err := a.getIncreasedExpectedDebt(peer, accountingPeer, bigPrice)
+ if err != nil {
+ return nil, err
+ }
+ // debt if all reserved operations are successfully credited and all shadow reserved operations are debited including debt created by surplus balance
+ // in other words this is the debt the other node sees if everything pending is successful
+ increasedExpectedDebtReduced := new(big.Int).Sub(increasedExpectedDebt, accountingPeer.shadowReservedBalance)
+
+ // If our expected debt, reduced by what may already have been credited on the other side, is less than earlyPayment away from our payment threshold
+ // and we are actually in debt, trigger settlement.
+ // We pay early to avoid needlessly blocking requests later when concurrent requests occur and we are already close to the payment threshold.
+
+ if increasedExpectedDebtReduced.Cmp(threshold) >= 0 && currentBalance.Cmp(big.NewInt(0)) < 0 {
+ err = a.settle(peer, accountingPeer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to settle with peer %v: %w", peer, err)
+ }
+
+ increasedExpectedDebt, _, err = a.getIncreasedExpectedDebt(peer, accountingPeer, bigPrice)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ timeElapsedInSeconds := (a.timeNow().UnixMilli() - accountingPeer.refreshTimestampMilliseconds) / 1000
+ if timeElapsedInSeconds > 1 {
+ timeElapsedInSeconds = 1
+ }
+
+ refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate)
+ overdraftLimit := new(big.Int).Add(accountingPeer.paymentThreshold, refreshDue)
+
+ // if expectedDebt would still exceed the paymentThreshold at this point block this request
+ // this can happen if there is a large number of concurrent requests to the same peer
+ if increasedExpectedDebt.Cmp(overdraftLimit) > 0 {
+ return nil, ErrOverdraft
+ }
+
+ accountingPeer.reservedBalance = new(big.Int).Add(accountingPeer.reservedBalance, bigPrice)
+ return &creditAction{
+ accounting: a,
+ price: bigPrice,
+ peer: peer,
+ accountingPeer: accountingPeer,
+ originated: originated,
+ }, nil
+}
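+
+// Overdraft sketch (hypothetical numbers): with paymentThreshold = 10000,
+// refreshRate = 4500 and the last refreshment finished at least a second ago,
+//
+//	overdraftLimit = 10000 + 1*4500 = 14500
+//
+// so a request is refused with ErrOverdraft only once the expected debt,
+// including everything already reserved, would exceed 14500.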
+
+func (c *creditAction) Apply() error {
+ loggerV2 := c.accounting.logger.V(2).Register()
+
+ c.accountingPeer.lock.Lock()
+ defer c.accountingPeer.lock.Unlock()
+
+ // debt if all reserved operations are successfully credited including debt created by surplus balance
+ increasedExpectedDebt, currentBalance, err := c.accounting.getIncreasedExpectedDebt(c.peer, c.accountingPeer, c.price)
+ if err != nil {
+ if !errors.Is(err, ErrPeerNoBalance) {
+ return fmt.Errorf("failed to load balance: %w", err)
+ }
+ }
+
+ // Calculate next balance by decreasing current balance with the price we credit
+ nextBalance := new(big.Int).Sub(currentBalance, c.price)
+
+ loggerV2.Debug("credit action apply", "crediting_peer_address", c.peer, "price", c.price, "new_balance", nextBalance)
+
+ err = c.accounting.store.Put(peerBalanceKey(c.peer), nextBalance)
+ if err != nil {
+ return fmt.Errorf("failed to persist balance: %w", err)
+ }
+
+ if c.price.Cmp(c.accountingPeer.reservedBalance) > 0 {
+ c.accounting.logger.Error(nil, "attempting to release more balance than was reserved for peer", "peer_address", c.peer)
+ c.accountingPeer.reservedBalance.SetUint64(0)
+ } else {
+ c.accountingPeer.reservedBalance.Sub(c.accountingPeer.reservedBalance, c.price)
+ }
+
+ c.applied = true
+
+ if !c.originated {
+ // debt if all reserved operations are successfully credited and all shadow reserved operations are debited including debt created by surplus balance
+ // in other words this is the debt the other node sees if everything pending is successful
+ increasedExpectedDebtReduced := new(big.Int).Sub(increasedExpectedDebt, c.accountingPeer.shadowReservedBalance)
+ if increasedExpectedDebtReduced.Cmp(c.accountingPeer.earlyPayment) > 0 {
+ err = c.accounting.settle(c.peer, c.accountingPeer)
+ if err != nil {
+ c.accounting.logger.Error(err, "failed to settle with credited peer", "peer_address", c.peer)
+ }
+ }
+
+ return nil
+ }
+
+ originBalance, err := c.accounting.OriginatedBalance(c.peer)
+ if err != nil && !errors.Is(err, ErrPeerNoBalance) {
+ return fmt.Errorf("failed to load originated balance: %w", err)
+ }
+
+ // Calculate next balance by decreasing current balance with the price we credit
+ nextOriginBalance := new(big.Int).Sub(originBalance, c.price)
+
+ loggerV2.Debug("credit action apply", "crediting_peer_address", c.peer, "price", c.price, "new_originated_balance", nextOriginBalance)
+
+ zero := big.NewInt(0)
+ // only consider negative balance for limiting originated balance
+ if nextBalance.Cmp(zero) > 0 {
+ nextBalance.Set(zero)
+ }
+
+ // If originated balance is more into the negative domain, set it to balance
+ if nextOriginBalance.Cmp(nextBalance) < 0 {
+ nextOriginBalance.Set(nextBalance)
+ loggerV2.Debug("credit action apply; decreasing originated balance", "crediting_peer_address", c.peer, "current_balance", nextOriginBalance)
+ }
+
+ err = c.accounting.store.Put(originatedBalanceKey(c.peer), nextOriginBalance)
+ if err != nil {
+ return fmt.Errorf("failed to persist originated balance: %w", err)
+ }
+
+ // debt if all reserved operations are successfully credited and all shadow reserved operations are debited including debt created by surplus balance
+ // in other words this is the debt the other node sees if everything pending is successful
+ increasedExpectedDebtReduced := new(big.Int).Sub(increasedExpectedDebt, c.accountingPeer.shadowReservedBalance)
+ if increasedExpectedDebtReduced.Cmp(c.accountingPeer.earlyPayment) > 0 {
+ err = c.accounting.settle(c.peer, c.accountingPeer)
+ if err != nil {
+ c.accounting.logger.Error(err, "failed to settle with credited peer", "peer_address", c.peer)
+ }
+ }
+
+ return nil
+}
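+
+// Clamp illustration (hypothetical values): if the credit leaves
+// nextBalance = -300 while nextOriginBalance would be -500, the originated
+// balance is raised to -300, since originated debt can never exceed the
+// total debt toward the peer.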
+
+// NotifyPaymentSent is triggered by async monetary settlement to update our balance and remove its price from the shadow reserve
+func (a *Accounting) NotifyPaymentSent(peer swarm.Address, amount *big.Int, receivedError error) {
+ loggerV2 := a.logger.V(2).Register()
+
+ defer a.wg.Done()
+ accountingPeer := a.getAccountingPeer(peer)
+
+ accountingPeer.lock.Lock()
+ defer accountingPeer.lock.Unlock()
+
+ accountingPeer.paymentOngoing = false
+ // decrease shadow reserve by payment value
+ accountingPeer.shadowReservedBalance.Sub(accountingPeer.shadowReservedBalance, amount)
+
+ if receivedError != nil {
+ accountingPeer.lastSettlementFailureTimestamp = a.timeNow().Unix()
+
+ a.logger.Warning("payment failure", "error", receivedError)
+ return
+ }
+
+ currentBalance, err := a.Balance(peer)
+ if err != nil {
+ if !errors.Is(err, ErrPeerNoBalance) {
+ a.logger.Error(err, "notify payment sent; failed to persist balance")
+ return
+ }
+ }
+
+ // Get nextBalance by increasing current balance with the settled amount
+ nextBalance := new(big.Int).Add(currentBalance, amount)
+
+ loggerV2.Debug("registering payment sent", "peer_address", peer, "amount", amount, "new_balance", nextBalance)
+
+ err = a.store.Put(peerBalanceKey(peer), nextBalance)
+ if err != nil {
+ a.logger.Error(err, "notify payment sent; failed to persist balance")
+ return
+ }
+
+ err = a.decreaseOriginatedBalanceBy(peer, amount)
+ if err != nil {
+ a.logger.Warning("notify payment sent; failed to decrease originated balance", "error", err)
+ }
+}
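+
+// Flow example (illustrative): a successful payment of 2000 moves the stored
+// balance from -2500 to -500 via the Add above, and moves the originated
+// balance toward zero by the same 2000, mirroring the debt the payment
+// settled.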
+
+// NotifyRefreshmentSent is called by pseudosettle when refreshment is done or failed
+func (a *Accounting) NotifyRefreshmentSent(peer swarm.Address, attemptedAmount, amount *big.Int, timestamp int64, allegedInterval int64, receivedError error) {
+ accountingPeer := a.getAccountingPeer(peer)
+
+ accountingPeer.lock.Lock()
+ defer accountingPeer.lock.Unlock()
+
+ // conclude ongoing refreshment
+ accountingPeer.refreshOngoing = false
+ // save timestamp received in milliseconds of when the refreshment completed locally
+ accountingPeer.refreshTimestampMilliseconds = timestamp
+
+ // if specific error is received increment metrics
+ if receivedError != nil {
+
+ // if refreshment failed with connected peer, blocklist
+ if !errors.Is(receivedError, p2p.ErrPeerNotFound) {
+ _ = a.blocklist(peer, 1, "failed to refresh")
+ }
+ a.logger.Error(receivedError, "notifyrefreshmentsent failed to refresh")
+ return
+ }
+
+ // enforce allowance
+ // calculate expectation decreased by any potential debt decreases that occurred during the refreshment
+ checkAllowance := new(big.Int).Sub(attemptedAmount, accountingPeer.refreshReservedBalance)
+
+ // reset cumulative potential debt decrease during an ongoing refreshment as refreshment just completed
+ accountingPeer.refreshReservedBalance.Set(big.NewInt(0))
+
+ // don't expect a higher accepted amount than the attempted amount (sanity check)
+ if checkAllowance.Cmp(attemptedAmount) > 0 {
+ checkAllowance.Set(attemptedAmount)
+ }
+
+ // calculate time based allowance
+ expectedAllowance := new(big.Int).Mul(big.NewInt(allegedInterval), a.refreshRate)
+ // expect the minimum of the time-based allowance and the debt/attempted-amount-based expectation
+ if expectedAllowance.Cmp(checkAllowance) > 0 {
+ expectedAllowance = new(big.Int).Set(checkAllowance)
+ }
+
+ // compare received refreshment amount to expectation
+ if expectedAllowance.Cmp(amount) > 0 {
+ // if expectation is not met, blocklist peer
+ a.logger.Error(nil, "accepted lower payment than expected", "peer_address", peer)
+
+ _ = a.blocklist(peer, 1, "failed to meet expectation for allowance")
+ return
+ }
+
+ // update balance
+ currentBalance, err := a.Balance(peer)
+ if err != nil {
+ if !errors.Is(err, ErrPeerNoBalance) {
+ a.logger.Error(err, "notifyrefreshmentsent failed to get balance")
+ return
+ }
+ }
+
+ newBalance := new(big.Int).Add(currentBalance, amount)
+
+ err = a.store.Put(peerBalanceKey(peer), newBalance)
+ if err != nil {
+ a.logger.Error(err, "notifyrefreshmentsent failed to persist balance")
+ return
+ }
+
+ // update originated balance
+ err = a.decreaseOriginatedBalanceTo(peer, newBalance)
+ if err != nil {
+ a.logger.Warning("accounting: notifyrefreshmentsent failed to decrease originated balance", "error", err)
+ }
+
+}
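+
+// Allowance check, worked through (hypothetical numbers): attemptedAmount =
+// 9000, refreshReservedBalance = 1000, allegedInterval = 2 seconds and
+// refreshRate = 4500 give
+//
+//	checkAllowance    = 9000 - 1000 = 8000
+//	expectedAllowance = min(2*4500, 8000) = 8000
+//
+// so an accepted amount below 8000 is treated as an under-refreshment and
+// the peer is blocklisted.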
+
+// Apply applies the debit operation and decreases the shadowReservedBalance
+func (d *debitAction) Apply() error {
+ d.accountingPeer.lock.Lock()
+ defer d.accountingPeer.lock.Unlock()
+
+ a := d.accounting
+
+ cost := new(big.Int).Set(d.price)
+
+ nextBalance, err := d.accounting.increaseBalance(d.peer, d.accountingPeer, cost)
+ if err != nil {
+ return err
+ }
+
+ d.applied = true
+ d.accountingPeer.shadowReservedBalance = new(big.Int).Sub(d.accountingPeer.shadowReservedBalance, d.price)
+
+ timeElapsedInSeconds := a.timeNow().Unix() - d.accountingPeer.refreshReceivedTimestamp
+ if timeElapsedInSeconds > 1 {
+ timeElapsedInSeconds = 1
+ }
+
+ // get appropriate refresh rate
+ refreshRate := new(big.Int).Set(a.refreshRate)
+ if !d.accountingPeer.fullNode {
+ refreshRate = new(big.Int).Set(a.lightRefreshRate)
+ }
+
+ refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), refreshRate)
+ disconnectLimit := new(big.Int).Add(d.accountingPeer.disconnectLimit, refreshDue)
+
+ if nextBalance.Cmp(disconnectLimit) >= 0 {
+ disconnectFor, err := a.blocklistUntil(d.peer, 1)
+ if err != nil {
+ disconnectFor = 10
+ }
+ return p2p.NewBlockPeerError(time.Duration(disconnectFor)*time.Second, ErrDisconnectThresholdExceeded)
+ }
+
+ return nil
+}
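+
+// Disconnect check, illustrated (hypothetical numbers): with a stored
+// disconnectLimit of 12500, a light-node refresh rate of 450 and a
+// refreshment accepted a second ago, the effective limit is 12500 + 450 =
+// 12950; a debit pushing the balance to or past it returns a BlockPeerError.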
+
+func (a *Accounting) Disconnect(peer swarm.Address) {
+ accountingPeer := a.getAccountingPeer(peer)
+
+ accountingPeer.lock.Lock()
+ defer accountingPeer.lock.Unlock()
+
+ if accountingPeer.connected {
+ disconnectFor, err := a.blocklistUntil(peer, 1)
+ if err != nil {
+ disconnectFor = int64(10)
+ }
+ accountingPeer.connected = false
+ _ = a.p2p.Blocklist(peer, time.Duration(disconnectFor)*time.Second, "accounting disconnect")
+ }
+}
diff --git a/pkg/accounting/accounting_shared.go b/pkg/accounting/accounting_shared.go
new file mode 100644
index 00000000000..adf9e2fb9f6
--- /dev/null
+++ b/pkg/accounting/accounting_shared.go
@@ -0,0 +1,1106 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package accounting provides functionalities needed
+// to do per-peer accounting.
+package accounting
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "strings"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "accounting"
+
+const (
+ linearCheckpointNumber = 1800
+ linearCheckpointStep = 100
+)
+
+var (
+ _ Interface = (*Accounting)(nil)
+ balancesPrefix = "accounting_balance_"
+ balancesSurplusPrefix = "accounting_surplusbalance_"
+ balancesOriginatedPrefix = "accounting_originatedbalance_"
+ // fraction of the refresh rate that is the minimum for monetary settlement
+ // this value is chosen so that tiny payments are prevented while still allowing small payments in environments with lower payment thresholds
+ minimumPaymentDivisor = int64(5)
+ failedSettlementInterval = int64(10) // seconds
+)
+
+// Interface is the Accounting interface.
+type Interface interface {
+ // PrepareCredit action to prevent overspending in case of concurrent requests.
+ PrepareCredit(ctx context.Context, peer swarm.Address, price uint64, originated bool) (Action, error)
+ // PrepareDebit returns an accounting Action for the later debit to be executed on and to implement shadowing a possibly credited part of reserve on the other side.
+ PrepareDebit(ctx context.Context, peer swarm.Address, price uint64) (Action, error)
+ // Balance returns the current balance for the given peer.
+ Balance(peer swarm.Address) (*big.Int, error)
+ // SurplusBalance returns the current surplus balance for the given peer.
+ SurplusBalance(peer swarm.Address) (*big.Int, error)
+ // Balances returns balances for all known peers.
+ Balances() (map[string]*big.Int, error)
+ // CompensatedBalance returns the current balance deducted by current surplus balance for the given peer.
+ CompensatedBalance(peer swarm.Address) (*big.Int, error)
+ // CompensatedBalances returns the compensated balances for all known peers.
+ CompensatedBalances() (map[string]*big.Int, error)
+ // PeerAccounting returns the associated values for all known peers
+ PeerAccounting() (map[string]PeerInfo, error)
+}
+
+// Action represents an accounting action that can be applied
+type Action interface {
+ // Cleanup cleans up an action. Must be called whether it was applied or not.
+ Cleanup()
+ // Apply applies an action
+ Apply() error
+}
+
+// debitAction represents a future debit
+type debitAction struct {
+ accounting *Accounting
+ price *big.Int
+ peer swarm.Address
+ accountingPeer *accountingPeer
+ applied bool
+}
+
+// creditAction represents a future credit
+type creditAction struct {
+ accounting *Accounting
+ price *big.Int
+ peer swarm.Address
+ accountingPeer *accountingPeer
+ originated bool
+ applied bool
+}
+
+// PayFunc is the function used for async monetary settlement
+type PayFunc func(context.Context, swarm.Address, *big.Int)
+
+// RefreshFunc is the function used for sync time-based settlement
+type RefreshFunc func(context.Context, swarm.Address, *big.Int)
+
+// Mutex is a drop-in replacement for sync.Mutex:
+// its TryLock will not lock if the context has expired
+type Mutex struct {
+ mu chan struct{}
+}
+
+func NewMutex() *Mutex {
+ return &Mutex{
+ mu: make(chan struct{}, 1), // unlocked by default
+ }
+}
+
+var ErrFailToLock = errors.New("failed to lock")
+
+func (m *Mutex) TryLock(ctx context.Context) error {
+ select {
+ case m.mu <- struct{}{}:
+ return nil // locked
+ case <-ctx.Done():
+ return fmt.Errorf("%w: %w", ctx.Err(), ErrFailToLock)
+ }
+}
+
+func (m *Mutex) Lock() {
+ m.mu <- struct{}{}
+}
+
+func (m *Mutex) Unlock() {
+ <-m.mu
+}
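+
+// Usage sketch (illustrative): TryLock gives up when the context does,
+// instead of blocking indefinitely:
+//
+//	ctx, cancel := context.WithTimeout(ctx, time.Second)
+//	defer cancel()
+//	if err := peer.lock.TryLock(ctx); err != nil {
+//		return err // wraps ctx.Err() and ErrFailToLock
+//	}
+//	defer peer.lock.Unlock()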
+
+// accountingPeer holds all in-memory accounting information for one peer.
+type accountingPeer struct {
+ lock *Mutex // lock to be held during any accounting action for this peer
+ reservedBalance *big.Int // amount currently reserved for active peer interaction
+ shadowReservedBalance *big.Int // amount potentially to be debited for active peer interaction
+ refreshReservedBalance *big.Int // amount debt potentially decreased during an ongoing refreshment
+ ghostBalance *big.Int // amount potentially could have been debited for but was not
+ paymentThreshold *big.Int // the threshold at which the peer expects us to pay
+ earlyPayment *big.Int // individual early payment threshold calculated from payment threshold and early payment percentage
+ paymentThresholdForPeer *big.Int // individual payment threshold at which the peer is expected to pay
+ disconnectLimit *big.Int // individual disconnect threshold calculated from tolerance and payment threshold for peer
+ refreshTimestampMilliseconds int64 // last time we attempted and succeeded time-based settlement
+ refreshReceivedTimestamp int64 // last time we accepted time-based settlement
+ paymentOngoing bool // indicate if we are currently settling with the peer
+ refreshOngoing bool // indicates if we are currently refreshing with the peer
+ lastSettlementFailureTimestamp int64 // time of last unsuccessful attempt to issue a cheque
+ connected bool // indicates whether the peer is currently connected
+ fullNode bool // the peer connected as full node or light node
+ totalDebtRepay *big.Int // since being connected, amount of cumulative debt settled by the peer
+ thresholdGrowAt *big.Int // cumulative debt to be settled by the peer in order to give threshold upgrade
+}
+
+var (
+ // ErrOverdraft denotes the expected debt in Reserve would exceed the payment thresholds.
+ ErrOverdraft = errors.New("attempted overdraft")
+ // ErrDisconnectThresholdExceeded denotes a peer has exceeded the disconnect threshold.
+ ErrDisconnectThresholdExceeded = errors.New("disconnect threshold exceeded")
+ // ErrPeerNoBalance is the error returned if no balance in store exists for a peer
+ ErrPeerNoBalance = errors.New("no balance for peer")
+ // ErrInvalidValue denotes an invalid value read from store
+ ErrInvalidValue = errors.New("invalid value")
+ // ErrOverRelease denotes attempting to release more balance than was reserved for a peer.
+ ErrOverRelease = errors.New("attempting to release more balance than was reserved for peer")
+ // ErrEnforceRefresh denotes that a refreshment did not meet the allowance expectation.
+ ErrEnforceRefresh = errors.New("allowance expectation refused")
+)
+
+func (a *Accounting) getIncreasedExpectedDebt(peer swarm.Address, accountingPeer *accountingPeer, bigPrice *big.Int) (*big.Int, *big.Int, error) {
+ nextReserved := new(big.Int).Add(accountingPeer.reservedBalance, bigPrice)
+
+ currentBalance, err := a.Balance(peer)
+ if err != nil && !errors.Is(err, ErrPeerNoBalance) {
+ return nil, nil, fmt.Errorf("failed to load balance: %w", err)
+ }
+ currentDebt := new(big.Int).Neg(currentBalance)
+ if currentDebt.Cmp(big.NewInt(0)) < 0 {
+ currentDebt.SetInt64(0)
+ }
+
+ // debt if all reserved operations are successfully credited excluding debt created by surplus balance
+ expectedDebt := new(big.Int).Add(currentDebt, nextReserved)
+
+ // additionalDebt is debt created by incoming payments which we don't consider debt for monetary settlement purposes
+ additionalDebt, err := a.SurplusBalance(peer)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to load surplus balance: %w", err)
+ }
+
+ // debt if all reserved operations are successfully credited including debt created by surplus balance
+ return new(big.Int).Add(expectedDebt, additionalDebt), currentBalance, nil
+}
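+
+// Sketch (hypothetical values): balance = -1200 (we owe 1200),
+// reservedBalance = 300, price = 500 and surplus = 100 give
+//
+//	currentDebt           = 1200
+//	expectedDebt          = 1200 + (300 + 500) = 2000
+//	increasedExpectedDebt = 2000 + 100 = 2100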
+
+func (c *creditAction) Cleanup() {
+ if c.applied {
+ return
+ }
+
+ c.accountingPeer.lock.Lock()
+ defer c.accountingPeer.lock.Unlock()
+
+ if c.price.Cmp(c.accountingPeer.reservedBalance) > 0 {
+ c.accounting.logger.Error(nil, "attempting to release more balance than was reserved for peer", "peer_address", c.peer)
+ c.accountingPeer.reservedBalance.SetUint64(0)
+ } else {
+ c.accountingPeer.reservedBalance.Sub(c.accountingPeer.reservedBalance, c.price)
+ }
+}
+
+// Settle all debt with a peer. The lock on the accountingPeer must be held when
+// called.
+func (a *Accounting) settle(peer swarm.Address, balance *accountingPeer) error {
+ now := a.timeNow()
+ timeElapsedInMilliseconds := now.UnixMilli() - balance.refreshTimestampMilliseconds
+
+ // get debt towards peer decreased by any amount that is to be debited soon
+ paymentAmount, err := a.shadowBalance(peer, balance)
+ if err != nil {
+ return err
+ }
+ // Don't do anything if there is not enough actual debt.
+ // This might be the case if the peer owes us and the total reserve for a peer exceeds the payment threshold.
+ // The minimum amount to trigger settlement is 1 * refresh rate, to avoid ineffective use of refreshments.
+ if paymentAmount.Cmp(a.refreshRate) >= 0 {
+ // Only trigger refreshment if last refreshment finished at least 1000 milliseconds ago
+ // This is to avoid a peer refusing refreshment because not enough time passed since last refreshment
+ if timeElapsedInMilliseconds > 999 {
+ if !balance.refreshOngoing {
+ balance.refreshOngoing = true
+ go a.refreshFunction(context.Background(), peer, paymentAmount)
+ }
+ }
+
+ if a.payFunction != nil && !balance.paymentOngoing {
+ // if a settlement failed recently, wait until failedSettlementInterval before trying again
+ differenceInSeconds := now.Unix() - balance.lastSettlementFailureTimestamp
+ if differenceInSeconds > failedSettlementInterval {
+ // if there is no monetary settlement happening, check if there is something to settle
+ // compute debt excluding debt created by incoming payments
+ originatedBalance, err := a.OriginatedBalance(peer)
+ if err != nil {
+ if !errors.Is(err, ErrPeerNoBalance) {
+ return fmt.Errorf("failed to load originated balance to settle: %w", err)
+ }
+ }
+
+ paymentAmount := new(big.Int).Neg(originatedBalance)
+
+ if paymentAmount.Cmp(a.minimumPayment) >= 0 {
+ timeElapsedInSeconds := (a.timeNow().UnixMilli() - balance.refreshTimestampMilliseconds) / 1000
+ refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate)
+ currentBalance, err := a.Balance(peer)
+ if err != nil && !errors.Is(err, ErrPeerNoBalance) {
+ return fmt.Errorf("failed to load balance: %w", err)
+ }
+
+ debt := new(big.Int).Neg(currentBalance)
+ decreasedDebt := new(big.Int).Sub(debt, refreshDue)
+ expectedDecreasedDebt := new(big.Int).Sub(decreasedDebt, balance.shadowReservedBalance)
+
+ if paymentAmount.Cmp(expectedDecreasedDebt) > 0 {
+ paymentAmount.Set(expectedDecreasedDebt)
+ }
+
+ // if the remaining debt is still larger than some minimum amount, trigger monetary settlement
+ if paymentAmount.Cmp(a.minimumPayment) >= 0 {
+ balance.paymentOngoing = true
+ // add settled amount to shadow reserve before sending it
+ balance.shadowReservedBalance.Add(balance.shadowReservedBalance, paymentAmount)
+ // if a refreshment is ongoing, add this amount sent to cumulative potential debt decrease during refreshment
+ if balance.refreshOngoing {
+ balance.refreshReservedBalance = new(big.Int).Add(balance.refreshReservedBalance, paymentAmount)
+ }
+ a.wg.Add(1)
+ go a.payFunction(context.Background(), peer, paymentAmount)
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
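+
+// In short: settle first attempts a time-based refreshment (at most once per
+// second) sized by shadowBalance, and only then, if the originated debt net
+// of any refreshment already under way still exceeds minimumPayment, kicks
+// off an asynchronous monetary settlement via payFunction.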
+
+// Balance returns the current balance for the given peer.
+func (a *Accounting) Balance(peer swarm.Address) (balance *big.Int, err error) {
+ err = a.store.Get(peerBalanceKey(peer), &balance)
+
+ if err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ return big.NewInt(0), ErrPeerNoBalance
+ }
+ return nil, err
+ }
+
+ return balance, nil
+}
+
+// OriginatedBalance returns the current originated balance for the given peer.
+func (a *Accounting) OriginatedBalance(peer swarm.Address) (balance *big.Int, err error) {
+ err = a.store.Get(originatedBalanceKey(peer), &balance)
+
+ if err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ return big.NewInt(0), ErrPeerNoBalance
+ }
+ return nil, err
+ }
+
+ return balance, nil
+}
+
+// SurplusBalance returns the current surplus balance for the given peer.
+func (a *Accounting) SurplusBalance(peer swarm.Address) (balance *big.Int, err error) {
+ err = a.store.Get(peerSurplusBalanceKey(peer), &balance)
+
+ if err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ return big.NewInt(0), nil
+ }
+ return nil, err
+ }
+
+ if balance.Cmp(big.NewInt(0)) < 0 {
+ return nil, ErrInvalidValue
+ }
+
+ return balance, nil
+}
+
+// CompensatedBalance returns balance decreased by surplus balance
+func (a *Accounting) CompensatedBalance(peer swarm.Address) (compensated *big.Int, err error) {
+ surplus, err := a.SurplusBalance(peer)
+ if err != nil {
+ return nil, err
+ }
+
+ balance, err := a.Balance(peer)
+ if err != nil {
+ if !errors.Is(err, ErrPeerNoBalance) {
+ return nil, err
+ }
+ }
+
+ // if surplus is 0 and peer has no balance, propagate ErrPeerNoBalance
+ if surplus.Cmp(big.NewInt(0)) == 0 && errors.Is(err, ErrPeerNoBalance) {
+ return nil, err
+ }
+ // Compensated balance is balance decreased by surplus balance
+ compensated = new(big.Int).Sub(balance, surplus)
+
+ return compensated, nil
+}
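+
+// For instance (hypothetical values): balance = -500 with surplus = 200
+// yields a compensated balance of -700, accounting for the extra 200 the
+// peer has already settled in advance.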
+
+// peerBalanceKey returns the balance storage key for the given peer.
+func peerBalanceKey(peer swarm.Address) string {
+ return fmt.Sprintf("%s%s", balancesPrefix, peer.String())
+}
+
+// peerSurplusBalanceKey returns the surplus balance storage key for the given peer
+func peerSurplusBalanceKey(peer swarm.Address) string {
+ return fmt.Sprintf("%s%s", balancesSurplusPrefix, peer.String())
+}
+
+func originatedBalanceKey(peer swarm.Address) string {
+ return fmt.Sprintf("%s%s", balancesOriginatedPrefix, peer.String())
+}
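+
+// Key layout (with a hypothetical peer address 1a2b...):
+//
+//	accounting_balance_1a2b...
+//	accounting_surplusbalance_1a2b...
+//	accounting_originatedbalance_1a2b...
+//
+// balanceKeyPeer and surplusBalanceKeyPeer below split on these prefixes to
+// recover the peer address.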
+
+// getAccountingPeer returns the accountingPeer for a given swarm address.
+// If not found in memory it will initialize it.
+func (a *Accounting) getAccountingPeer(peer swarm.Address) *accountingPeer {
+ a.accountingPeersMu.Lock()
+ defer a.accountingPeersMu.Unlock()
+
+ peerData, ok := a.accountingPeers[peer.String()]
+ if !ok {
+ peerData = &accountingPeer{
+ lock: NewMutex(),
+ reservedBalance: big.NewInt(0),
+ refreshReservedBalance: big.NewInt(0),
+ shadowReservedBalance: big.NewInt(0),
+ ghostBalance: big.NewInt(0),
+ totalDebtRepay: big.NewInt(0),
+ paymentThreshold: new(big.Int).Set(a.paymentThreshold),
+ paymentThresholdForPeer: new(big.Int).Set(a.paymentThreshold),
+ disconnectLimit: new(big.Int).Set(a.disconnectLimit),
+ thresholdGrowAt: new(big.Int).Set(a.thresholdGrowStep),
+ // initially assume the peer has the same threshold as us
+ earlyPayment: percentOf(100-a.earlyPayment, a.paymentThreshold),
+ connected: false,
+ }
+ a.accountingPeers[peer.String()] = peerData
+ }
+
+ return peerData
+}
+
+// notifyPaymentThresholdUpgrade is used when the cumulative debt settled by a peer reaches the current checkpoint,
+// to set the next checkpoint and increase the payment threshold given to the peer by 1 * refresh rate.
+// Must be called under the accountingPeer lock.
+func (a *Accounting) notifyPaymentThresholdUpgrade(peer swarm.Address, accountingPeer *accountingPeer) {
+
+ // get appropriate linear growth limit based on whether the peer is a full node or a light node
+ thresholdGrowChange := new(big.Int).Set(a.thresholdGrowChange)
+ if !accountingPeer.fullNode {
+ thresholdGrowChange.Set(a.lightThresholdGrowChange)
+ }
+
+ // if current checkpoint already passed linear growth limit, set next checkpoint exponentially
+ if accountingPeer.thresholdGrowAt.Cmp(thresholdGrowChange) >= 0 {
+ accountingPeer.thresholdGrowAt = new(big.Int).Mul(accountingPeer.thresholdGrowAt, big.NewInt(2))
+ } else {
+ // otherwise set next linear checkpoint
+ if accountingPeer.fullNode {
+ accountingPeer.thresholdGrowAt = new(big.Int).Add(accountingPeer.thresholdGrowAt, a.thresholdGrowStep)
+ } else {
+ accountingPeer.thresholdGrowAt = new(big.Int).Add(accountingPeer.thresholdGrowAt, a.lightThresholdGrowStep)
+ }
+ }
+
+ // get appropriate refresh rate
+ refreshRate := new(big.Int).Set(a.refreshRate)
+ if !accountingPeer.fullNode {
+ refreshRate = new(big.Int).Set(a.lightRefreshRate)
+ }
+
+ // increase given threshold by refresh rate
+ accountingPeer.paymentThresholdForPeer = new(big.Int).Add(accountingPeer.paymentThresholdForPeer, refreshRate)
+ // recalculate disconnectLimit for peer
+ accountingPeer.disconnectLimit = percentOf(100+a.paymentTolerance, accountingPeer.paymentThresholdForPeer)
+
+ // announce new payment threshold to peer
+ err := a.pricing.AnnouncePaymentThreshold(context.Background(), peer, accountingPeer.paymentThresholdForPeer)
+ if err != nil {
+ a.logger.Error(err, "announcing increased payment threshold", "value", accountingPeer.paymentThresholdForPeer, "peer_address", peer)
+ }
+}
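+
+// Checkpoint growth, illustrated (hypothetical refreshRate = 4500): the
+// checkpoint starts at 4500*100 = 450000 and advances by 450000 per upgrade
+// until it reaches 4500*1800 = 8100000, after which it doubles; each upgrade
+// also raises the peer's threshold by one refresh rate (4500).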
+
+// Balances gets balances for all peers from store.
+func (a *Accounting) Balances() (map[string]*big.Int, error) {
+ s := make(map[string]*big.Int)
+
+ err := a.store.Iterate(balancesPrefix, func(key, val []byte) (stop bool, err error) {
+ addr, err := balanceKeyPeer(key)
+ if err != nil {
+ return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
+ }
+
+ if _, ok := s[addr.String()]; !ok {
+ var storevalue *big.Int
+ err = a.store.Get(peerBalanceKey(addr), &storevalue)
+ if err != nil {
+ return false, fmt.Errorf("get peer %s balance: %w", addr.String(), err)
+ }
+
+ s[addr.String()] = storevalue
+ }
+
+ return false, nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+type PeerInfo struct {
+ Balance *big.Int
+ ConsumedBalance *big.Int
+ ThresholdReceived *big.Int
+ ThresholdGiven *big.Int
+ CurrentThresholdReceived *big.Int
+ CurrentThresholdGiven *big.Int
+ SurplusBalance *big.Int
+ ReservedBalance *big.Int
+ ShadowReservedBalance *big.Int
+ GhostBalance *big.Int
+}
+
+func (a *Accounting) PeerAccounting() (map[string]PeerInfo, error) {
+ s := make(map[string]PeerInfo)
+
+ a.accountingPeersMu.Lock()
+ accountingPeersList := make(map[string]*accountingPeer)
+ for peer, accountingPeer := range a.accountingPeers {
+ accountingPeersList[peer] = accountingPeer
+ }
+ a.accountingPeersMu.Unlock()
+
+ for peer, accountingPeer := range accountingPeersList {
+
+ peerAddress := swarm.MustParseHexAddress(peer)
+
+ balance, err := a.Balance(peerAddress)
+ if errors.Is(err, ErrPeerNoBalance) {
+ balance = big.NewInt(0)
+ } else if err != nil {
+ return nil, err
+ }
+
+ surplusBalance, err := a.SurplusBalance(peerAddress)
+ if err != nil {
+ return nil, err
+ }
+
+ accountingPeer.lock.Lock()
+
+ t := a.timeNow()
+
+ timeElapsedInSeconds := t.Unix() - accountingPeer.refreshReceivedTimestamp
+ if timeElapsedInSeconds > 1 {
+ timeElapsedInSeconds = 1
+ }
+
+ // get appropriate refresh rate
+ refreshRate := new(big.Int).Set(a.refreshRate)
+ if !accountingPeer.fullNode {
+ refreshRate = new(big.Int).Set(a.lightRefreshRate)
+ }
+
+ refreshDue := new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), refreshRate)
+ currentThresholdGiven := new(big.Int).Add(accountingPeer.disconnectLimit, refreshDue)
+
+ timeElapsedInSeconds = (t.UnixMilli() - accountingPeer.refreshTimestampMilliseconds) / 1000
+ if timeElapsedInSeconds > 1 {
+ timeElapsedInSeconds = 1
+ }
+
+ // get appropriate refresh rate
+ refreshDue = new(big.Int).Mul(big.NewInt(timeElapsedInSeconds), a.refreshRate)
+ currentThresholdReceived := new(big.Int).Add(accountingPeer.paymentThreshold, refreshDue)
+
+ s[peer] = PeerInfo{
+ Balance: new(big.Int).Sub(balance, surplusBalance),
+ ConsumedBalance: new(big.Int).Set(balance),
+ ThresholdReceived: new(big.Int).Set(accountingPeer.paymentThreshold),
+ CurrentThresholdReceived: currentThresholdReceived,
+ CurrentThresholdGiven: currentThresholdGiven,
+ ThresholdGiven: new(big.Int).Set(accountingPeer.paymentThresholdForPeer),
+ SurplusBalance: new(big.Int).Set(surplusBalance),
+ ReservedBalance: new(big.Int).Set(accountingPeer.reservedBalance),
+ ShadowReservedBalance: new(big.Int).Set(accountingPeer.shadowReservedBalance),
+ GhostBalance: new(big.Int).Set(accountingPeer.ghostBalance),
+ }
+ accountingPeer.lock.Unlock()
+ }
+
+ return s, nil
+}
+
+// CompensatedBalances gets balances for all peers from store.
+func (a *Accounting) CompensatedBalances() (map[string]*big.Int, error) {
+ s := make(map[string]*big.Int)
+
+ err := a.store.Iterate(balancesPrefix, func(key, val []byte) (stop bool, err error) {
+ addr, err := balanceKeyPeer(key)
+ if err != nil {
+ return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
+ }
+ if _, ok := s[addr.String()]; !ok {
+ value, err := a.CompensatedBalance(addr)
+ if err != nil {
+ return false, fmt.Errorf("get peer %s balance: %w", addr.String(), err)
+ }
+
+ s[addr.String()] = value
+ }
+
+ return false, nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = a.store.Iterate(balancesSurplusPrefix, func(key, val []byte) (stop bool, err error) {
+ addr, err := surplusBalanceKeyPeer(key)
+ if err != nil {
+ return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
+ }
+ if _, ok := s[addr.String()]; !ok {
+ value, err := a.CompensatedBalance(addr)
+ if err != nil {
+ return false, fmt.Errorf("get peer %s balance: %w", addr.String(), err)
+ }
+
+ s[addr.String()] = value
+ }
+
+ return false, nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// balanceKeyPeer returns the embedded peer from the balance storage key.
+func balanceKeyPeer(key []byte) (swarm.Address, error) {
+ k := string(key)
+
+ split := strings.SplitAfter(k, balancesPrefix)
+ if len(split) != 2 {
+ return swarm.ZeroAddress, errors.New("no peer in key")
+ }
+
+ addr, err := swarm.ParseHexAddress(split[1])
+ if err != nil {
+ return swarm.ZeroAddress, err
+ }
+
+ return addr, nil
+}
+
+func surplusBalanceKeyPeer(key []byte) (swarm.Address, error) {
+ k := string(key)
+
+ split := strings.SplitAfter(k, balancesSurplusPrefix)
+ if len(split) != 2 {
+ return swarm.ZeroAddress, errors.New("no peer in key")
+ }
+
+ addr, err := swarm.ParseHexAddress(split[1])
+ if err != nil {
+ return swarm.ZeroAddress, err
+ }
+
+ return addr, nil
+}
+
+// PeerDebt returns the positive part of the sum of the outstanding balance and the shadow reserve
+func (a *Accounting) PeerDebt(peer swarm.Address) (*big.Int, error) {
+ accountingPeer := a.getAccountingPeer(peer)
+
+ accountingPeer.lock.Lock()
+ defer accountingPeer.lock.Unlock()
+
+ balance := new(big.Int)
+ zero := big.NewInt(0)
+
+ err := a.store.Get(peerBalanceKey(peer), &balance)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, err
+ }
+ balance = big.NewInt(0)
+ }
+
+ peerDebt := new(big.Int).Add(balance, accountingPeer.shadowReservedBalance)
+
+ if peerDebt.Cmp(zero) < 0 {
+ return zero, nil
+ }
+
+ return peerDebt, nil
+}
+
+// peerLatentDebt returns the sum of the positive part of the outstanding balance, shadow reserve and the ghost balance
+func (a *Accounting) peerLatentDebt(peer swarm.Address) (*big.Int, error) {
+
+ accountingPeer := a.getAccountingPeer(peer)
+
+ balance := new(big.Int)
+ zero := big.NewInt(0)
+
+ err := a.store.Get(peerBalanceKey(peer), &balance)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, err
+ }
+ balance = big.NewInt(0)
+ }
+
+ if balance.Cmp(zero) < 0 {
+ balance.Set(zero)
+ }
+
+ peerDebt := new(big.Int).Add(balance, accountingPeer.shadowReservedBalance)
+ peerLatentDebt := new(big.Int).Add(peerDebt, accountingPeer.ghostBalance)
+
+ if peerLatentDebt.Cmp(zero) < 0 {
+ return zero, nil
+ }
+
+ return peerLatentDebt, nil
+}
+
+// shadowBalance returns the current debt reduced by any potentially debitable amount stored in shadowReservedBalance.
+// This represents how much lower our debt could appear to the other party if it is ahead of us in processing the credits corresponding to our shadow reserve.
+func (a *Accounting) shadowBalance(peer swarm.Address, accountingPeer *accountingPeer) (shadowBalance *big.Int, err error) {
+ balance := new(big.Int)
+ zero := big.NewInt(0)
+
+ err = a.store.Get(peerBalanceKey(peer), &balance)
+ if err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ return zero, nil
+ }
+ return nil, err
+ }
+
+ if balance.Cmp(zero) >= 0 {
+ return zero, nil
+ }
+
+ negativeBalance := new(big.Int).Neg(balance)
+
+ surplusBalance, err := a.SurplusBalance(peer)
+ if err != nil {
+ return nil, err
+ }
+
+ debt := new(big.Int).Add(negativeBalance, surplusBalance)
+
+ if debt.Cmp(accountingPeer.shadowReservedBalance) < 0 {
+ return zero, nil
+ }
+
+ shadowBalance = new(big.Int).Sub(negativeBalance, accountingPeer.shadowReservedBalance)
+
+ return shadowBalance, nil
+}
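+
+// Worked example (hypothetical values): balance = -1000, surplus = 200 and
+// shadowReservedBalance = 300 give
+//
+//	debt          = 1000 + 200 = 1200  (>= 300, so we proceed)
+//	shadowBalance = 1000 - 300 = 700
+//
+// i.e. our debt as seen by the peer is at least 700 even once all pending
+// debits are processed on its side.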
+
+// NotifyPaymentThreshold should be called to notify accounting of changes in the payment threshold
+func (a *Accounting) NotifyPaymentThreshold(peer swarm.Address, paymentThreshold *big.Int) error {
+ accountingPeer := a.getAccountingPeer(peer)
+
+ accountingPeer.lock.Lock()
+ defer accountingPeer.lock.Unlock()
+
+ accountingPeer.paymentThreshold.Set(paymentThreshold)
+ accountingPeer.earlyPayment.Set(percentOf(100-a.earlyPayment, paymentThreshold))
+ return nil
+}
+
+// NotifyPaymentReceived is called by Settlement when we receive a payment.
+func (a *Accounting) NotifyPaymentReceived(peer swarm.Address, amount *big.Int) error {
+ loggerV2 := a.logger.V(2).Register()
+
+ accountingPeer := a.getAccountingPeer(peer)
+
+ accountingPeer.lock.Lock()
+ defer accountingPeer.lock.Unlock()
+
+ accountingPeer.totalDebtRepay = new(big.Int).Add(accountingPeer.totalDebtRepay, amount)
+
+ if accountingPeer.totalDebtRepay.Cmp(accountingPeer.thresholdGrowAt) > 0 {
+ a.notifyPaymentThresholdUpgrade(peer, accountingPeer)
+ }
+
+ currentBalance, err := a.Balance(peer)
+ if err != nil {
+ if !errors.Is(err, ErrPeerNoBalance) {
+ return err
+ }
+ }
+
+ // if balance is already negative or zero, we credit full amount received to surplus balance and terminate early
+ if currentBalance.Cmp(big.NewInt(0)) <= 0 {
+ surplus, err := a.SurplusBalance(peer)
+ if err != nil {
+ return fmt.Errorf("failed to get surplus balance: %w", err)
+ }
+ increasedSurplus := new(big.Int).Add(surplus, amount)
+
+ loggerV2.Debug("surplus crediting peer", "peer_address", peer, "amount", amount, "new_balance", increasedSurplus)
+
+ err = a.store.Put(peerSurplusBalanceKey(peer), increasedSurplus)
+ if err != nil {
+ return fmt.Errorf("failed to persist surplus balance: %w", err)
+ }
+
+ return nil
+ }
+
+ // if the current balance is positive, apply the payment against it as a partial credit
+ newBalance := new(big.Int).Sub(currentBalance, amount)
+
+ // Don't allow a payment to put us into debt
+ // This is to prevent another node tricking us into settling by settling
+ // first (e.g. send a bouncing cheque to trigger an honest cheque in swap).
+ nextBalance := newBalance
+ if newBalance.Cmp(big.NewInt(0)) < 0 {
+ nextBalance = big.NewInt(0)
+ }
+
+ loggerV2.Debug("crediting peer", "peer_address", peer, "amount", amount, "new_balance", nextBalance)
+
+ err = a.store.Put(peerBalanceKey(peer), nextBalance)
+ if err != nil {
+ return fmt.Errorf("failed to persist balance: %w", err)
+ }
+
+ // If the payment would have put us into debt, add the overshoot to surplusBalance
+ // instead, so that an oversettlement attempt creates balance from which future
+ // forwarding service charges can be deducted
+ if newBalance.Cmp(big.NewInt(0)) < 0 {
+ surplusGrowth := new(big.Int).Sub(amount, currentBalance)
+
+ surplus, err := a.SurplusBalance(peer)
+ if err != nil {
+ return fmt.Errorf("failed to get surplus balance: %w", err)
+ }
+ increasedSurplus := new(big.Int).Add(surplus, surplusGrowth)
+
+ loggerV2.Debug("surplus crediting peer due to refreshment", "peer_address", peer, "amount", surplusGrowth, "new_balance", increasedSurplus)
+
+ err = a.store.Put(peerSurplusBalanceKey(peer), increasedSurplus)
+ if err != nil {
+ return fmt.Errorf("failed to persist surplus balance: %w", err)
+ }
+ }
+
+ return nil
+}
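A short sketch of the oversettlement split above, with assumed numbers: the payment first clears the peer's debt, and only the overshoot is booked as surplus:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	currentBalance := big.NewInt(100) // the peer owes us 100
	amount := big.NewInt(150)         // the peer settles 150

	newBalance := new(big.Int).Sub(currentBalance, amount) // -50

	nextBalance := newBalance
	if newBalance.Cmp(big.NewInt(0)) < 0 {
		nextBalance = big.NewInt(0) // a payment never puts us into debt
	}

	surplusGrowth := new(big.Int).Sub(amount, currentBalance) // 50

	fmt.Println(nextBalance, surplusGrowth) // 0 50
}
```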
+
+// NotifyRefreshmentReceived is called by pseudosettle when we receive a time based settlement.
+func (a *Accounting) NotifyRefreshmentReceived(peer swarm.Address, amount *big.Int, timestamp int64) error {
+ loggerV2 := a.logger.V(2).Register()
+
+ accountingPeer := a.getAccountingPeer(peer)
+
+ accountingPeer.lock.Lock()
+ defer accountingPeer.lock.Unlock()
+
+ accountingPeer.totalDebtRepay = new(big.Int).Add(accountingPeer.totalDebtRepay, amount)
+
+ if accountingPeer.totalDebtRepay.Cmp(accountingPeer.thresholdGrowAt) > 0 {
+ a.notifyPaymentThresholdUpgrade(peer, accountingPeer)
+ }
+
+ currentBalance, err := a.Balance(peer)
+ if err != nil {
+ if !errors.Is(err, ErrPeerNoBalance) {
+ return err
+ }
+ }
+
+ // Get nextBalance by decreasing the current balance by amount (crediting the peer)
+ nextBalance := new(big.Int).Sub(currentBalance, amount)
+
+ // We allow a refreshment to potentially put us into debt, since it was previously negotiated and is limited to the peer's outstanding debt plus the shadow reserve
+ loggerV2.Debug("crediting peer", "peer_address", peer, "amount", amount, "new_balance", nextBalance)
+ err = a.store.Put(peerBalanceKey(peer), nextBalance)
+ if err != nil {
+ return fmt.Errorf("failed to persist balance: %w", err)
+ }
+
+ accountingPeer.refreshReceivedTimestamp = timestamp
+
+ return nil
+}
+
+// PrepareDebit prepares a debit operation by increasing the shadowReservedBalance
+func (a *Accounting) PrepareDebit(ctx context.Context, peer swarm.Address, price uint64) (Action, error) {
+ loggerV2 := a.logger.V(2).Register()
+
+ accountingPeer := a.getAccountingPeer(peer)
+
+ if err := accountingPeer.lock.TryLock(ctx); err != nil {
+ loggerV2.Debug("prepare debit; failed to acquire lock", "error", err)
+ return nil, err
+ }
+
+ defer accountingPeer.lock.Unlock()
+
+ if !accountingPeer.connected {
+ return nil, errors.New("connection not initialized yet")
+ }
+
+ bigPrice := new(big.Int).SetUint64(price)
+
+ accountingPeer.shadowReservedBalance = new(big.Int).Add(accountingPeer.shadowReservedBalance, bigPrice)
+ // if a refreshment is ongoing, add this amount to the potential debt decrease tracked for that refreshment
+ if accountingPeer.refreshOngoing {
+ accountingPeer.refreshReservedBalance = new(big.Int).Add(accountingPeer.refreshReservedBalance, bigPrice)
+ }
+
+ return &debitAction{
+ accounting: a,
+ price: bigPrice,
+ peer: peer,
+ accountingPeer: accountingPeer,
+ applied: false,
+ }, nil
+}
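A minimal sketch of the intended call pattern for a protocol handler, assuming the package's Action interface exposes Apply and Cleanup and that PrepareDebit is reachable through accounting.Interface; the handler body is illustrative:

```go
package example

import (
	"context"

	"github.com/ethersphere/bee/v2/pkg/accounting"
	"github.com/ethersphere/bee/v2/pkg/swarm"
)

// serve reserves the price up front, does the chargeable work, and only
// then applies the debit. Cleanup is deferred so the shadow reserve is
// always released; it is a no-op once the debit has been applied.
func serve(ctx context.Context, acc accounting.Interface, peer swarm.Address, price uint64) error {
	debit, err := acc.PrepareDebit(ctx, peer, price)
	if err != nil {
		return err
	}
	defer debit.Cleanup()

	// ... perform the work the peer is being charged for ...

	return debit.Apply()
}
```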
+
+func (a *Accounting) increaseBalance(peer swarm.Address, _ *accountingPeer, price *big.Int) (*big.Int, error) {
+ loggerV2 := a.logger.V(2).Register()
+
+ cost := new(big.Int).Set(price)
+ // see if the peer has surplus balance from which this transaction can be deducted
+
+ surplusBalance, err := a.SurplusBalance(peer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get surplus balance: %w", err)
+ }
+
+ if surplusBalance.Cmp(big.NewInt(0)) > 0 {
+ // get the new surplus balance after the deduction
+ newSurplusBalance := new(big.Int).Sub(surplusBalance, cost)
+
+ // if nothing left for debiting, store new surplus balance and return from debit
+ if newSurplusBalance.Cmp(big.NewInt(0)) >= 0 {
+ loggerV2.Debug("surplus debiting peer", "peer_address", peer, "price", price, "new_balance", newSurplusBalance)
+
+ err = a.store.Put(peerSurplusBalanceKey(peer), newSurplusBalance)
+ if err != nil {
+ return nil, fmt.Errorf("failed to persist surplus balance: %w", err)
+ }
+
+ return a.Balance(peer)
+ }
+
+ // if the surplus balance didn't cover the full transaction, continue with the leftover part as the cost
+ debitIncrease := new(big.Int).Sub(price, surplusBalance)
+
+ // a sanity check
+ if debitIncrease.Cmp(big.NewInt(0)) <= 0 {
+ return nil, errors.New("sanity check failed for partial debit after surplus balance drawn")
+ }
+ cost.Set(debitIncrease)
+
+ // if we still have something to debit, then we have run out of surplus balance,
+ // so store 0 as the surplus balance
+ loggerV2.Debug("surplus debiting peer", "peer_address", peer, "amount", debitIncrease, "new_balance", 0)
+
+ err = a.store.Put(peerSurplusBalanceKey(peer), big.NewInt(0))
+ if err != nil {
+ return nil, fmt.Errorf("failed to persist surplus balance: %w", err)
+ }
+ }
+
+ currentBalance, err := a.Balance(peer)
+ if err != nil {
+ if !errors.Is(err, ErrPeerNoBalance) {
+ return nil, fmt.Errorf("failed to load balance: %w", err)
+ }
+ }
+
+ // Get nextBalance by increasing the current balance by the remaining cost
+ nextBalance := new(big.Int).Add(currentBalance, cost)
+
+ loggerV2.Debug("debiting peer", "peer_address", peer, "price", price, "new_balance", nextBalance)
+
+ err = a.store.Put(peerBalanceKey(peer), nextBalance)
+ if err != nil {
+ return nil, fmt.Errorf("failed to persist balance: %w", err)
+ }
+
+ err = a.decreaseOriginatedBalanceTo(peer, nextBalance)
+ if err != nil {
+ a.logger.Warning("increase balance; failed to decrease originated balance", "error", err)
+ }
+
+ return nextBalance, nil
+}
+
+// Cleanup reduces the shadow reserve if and only if the debit action has not been applied
+func (d *debitAction) Cleanup() {
+ if d.applied {
+ return
+ }
+
+ d.accountingPeer.lock.Lock()
+ defer d.accountingPeer.lock.Unlock()
+
+ a := d.accounting
+ d.accountingPeer.shadowReservedBalance = new(big.Int).Sub(d.accountingPeer.shadowReservedBalance, d.price)
+ d.accountingPeer.ghostBalance = new(big.Int).Add(d.accountingPeer.ghostBalance, d.price)
+ if d.accountingPeer.ghostBalance.Cmp(d.accountingPeer.disconnectLimit) > 0 {
+ _ = a.blocklist(d.peer, 1, "ghost overdraw")
+ }
+}
+
+func (a *Accounting) blocklistUntil(peer swarm.Address, multiplier int64) (int64, error) {
+
+ debt, err := a.peerLatentDebt(peer)
+ if err != nil {
+ return 0, err
+ }
+
+ if debt.Cmp(a.refreshRate) < 0 {
+ debt.Set(a.refreshRate)
+ }
+
+ additionalDebt := new(big.Int).Add(debt, a.paymentThreshold)
+
+ multiplyDebt := new(big.Int).Mul(additionalDebt, big.NewInt(multiplier))
+
+ k := new(big.Int).Div(multiplyDebt, a.refreshRate)
+
+ kInt := k.Int64()
+
+ return kInt, nil
+}
+
+func (a *Accounting) blocklist(peer swarm.Address, multiplier int64, reason string) error {
+ disconnectFor, err := a.blocklistUntil(peer, multiplier)
+ if err != nil {
+ return a.p2p.Blocklist(peer, 1*time.Minute, reason)
+ }
+
+ return a.p2p.Blocklist(peer, time.Duration(disconnectFor)*time.Second, reason)
+}
+
+func (a *Accounting) Connect(peer swarm.Address, fullNode bool) {
+ accountingPeer := a.getAccountingPeer(peer)
+ zero := big.NewInt(0)
+
+ accountingPeer.lock.Lock()
+ defer accountingPeer.lock.Unlock()
+
+ paymentThreshold := new(big.Int).Set(a.paymentThreshold)
+ thresholdGrowStep := new(big.Int).Set(a.thresholdGrowStep)
+ disconnectLimit := new(big.Int).Set(a.disconnectLimit)
+
+ if !fullNode {
+ paymentThreshold.Set(a.lightPaymentThreshold)
+ thresholdGrowStep.Set(a.lightThresholdGrowStep)
+ disconnectLimit.Set(a.lightDisconnectLimit)
+ }
+
+ accountingPeer.connected = true
+ accountingPeer.fullNode = fullNode
+ accountingPeer.shadowReservedBalance.Set(zero)
+ accountingPeer.ghostBalance.Set(zero)
+ accountingPeer.reservedBalance.Set(zero)
+ accountingPeer.refreshReservedBalance.Set(zero)
+ accountingPeer.paymentThresholdForPeer.Set(paymentThreshold)
+ accountingPeer.thresholdGrowAt.Set(thresholdGrowStep)
+ accountingPeer.disconnectLimit.Set(disconnectLimit)
+
+ err := a.store.Put(peerBalanceKey(peer), zero)
+ if err != nil {
+ a.logger.Error(err, "failed to persist balance")
+ }
+
+ err = a.store.Put(peerSurplusBalanceKey(peer), zero)
+ if err != nil {
+ a.logger.Error(err, "failed to persist surplus balance")
+ }
+}
+
+// decreaseOriginatedBalanceTo decreases the originated balance to the provided limit, or to 0 if the limit is positive
+func (a *Accounting) decreaseOriginatedBalanceTo(peer swarm.Address, limit *big.Int) error {
+ loggerV2 := a.logger.V(2).Register()
+
+ zero := big.NewInt(0)
+
+ toSet := new(big.Int).Set(limit)
+
+ originatedBalance, err := a.OriginatedBalance(peer)
+ if err != nil && !errors.Is(err, ErrPeerNoBalance) {
+ return fmt.Errorf("failed to load originated balance: %w", err)
+ }
+
+ if toSet.Cmp(zero) > 0 {
+ toSet.Set(zero)
+ }
+
+ // If the originated balance is further into the negative domain than the limit, set it to the limit
+ if originatedBalance.Cmp(toSet) < 0 {
+ err = a.store.Put(originatedBalanceKey(peer), toSet)
+ if err != nil {
+ return fmt.Errorf("failed to persist originated balance: %w", err)
+ }
+ loggerV2.Debug("decreasing originated balance of peer", "peer_address", peer, "new_balance", toSet)
+ }
+
+ return nil
+}
+
+// decreaseOriginatedBalanceBy decreases the originated balance by the provided amount, even below 0
+func (a *Accounting) decreaseOriginatedBalanceBy(peer swarm.Address, amount *big.Int) error {
+ loggerV2 := a.logger.V(2).Register()
+
+ originatedBalance, err := a.OriginatedBalance(peer)
+ if err != nil && !errors.Is(err, ErrPeerNoBalance) {
+ return fmt.Errorf("failed to load balance: %w", err)
+ }
+
+ // Move originated balance into the positive domain by amount
+ newOriginatedBalance := new(big.Int).Add(originatedBalance, amount)
+
+ err = a.store.Put(originatedBalanceKey(peer), newOriginatedBalance)
+ if err != nil {
+ return fmt.Errorf("failed to persist originated balance: %w", err)
+ }
+ loggerV2.Debug("decreasing originated balance of peer", "peer_address", peer, "amount", amount, "new_balance", newOriginatedBalance)
+
+ return nil
+}
+
+func (a *Accounting) SetRefreshFunc(f RefreshFunc) {
+ a.refreshFunction = f
+}
+
+func (a *Accounting) SetPayFunc(f PayFunc) {
+ a.payFunction = f
+}
+
+// Close waits for in-flight accounting operations to finish on shutdown.
+func (a *Accounting) Close() error {
+ a.wg.Wait()
+ return nil
+}
+
+func percentOf(percent int64, of *big.Int) *big.Int {
+ return new(big.Int).Div(new(big.Int).Mul(of, big.NewInt(percent)), big.NewInt(100))
+}
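percentOf multiplies before dividing, so precision is lost only in the final integer division; it is what derives earlyPayment from the payment threshold above. A quick check with assumed inputs:

```go
package main

import (
	"fmt"
	"math/big"
)

func percentOf(percent int64, of *big.Int) *big.Int {
	return new(big.Int).Div(new(big.Int).Mul(of, big.NewInt(percent)), big.NewInt(100))
}

func main() {
	fmt.Println(percentOf(50, big.NewInt(13500))) // 6750
	fmt.Println(percentOf(33, big.NewInt(10)))    // 3: truncated toward zero
}
```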
diff --git a/pkg/accounting/metrics.go b/pkg/accounting/metrics.go
index e38bdbbd989..ef4a2fe06b4 100644
--- a/pkg/accounting/metrics.go
+++ b/pkg/accounting/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
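The `!js` constraint drops this file from `GOOS=js` builds, so a wasm build must get any symbols it still needs from a `js`-tagged counterpart. A toy sketch of that pairing (package and file names are illustrative, not part of this change):

```go
// collector.go — compiled for every target except GOOS=js.
//
//go:build !js
// +build !js

package metricsdemo

const collectorEnabled = true
```

```go
// collector_js.go — compiled only under GOOS=js; same package API.
//
//go:build js
// +build js

package metricsdemo

const collectorEnabled = false
```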
diff --git a/pkg/api/accounting.go b/pkg/api/accounting.go
index fce1056bbb7..12ec60240f0 100644
--- a/pkg/api/accounting.go
+++ b/pkg/api/accounting.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/api.go b/pkg/api/api.go
index 4fd5390a203..03536504cd6 100644
--- a/pkg/api/api.go
+++ b/pkg/api/api.go
@@ -1,21 +1,13 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package api provides the functionality of the Bee
-// client-facing HTTP API.
package api
import (
- "context"
"crypto/ecdsa"
"encoding/base64"
"encoding/hex"
"errors"
- "fmt"
- "io"
- "math"
- "math/big"
"mime"
"net/http"
"reflect"
@@ -23,18 +15,13 @@ import (
"strings"
"sync"
"time"
- "unicode/utf8"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/bee/v2/pkg/accesscontrol"
"github.com/ethersphere/bee/v2/pkg/accounting"
"github.com/ethersphere/bee/v2/pkg/crypto"
"github.com/ethersphere/bee/v2/pkg/feeds"
- "github.com/ethersphere/bee/v2/pkg/file/pipeline"
- "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder"
- "github.com/ethersphere/bee/v2/pkg/file/redundancy"
"github.com/ethersphere/bee/v2/pkg/gsoc"
- "github.com/ethersphere/bee/v2/pkg/jsonhttp"
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/p2p"
"github.com/ethersphere/bee/v2/pkg/pingpong"
@@ -43,7 +30,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/pss"
"github.com/ethersphere/bee/v2/pkg/resolver"
"github.com/ethersphere/bee/v2/pkg/resolver/client/ens"
- "github.com/ethersphere/bee/v2/pkg/sctx"
"github.com/ethersphere/bee/v2/pkg/settlement"
"github.com/ethersphere/bee/v2/pkg/settlement/swap"
"github.com/ethersphere/bee/v2/pkg/settlement/swap/chequebook"
@@ -53,7 +39,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/storageincentives"
"github.com/ethersphere/bee/v2/pkg/storageincentives/staking"
- "github.com/ethersphere/bee/v2/pkg/storer"
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/topology"
"github.com/ethersphere/bee/v2/pkg/topology/lightnode"
@@ -61,95 +46,10 @@ import (
"github.com/ethersphere/bee/v2/pkg/transaction"
"github.com/go-playground/validator/v10"
"github.com/gorilla/mux"
- "github.com/hashicorp/go-multierror"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sync/semaphore"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "api"
-
-const (
- SwarmPinHeader = "Swarm-Pin"
- SwarmTagHeader = "Swarm-Tag"
- SwarmEncryptHeader = "Swarm-Encrypt"
- SwarmIndexDocumentHeader = "Swarm-Index-Document"
- SwarmErrorDocumentHeader = "Swarm-Error-Document"
- SwarmSocSignatureHeader = "Swarm-Soc-Signature"
- SwarmFeedIndexHeader = "Swarm-Feed-Index"
- SwarmFeedIndexNextHeader = "Swarm-Feed-Index-Next"
- SwarmLegacyFeedResolve = "Swarm-Feed-Legacy-Resolve"
- SwarmOnlyRootChunk = "Swarm-Only-Root-Chunk"
- SwarmCollectionHeader = "Swarm-Collection"
- SwarmPostageBatchIdHeader = "Swarm-Postage-Batch-Id"
- SwarmPostageStampHeader = "Swarm-Postage-Stamp"
- SwarmDeferredUploadHeader = "Swarm-Deferred-Upload"
- SwarmRedundancyLevelHeader = "Swarm-Redundancy-Level"
- SwarmRedundancyStrategyHeader = "Swarm-Redundancy-Strategy"
- SwarmRedundancyFallbackModeHeader = "Swarm-Redundancy-Fallback-Mode"
- SwarmChunkRetrievalTimeoutHeader = "Swarm-Chunk-Retrieval-Timeout"
- SwarmLookAheadBufferSizeHeader = "Swarm-Lookahead-Buffer-Size"
- SwarmActHeader = "Swarm-Act"
- SwarmActTimestampHeader = "Swarm-Act-Timestamp"
- SwarmActPublisherHeader = "Swarm-Act-Publisher"
- SwarmActHistoryAddressHeader = "Swarm-Act-History-Address"
-
- ImmutableHeader = "Immutable"
- GasPriceHeader = "Gas-Price"
- GasLimitHeader = "Gas-Limit"
- ETagHeader = "ETag"
-
- AuthorizationHeader = "Authorization"
- AcceptEncodingHeader = "Accept-Encoding"
- ContentTypeHeader = "Content-Type"
- ContentDispositionHeader = "Content-Disposition"
- ContentLengthHeader = "Content-Length"
- RangeHeader = "Range"
- OriginHeader = "Origin"
- AccessControlExposeHeaders = "Access-Control-Expose-Headers"
-)
-
-const (
- multiPartFormData = "multipart/form-data"
- contentTypeTar = "application/x-tar"
- boolHeaderSetValue = "true"
-)
-
-var (
- errInvalidNameOrAddress = errors.New("invalid name or bzz address")
- errNoResolver = errors.New("no resolver connected")
- errInvalidRequest = errors.New("could not validate request")
- errInvalidContentType = errors.New("invalid content-type")
- errDirectoryStore = errors.New("could not store directory")
- errFileStore = errors.New("could not store file")
- errInvalidPostageBatch = errors.New("invalid postage batch id")
- errBatchUnusable = errors.New("batch not usable")
- errUnsupportedDevNodeOperation = errors.New("operation not supported in dev mode")
- errOperationSupportedOnlyInFullMode = errors.New("operation is supported only in full mode")
- errActDownload = errors.New("act download failed")
- errActUpload = errors.New("act upload failed")
- errActGranteeList = errors.New("failed to create or update grantee list")
-
- batchIdOrStampSig = fmt.Sprintf("Either '%s' or '%s' header must be set in the request", SwarmPostageStampHeader, SwarmPostageBatchIdHeader)
-)
-
-// Storer interface provides the functionality required from the local storage
-// component of the node.
-type Storer interface {
- storer.UploadStore
- storer.PinStore
- storer.CacheStore
- storer.NetStore
- storer.LocalStore
- storer.RadiusChecker
- storer.Debugger
- storer.NeighborhoodStats
-}
-
-type PinIntegrity interface {
- Check(ctx context.Context, logger log.Logger, pin string, out chan storer.PinStat)
-}
-
type Service struct {
storer Storer
resolver resolver.Interface
@@ -224,51 +124,58 @@ type Service struct {
isWarmingUp bool
}
-func (s *Service) SetP2P(p2p p2p.DebugService) {
- if s != nil {
- s.p2p = p2p
- }
-}
+// Configure initializes the API service with its options and dependencies.
+func (s *Service) Configure(signer crypto.Signer, tracer *tracing.Tracer, o Options, e ExtraOptions, chainID int64, erc20 erc20.Service) {
+ s.signer = signer
+ s.Options = o
+ s.tracer = tracer
+ s.metrics = newMetrics()
-func (s *Service) SetSwarmAddress(addr *swarm.Address) {
- if s != nil {
- s.overlay = addr
- }
-}
+ s.quit = make(chan struct{})
-func (s *Service) SetRedistributionAgent(redistributionAgent *storageincentives.Agent) {
- if s != nil {
- s.redistributionAgent = redistributionAgent
- }
-}
+ s.storer = e.Storer
+ s.resolver = e.Resolver
+ s.pss = e.Pss
+ s.gsoc = e.Gsoc
+ s.feedFactory = e.FeedFactory
+ s.post = e.Post
+ s.accesscontrol = e.AccessControl
+ s.postageContract = e.PostageContract
+ s.steward = e.Steward
+ s.stakingContract = e.Staking
-type Options struct {
- CORSAllowedOrigins []string
- WsPingPeriod time.Duration
-}
+ s.pingpong = e.Pingpong
+ s.topologyDriver = e.TopologyDriver
+ s.accounting = e.Accounting
+ s.chequebook = e.Chequebook
+ s.swap = e.Swap
+ s.lightNodes = e.LightNodes
+ s.pseudosettle = e.Pseudosettle
+ s.blockTime = e.BlockTime
+
+ s.statusSem = semaphore.NewWeighted(1)
+ s.postageSem = semaphore.NewWeighted(1)
+ s.stakingSem = semaphore.NewWeighted(1)
+ s.cashOutChequeSem = semaphore.NewWeighted(1)
+
+ s.chainID = chainID
+ s.erc20Service = erc20
+ s.syncStatus = e.SyncStatus
-type ExtraOptions struct {
- Pingpong pingpong.Interface
- TopologyDriver topology.Driver
- LightNodes *lightnode.Container
- Accounting accounting.Interface
- Pseudosettle settlement.Interface
- Swap swap.Interface
- Chequebook chequebook.Service
- BlockTime time.Duration
- Storer Storer
- Resolver resolver.Interface
- Pss pss.Interface
- Gsoc gsoc.Listener
- FeedFactory feeds.Factory
- Post postage.Service
- AccessControl accesscontrol.Controller
- PostageContract postagecontract.Interface
- Staking staking.Contract
- Steward steward.Interface
- SyncStatus func() (bool, error)
- NodeStatus *status.Service
- PinIntegrity PinIntegrity
+ s.statusService = e.NodeStatus
+
+ s.preMapHooks["resolve"] = func(v string) (string, error) {
+ switch addr, err := s.resolveNameOrAddress(v); {
+ case err == nil:
+ return addr.String(), nil
+ case errors.Is(err, ens.ErrNotImplemented):
+ return v, nil
+ default:
+ return "", err
+ }
+ }
+
+ s.pinIntegrity = e.PinIntegrity
}
func New(
@@ -331,151 +238,6 @@ func New(
return s
}
-// Configure will create a and initialize a new API service.
-func (s *Service) Configure(signer crypto.Signer, tracer *tracing.Tracer, o Options, e ExtraOptions, chainID int64, erc20 erc20.Service) {
- s.signer = signer
- s.Options = o
- s.tracer = tracer
- s.metrics = newMetrics()
-
- s.quit = make(chan struct{})
-
- s.storer = e.Storer
- s.resolver = e.Resolver
- s.pss = e.Pss
- s.gsoc = e.Gsoc
- s.feedFactory = e.FeedFactory
- s.post = e.Post
- s.accesscontrol = e.AccessControl
- s.postageContract = e.PostageContract
- s.steward = e.Steward
- s.stakingContract = e.Staking
-
- s.pingpong = e.Pingpong
- s.topologyDriver = e.TopologyDriver
- s.accounting = e.Accounting
- s.chequebook = e.Chequebook
- s.swap = e.Swap
- s.lightNodes = e.LightNodes
- s.pseudosettle = e.Pseudosettle
- s.blockTime = e.BlockTime
-
- s.statusSem = semaphore.NewWeighted(1)
- s.postageSem = semaphore.NewWeighted(1)
- s.stakingSem = semaphore.NewWeighted(1)
- s.cashOutChequeSem = semaphore.NewWeighted(1)
-
- s.chainID = chainID
- s.erc20Service = erc20
- s.syncStatus = e.SyncStatus
-
- s.statusService = e.NodeStatus
-
- s.preMapHooks["resolve"] = func(v string) (string, error) {
- switch addr, err := s.resolveNameOrAddress(v); {
- case err == nil:
- return addr.String(), nil
- case errors.Is(err, ens.ErrNotImplemented):
- return v, nil
- default:
- return "", err
- }
- }
-
- s.pinIntegrity = e.PinIntegrity
-}
-
-func (s *Service) SetProbe(probe *Probe) {
- s.probe = probe
-}
-
-func (s *Service) SetIsWarmingUp(v bool) {
- s.isWarmingUp = v
-}
-
-// Close hangs up running websockets on shutdown.
-func (s *Service) Close() error {
- s.logger.Info("api shutting down")
- close(s.quit)
-
- done := make(chan struct{})
- go func() {
- defer close(done)
- s.wsWg.Wait()
- }()
-
- select {
- case <-done:
- case <-time.After(1 * time.Second):
- return errors.New("api shutting down with open websockets")
- }
-
- return nil
-}
-
-// getOrCreateSessionID attempts to get the session if an tag id is supplied, and returns an error
-// if it does not exist. If no id is supplied, it will attempt to create a new session and return it.
-func (s *Service) getOrCreateSessionID(tagUid uint64) (uint64, error) {
- var (
- tag storer.SessionInfo
- err error
- )
- // if tag ID is not supplied, create a new tag
- if tagUid == 0 {
- tag, err = s.storer.NewSession()
- } else {
- tag, err = s.storer.Session(tagUid)
- }
- return tag.TagID, err
-}
-
-func (s *Service) resolveNameOrAddress(str string) (swarm.Address, error) {
- // Try and mapStructure the name as a bzz address.
- addr, err := swarm.ParseHexAddress(str)
- if err == nil {
- s.loggerV1.Debug("resolve name: parsing bzz address successful", "string", str, "address", addr)
- return addr, nil
- }
-
- // If no resolver is not available, return an error.
- if s.resolver == nil {
- return swarm.ZeroAddress, errNoResolver
- }
-
- // Try and resolve the name using the provided resolver.
- s.logger.Debug("resolve name: attempting to resolve string to address", "string", str)
- addr, err = s.resolver.Resolve(str)
- if err == nil {
- s.loggerV1.Debug("resolve name: address resolved successfully", "string", str, "address", addr)
- return addr, nil
- }
-
- return swarm.ZeroAddress, fmt.Errorf("%w: %w", errInvalidNameOrAddress, err)
-}
-
-func (s *Service) newTracingHandler(spanName string) func(h http.Handler) http.Handler {
- return func(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ctx, err := s.tracer.WithContextFromHTTPHeaders(r.Context(), r.Header)
- if err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
- s.logger.Debug("extract tracing context failed", "span_name", spanName, "error", err)
- // ignore
- }
-
- span, _, ctx := s.tracer.StartSpanFromContext(ctx, spanName, s.logger)
- defer span.Finish()
-
- err = s.tracer.AddContextHTTPHeader(ctx, r.Header)
- if err != nil {
- s.logger.Debug("inject tracing context failed", "span_name", spanName, "error", err)
- // ignore
- }
-
- h.ServeHTTP(w, r.WithContext(ctx))
- })
- }
-}
-
func (s *Service) contentLengthMetricMiddleware() func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -548,353 +310,3 @@ func (s *Service) observeUploadSpeed(w http.ResponseWriter, r *http.Request, sta
speed := float64(r.ContentLength) / time.Since(start).Seconds()
s.metrics.UploadSpeed.WithLabelValues(endpoint, mode).Observe(speed)
}
-
-// gasConfigMiddleware can be used by the APIs that allow block chain transactions to set
-// gas price and gas limit through the HTTP API headers.
-func (s *Service) gasConfigMiddleware(handlerName string) func(h http.Handler) http.Handler {
- return func(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- logger := s.logger.WithName(handlerName).Build()
-
- headers := struct {
- GasPrice *big.Int `map:"Gas-Price"`
- GasLimit uint64 `map:"Gas-Limit"`
- }{}
- if response := s.mapStructure(r.Header, &headers); response != nil {
- response("invalid header params", logger, w)
- return
- }
- ctx := r.Context()
- ctx = sctx.SetGasPrice(ctx, headers.GasPrice)
- ctx = sctx.SetGasLimit(ctx, headers.GasLimit)
-
- h.ServeHTTP(w, r.WithContext(ctx))
- })
- }
-}
-
-// corsHandler sets CORS headers to HTTP response if allowed origins are configured.
-func (s *Service) corsHandler(h http.Handler) http.Handler {
- allowedHeaders := []string{
- "User-Agent", "Accept", "X-Requested-With", "Access-Control-Request-Headers", "Access-Control-Request-Method", "Accept-Ranges", "Content-Encoding",
- AuthorizationHeader, AcceptEncodingHeader, ContentTypeHeader, ContentDispositionHeader, RangeHeader, OriginHeader,
- SwarmTagHeader, SwarmPinHeader, SwarmEncryptHeader, SwarmIndexDocumentHeader, SwarmErrorDocumentHeader, SwarmCollectionHeader,
- SwarmPostageBatchIdHeader, SwarmPostageStampHeader, SwarmDeferredUploadHeader, SwarmRedundancyLevelHeader,
- SwarmRedundancyStrategyHeader, SwarmRedundancyFallbackModeHeader, SwarmChunkRetrievalTimeoutHeader, SwarmLookAheadBufferSizeHeader,
- SwarmFeedIndexHeader, SwarmFeedIndexNextHeader, SwarmSocSignatureHeader, SwarmOnlyRootChunk, GasPriceHeader, GasLimitHeader, ImmutableHeader,
- SwarmActHeader, SwarmActTimestampHeader, SwarmActPublisherHeader, SwarmActHistoryAddressHeader,
- }
- allowedHeadersStr := strings.Join(allowedHeaders, ", ")
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if o := r.Header.Get(OriginHeader); o != "" && s.checkOrigin(r) {
- w.Header().Set("Access-Control-Allow-Credentials", "true")
- w.Header().Set("Access-Control-Allow-Origin", o)
- w.Header().Set("Access-Control-Allow-Headers", allowedHeadersStr)
- w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS, POST, PUT, DELETE")
- w.Header().Set("Access-Control-Max-Age", "3600")
- }
- h.ServeHTTP(w, r)
- })
-}
-
-// checkOrigin returns true if the origin is not set or is equal to the request host.
-func (s *Service) checkOrigin(r *http.Request) bool {
- origin := r.Header[OriginHeader]
- if len(origin) == 0 {
- return true
- }
- scheme := "http"
- if r.TLS != nil {
- scheme = "https"
- }
- hosts := append(s.CORSAllowedOrigins, scheme+"://"+r.Host)
- for _, v := range hosts {
- if equalASCIIFold(origin[0], v) || v == "*" {
- return true
- }
- }
-
- return false
-}
-
-// validationError is a custom error type for validation errors.
-type validationError struct {
- Entry string
- Value interface{}
- Cause error
-}
-
-// Error implements the error interface.
-func (e *validationError) Error() string {
- return fmt.Sprintf("`%s=%v`: %v", e.Entry, e.Value, e.Cause)
-}
-
-// mapStructure maps the input into output struct and validates the output.
-// It's a helper method for the handlers, which reduces the chattiness
-// of the code.
-func (s *Service) mapStructure(input, output interface{}) func(string, log.Logger, http.ResponseWriter) {
- // response unifies the response format for parsing and validation errors.
- response := func(err error) func(string, log.Logger, http.ResponseWriter) {
- return func(msg string, logger log.Logger, w http.ResponseWriter) {
- var merr *multierror.Error
- if !errors.As(err, &merr) {
- logger.Debug("mapping and validation failed", "error", err)
- logger.Error(err, "mapping and validation failed")
- jsonhttp.InternalServerError(w, err)
- return
- }
-
- logger.Debug(msg, "error", err)
- logger.Error(err, msg)
-
- resp := jsonhttp.StatusResponse{
- Message: msg,
- Code: http.StatusBadRequest,
- }
- for _, err := range merr.Errors {
- var perr *parseError
- if errors.As(err, &perr) {
- resp.Reasons = append(resp.Reasons, jsonhttp.Reason{
- Field: perr.Entry,
- Error: perr.Cause.Error(),
- })
- }
- var verr *validationError
- if errors.As(err, &verr) {
- resp.Reasons = append(resp.Reasons, jsonhttp.Reason{
- Field: verr.Entry,
- Error: verr.Cause.Error(),
- })
- }
- }
- jsonhttp.BadRequest(w, resp)
- }
- }
-
- if err := mapStructure(input, output, s.preMapHooks); err != nil {
- return response(err)
- }
-
- if err := s.validate.Struct(output); err != nil {
- var errs validator.ValidationErrors
- if !errors.As(err, &errs) {
- return response(err)
- }
-
- vErrs := &multierror.Error{ErrorFormat: flattenErrorsFormat}
- for _, err := range errs {
- val := err.Value()
- switch v := err.Value().(type) {
- case []byte:
- val = string(v)
- }
- vErrs = multierror.Append(vErrs,
- &validationError{
- Entry: strings.ToLower(err.Field()),
- Value: val,
- Cause: fmt.Errorf("want %s:%s", err.Tag(), err.Param()),
- })
- }
- return response(vErrs.ErrorOrNil())
- }
-
- return nil
-}
-
-// equalASCIIFold returns true if s is equal to t with ASCII case folding as
-// defined in RFC 4790.
-func equalASCIIFold(s, t string) bool {
- for s != "" && t != "" {
- sr, size := utf8.DecodeRuneInString(s)
- s = s[size:]
- tr, size := utf8.DecodeRuneInString(t)
- t = t[size:]
- if sr == tr {
- continue
- }
- if 'A' <= sr && sr <= 'Z' {
- sr = sr + 'a' - 'A'
- }
- if 'A' <= tr && tr <= 'Z' {
- tr = tr + 'a' - 'A'
- }
- if sr != tr {
- return false
- }
- }
- return s == t
-}
-
-type putterOptions struct {
- BatchID []byte
- TagID uint64
- Deferred bool
- Pin bool
-}
-
-type putterSessionWrapper struct {
- storer.PutterSession
- stamper postage.Stamper
- save func() error
-}
-
-func (p *putterSessionWrapper) Put(ctx context.Context, chunk swarm.Chunk) error {
- idAddress, err := storage.IdentityAddress(chunk)
- if err != nil {
- return err
- }
-
- stamp, err := p.stamper.Stamp(chunk.Address(), idAddress)
- if err != nil {
- return err
- }
- return p.PutterSession.Put(ctx, chunk.WithStamp(stamp))
-}
-
-func (p *putterSessionWrapper) Done(ref swarm.Address) error {
- return errors.Join(p.PutterSession.Done(ref), p.save())
-}
-
-func (p *putterSessionWrapper) Cleanup() error {
- return errors.Join(p.PutterSession.Cleanup(), p.save())
-}
-
-func (s *Service) getStamper(batchID []byte) (postage.Stamper, func() error, error) {
- exists, err := s.batchStore.Exists(batchID)
- if err != nil {
- return nil, nil, fmt.Errorf("batch exists: %w", err)
- }
-
- issuer, save, err := s.post.GetStampIssuer(batchID)
- if err != nil {
- return nil, nil, fmt.Errorf("stamp issuer: %w", err)
- }
-
- if usable := exists && s.post.IssuerUsable(issuer); !usable {
- return nil, nil, errBatchUnusable
- }
-
- return postage.NewStamper(s.stamperStore, issuer, s.signer), save, nil
-}
-
-func (s *Service) newStamperPutter(ctx context.Context, opts putterOptions) (storer.PutterSession, error) {
- if !opts.Deferred && s.beeMode == DevMode {
- return nil, errUnsupportedDevNodeOperation
- }
-
- stamper, save, err := s.getStamper(opts.BatchID)
- if err != nil {
- return nil, fmt.Errorf("get stamper: %w", err)
- }
-
- var session storer.PutterSession
- if opts.Deferred || opts.Pin {
- session, err = s.storer.Upload(ctx, opts.Pin, opts.TagID)
- } else {
- session = s.storer.DirectUpload()
- }
-
- if err != nil {
- return nil, fmt.Errorf("failed creating session: %w", err)
- }
-
- return &putterSessionWrapper{
- PutterSession: session,
- stamper: stamper,
- save: save,
- }, nil
-}
-
-func (s *Service) newStampedPutter(ctx context.Context, opts putterOptions, stamp *postage.Stamp) (storer.PutterSession, error) {
- if !opts.Deferred && s.beeMode == DevMode {
- return nil, errUnsupportedDevNodeOperation
- }
-
- storedBatch, err := s.batchStore.Get(stamp.BatchID())
- if err != nil {
- return nil, errInvalidPostageBatch
- }
-
- var session storer.PutterSession
- if opts.Deferred || opts.Pin {
- session, err = s.storer.Upload(ctx, opts.Pin, opts.TagID)
- if err != nil {
- return nil, fmt.Errorf("failed creating session: %w", err)
- }
- } else {
- session = s.storer.DirectUpload()
- }
-
- stamper := postage.NewPresignedStamper(stamp, storedBatch.Owner)
-
- return &putterSessionWrapper{
- PutterSession: session,
- stamper: stamper,
- save: func() error { return nil },
- }, nil
-}
-
-type pipelineFunc func(context.Context, io.Reader) (swarm.Address, error)
-
-func requestPipelineFn(s storage.Putter, encrypt bool, rLevel redundancy.Level) pipelineFunc {
- return func(ctx context.Context, r io.Reader) (swarm.Address, error) {
- pipe := builder.NewPipelineBuilder(ctx, s, encrypt, rLevel)
- return builder.FeedPipeline(ctx, pipe, r)
- }
-}
-
-func requestPipelineFactory(ctx context.Context, s storage.Putter, encrypt bool, rLevel redundancy.Level) func() pipeline.Interface {
- return func() pipeline.Interface {
- return builder.NewPipelineBuilder(ctx, s, encrypt, rLevel)
- }
-}
-
-type cleanupOnErrWriter struct {
- http.ResponseWriter
- logger log.Logger
- onErr func() error
-}
-
-func (r *cleanupOnErrWriter) WriteHeader(statusCode int) {
- // if there is an error status returned, cleanup.
- if statusCode >= http.StatusBadRequest {
- err := r.onErr()
- if err != nil {
- r.logger.Debug("failed cleaning up", "err", err)
- }
- }
- r.ResponseWriter.WriteHeader(statusCode)
-}
-
-// CalculateNumberOfChunks calculates the number of chunks in an arbitrary
-// content length.
-func CalculateNumberOfChunks(contentLength int64, isEncrypted bool) int64 {
- if contentLength <= swarm.ChunkSize {
- return 1
- }
- branchingFactor := swarm.Branches
- if isEncrypted {
- branchingFactor = swarm.EncryptedBranches
- }
-
- dataChunks := math.Ceil(float64(contentLength) / float64(swarm.ChunkSize))
- totalChunks := dataChunks
- intermediate := dataChunks / float64(branchingFactor)
-
- for intermediate > 1 {
- totalChunks += math.Ceil(intermediate)
- intermediate = intermediate / float64(branchingFactor)
- }
-
- return int64(totalChunks) + 1
-}
-
-// defaultUploadMethod returns true for deferred when the deferred header is not present.
-func defaultUploadMethod(deferred *bool) bool {
- if deferred == nil {
- return true
- }
-
- return *deferred
-}
diff --git a/pkg/api/api_js.go b/pkg/api/api_js.go
new file mode 100644
index 00000000000..070ed788b89
--- /dev/null
+++ b/pkg/api/api_js.go
@@ -0,0 +1,236 @@
+//go:build js
+// +build js
+
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package api
+
+import (
+ "crypto/ecdsa"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "mime"
+ "net/http"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethersphere/bee/v2/pkg/accesscontrol"
+ "github.com/ethersphere/bee/v2/pkg/accounting"
+ "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/feeds"
+ "github.com/ethersphere/bee/v2/pkg/gsoc"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/pingpong"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/postage/postagecontract"
+ "github.com/ethersphere/bee/v2/pkg/pss"
+ "github.com/ethersphere/bee/v2/pkg/resolver"
+ "github.com/ethersphere/bee/v2/pkg/resolver/client/ens"
+ "github.com/ethersphere/bee/v2/pkg/settlement"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/chequebook"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/erc20"
+ "github.com/ethersphere/bee/v2/pkg/status"
+ "github.com/ethersphere/bee/v2/pkg/steward"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives/staking"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/topology/lightnode"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/ethersphere/bee/v2/pkg/transaction"
+ "github.com/go-playground/validator/v10"
+ "github.com/gorilla/mux"
+ "golang.org/x/sync/semaphore"
+)
+
+type Service struct {
+ storer Storer
+ resolver resolver.Interface
+ pss pss.Interface
+ gsoc gsoc.Listener
+ steward steward.Interface
+ logger log.Logger
+ loggerV1 log.Logger
+ tracer *tracing.Tracer
+ feedFactory feeds.Factory
+ signer crypto.Signer
+ post postage.Service
+ accesscontrol accesscontrol.Controller
+ postageContract postagecontract.Interface
+ probe *Probe
+ stakingContract staking.Contract
+ Options
+
+ http.Handler
+ router *mux.Router
+
+ wsWg sync.WaitGroup // wait for all websockets to close on exit
+ quit chan struct{}
+
+ overlay *swarm.Address
+ publicKey ecdsa.PublicKey
+ pssPublicKey ecdsa.PublicKey
+ ethereumAddress common.Address
+ chequebookEnabled bool
+ swapEnabled bool
+ fullAPIEnabled bool
+
+ topologyDriver topology.Driver
+ p2p p2p.DebugService
+ accounting accounting.Interface
+ chequebook chequebook.Service
+ pseudosettle settlement.Interface
+ pingpong pingpong.Interface
+
+ batchStore postage.Storer
+ stamperStore storage.Store
+ pinIntegrity PinIntegrity
+
+ syncStatus func() (bool, error)
+
+ swap swap.Interface
+ transaction transaction.Service
+ lightNodes *lightnode.Container
+ blockTime time.Duration
+
+ statusSem *semaphore.Weighted
+ postageSem *semaphore.Weighted
+ stakingSem *semaphore.Weighted
+ cashOutChequeSem *semaphore.Weighted
+ beeMode BeeNodeMode
+
+ chainBackend transaction.Backend
+ erc20Service erc20.Service
+ chainID int64
+
+ whitelistedWithdrawalAddress []common.Address
+
+ preMapHooks map[string]func(v string) (string, error)
+ validate *validator.Validate
+
+ redistributionAgent *storageincentives.Agent
+
+ statusService *status.Service
+ isWarmingUp bool
+}
+
+// Configure initializes the API service with its options and dependencies.
+func (s *Service) Configure(signer crypto.Signer, tracer *tracing.Tracer, o Options, e ExtraOptions, chainID int64, erc20 erc20.Service) {
+ s.signer = signer
+ s.Options = o
+ s.tracer = tracer
+
+ s.quit = make(chan struct{})
+
+ s.storer = e.Storer
+ s.resolver = e.Resolver
+ s.pss = e.Pss
+ s.gsoc = e.Gsoc
+ s.feedFactory = e.FeedFactory
+ s.post = e.Post
+ s.accesscontrol = e.AccessControl
+ s.postageContract = e.PostageContract
+ s.steward = e.Steward
+ s.stakingContract = e.Staking
+
+ s.pingpong = e.Pingpong
+ s.topologyDriver = e.TopologyDriver
+ s.accounting = e.Accounting
+ s.chequebook = e.Chequebook
+ s.swap = e.Swap
+ s.lightNodes = e.LightNodes
+ s.pseudosettle = e.Pseudosettle
+ s.blockTime = e.BlockTime
+
+ s.statusSem = semaphore.NewWeighted(1)
+ s.postageSem = semaphore.NewWeighted(1)
+ s.stakingSem = semaphore.NewWeighted(1)
+ s.cashOutChequeSem = semaphore.NewWeighted(1)
+
+ s.chainID = chainID
+ s.erc20Service = erc20
+ s.syncStatus = e.SyncStatus
+
+ s.statusService = e.NodeStatus
+
+ s.preMapHooks["resolve"] = func(v string) (string, error) {
+ switch addr, err := s.resolveNameOrAddress(v); {
+ case err == nil:
+ return addr.String(), nil
+ case errors.Is(err, ens.ErrNotImplemented):
+ return v, nil
+ default:
+ return "", err
+ }
+ }
+
+ s.pinIntegrity = e.PinIntegrity
+}
+
+func New(
+ publicKey, pssPublicKey ecdsa.PublicKey,
+ ethereumAddress common.Address,
+ whitelistedWithdrawalAddress []string,
+ logger log.Logger,
+ transaction transaction.Service,
+ batchStore postage.Storer,
+ beeMode BeeNodeMode,
+ chequebookEnabled bool,
+ swapEnabled bool,
+ chainBackend transaction.Backend,
+ cors []string,
+ stamperStore storage.Store,
+) *Service {
+ s := new(Service)
+
+ s.CORSAllowedOrigins = cors
+ s.beeMode = beeMode
+ s.logger = logger.WithName(loggerName).Register()
+ s.loggerV1 = s.logger.V(1).Register()
+ s.chequebookEnabled = chequebookEnabled
+ s.swapEnabled = swapEnabled
+ s.publicKey = publicKey
+ s.pssPublicKey = pssPublicKey
+ s.ethereumAddress = ethereumAddress
+ s.transaction = transaction
+ s.batchStore = batchStore
+ s.chainBackend = chainBackend
+ s.preMapHooks = map[string]func(v string) (string, error){
+ "mimeMediaType": func(v string) (string, error) {
+ typ, _, err := mime.ParseMediaType(v)
+ return typ, err
+ },
+ "decBase64url": func(v string) (string, error) {
+ buf, err := base64.URLEncoding.DecodeString(v)
+ return string(buf), err
+ },
+ "decHex": func(v string) (string, error) {
+ buf, err := hex.DecodeString(v)
+ return string(buf), err
+ },
+ }
+ s.validate = validator.New()
+ s.validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
+ name := strings.SplitN(fld.Tag.Get(mapStructureTagName), ",", 2)[0]
+ if name == "-" {
+ return ""
+ }
+ return name
+ })
+ s.stamperStore = stamperStore
+
+ for _, v := range whitelistedWithdrawalAddress {
+ s.whitelistedWithdrawalAddress = append(s.whitelistedWithdrawalAddress, common.HexToAddress(v))
+ }
+
+ return s
+}
diff --git a/pkg/api/api_shared.go b/pkg/api/api_shared.go
new file mode 100644
index 00000000000..d107ec388c8
--- /dev/null
+++ b/pkg/api/api_shared.go
@@ -0,0 +1,624 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package api provides the functionality of the Bee
+// client-facing HTTP API.
+package api
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "net/http"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/ethersphere/bee/v2/pkg/accesscontrol"
+ "github.com/ethersphere/bee/v2/pkg/accounting"
+ "github.com/ethersphere/bee/v2/pkg/feeds"
+ "github.com/ethersphere/bee/v2/pkg/file/pipeline"
+ "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder"
+ "github.com/ethersphere/bee/v2/pkg/file/redundancy"
+ "github.com/ethersphere/bee/v2/pkg/gsoc"
+ "github.com/ethersphere/bee/v2/pkg/jsonhttp"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/pingpong"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/postage/postagecontract"
+ "github.com/ethersphere/bee/v2/pkg/pss"
+ "github.com/ethersphere/bee/v2/pkg/resolver"
+ "github.com/ethersphere/bee/v2/pkg/sctx"
+ "github.com/ethersphere/bee/v2/pkg/settlement"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/chequebook"
+ "github.com/ethersphere/bee/v2/pkg/status"
+ "github.com/ethersphere/bee/v2/pkg/steward"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives/staking"
+ "github.com/ethersphere/bee/v2/pkg/storer"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/topology/lightnode"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/go-playground/validator/v10"
+ "github.com/hashicorp/go-multierror"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "api"
+
+const (
+ SwarmPinHeader = "Swarm-Pin"
+ SwarmTagHeader = "Swarm-Tag"
+ SwarmEncryptHeader = "Swarm-Encrypt"
+ SwarmIndexDocumentHeader = "Swarm-Index-Document"
+ SwarmErrorDocumentHeader = "Swarm-Error-Document"
+ SwarmSocSignatureHeader = "Swarm-Soc-Signature"
+ SwarmFeedIndexHeader = "Swarm-Feed-Index"
+ SwarmFeedIndexNextHeader = "Swarm-Feed-Index-Next"
+ SwarmLegacyFeedResolve = "Swarm-Feed-Legacy-Resolve"
+ SwarmOnlyRootChunk = "Swarm-Only-Root-Chunk"
+ SwarmCollectionHeader = "Swarm-Collection"
+ SwarmPostageBatchIdHeader = "Swarm-Postage-Batch-Id"
+ SwarmPostageStampHeader = "Swarm-Postage-Stamp"
+ SwarmDeferredUploadHeader = "Swarm-Deferred-Upload"
+ SwarmRedundancyLevelHeader = "Swarm-Redundancy-Level"
+ SwarmRedundancyStrategyHeader = "Swarm-Redundancy-Strategy"
+ SwarmRedundancyFallbackModeHeader = "Swarm-Redundancy-Fallback-Mode"
+ SwarmChunkRetrievalTimeoutHeader = "Swarm-Chunk-Retrieval-Timeout"
+ SwarmLookAheadBufferSizeHeader = "Swarm-Lookahead-Buffer-Size"
+ SwarmActHeader = "Swarm-Act"
+ SwarmActTimestampHeader = "Swarm-Act-Timestamp"
+ SwarmActPublisherHeader = "Swarm-Act-Publisher"
+ SwarmActHistoryAddressHeader = "Swarm-Act-History-Address"
+
+ ImmutableHeader = "Immutable"
+ GasPriceHeader = "Gas-Price"
+ GasLimitHeader = "Gas-Limit"
+ ETagHeader = "ETag"
+
+ AuthorizationHeader = "Authorization"
+ AcceptEncodingHeader = "Accept-Encoding"
+ ContentTypeHeader = "Content-Type"
+ ContentDispositionHeader = "Content-Disposition"
+ ContentLengthHeader = "Content-Length"
+ RangeHeader = "Range"
+ OriginHeader = "Origin"
+ AccessControlExposeHeaders = "Access-Control-Expose-Headers"
+)
+
+const (
+ multiPartFormData = "multipart/form-data"
+ contentTypeTar = "application/x-tar"
+ boolHeaderSetValue = "true"
+)
+
+var (
+ errInvalidNameOrAddress = errors.New("invalid name or bzz address")
+ errNoResolver = errors.New("no resolver connected")
+ errInvalidRequest = errors.New("could not validate request")
+ errInvalidContentType = errors.New("invalid content-type")
+ errDirectoryStore = errors.New("could not store directory")
+ errFileStore = errors.New("could not store file")
+ errInvalidPostageBatch = errors.New("invalid postage batch id")
+ errBatchUnusable = errors.New("batch not usable")
+ errUnsupportedDevNodeOperation = errors.New("operation not supported in dev mode")
+ errOperationSupportedOnlyInFullMode = errors.New("operation is supported only in full mode")
+ errActDownload = errors.New("act download failed")
+ errActUpload = errors.New("act upload failed")
+ errActGranteeList = errors.New("failed to create or update grantee list")
+
+ batchIdOrStampSig = fmt.Sprintf("Either '%s' or '%s' header must be set in the request", SwarmPostageStampHeader, SwarmPostageBatchIdHeader)
+)
+
+// Storer interface provides the functionality required from the local storage
+// component of the node.
+type Storer interface {
+ storer.UploadStore
+ storer.PinStore
+ storer.CacheStore
+ storer.NetStore
+ storer.LocalStore
+ storer.RadiusChecker
+ storer.Debugger
+ storer.NeighborhoodStats
+}
+
+type PinIntegrity interface {
+ Check(ctx context.Context, logger log.Logger, pin string, out chan storer.PinStat)
+}
+
+func (s *Service) SetP2P(p2p p2p.DebugService) {
+ if s != nil {
+ s.p2p = p2p
+ }
+}
+
+func (s *Service) SetSwarmAddress(addr *swarm.Address) {
+ if s != nil {
+ s.overlay = addr
+ }
+}
+
+func (s *Service) SetRedistributionAgent(redistributionAgent *storageincentives.Agent) {
+ if s != nil {
+ s.redistributionAgent = redistributionAgent
+ }
+}
+
+type Options struct {
+ CORSAllowedOrigins []string
+ WsPingPeriod time.Duration
+}
+
+type ExtraOptions struct {
+ Pingpong pingpong.Interface
+ TopologyDriver topology.Driver
+ LightNodes *lightnode.Container
+ Accounting accounting.Interface
+ Pseudosettle settlement.Interface
+ Swap swap.Interface
+ Chequebook chequebook.Service
+ BlockTime time.Duration
+ Storer Storer
+ Resolver resolver.Interface
+ Pss pss.Interface
+ Gsoc gsoc.Listener
+ FeedFactory feeds.Factory
+ Post postage.Service
+ AccessControl accesscontrol.Controller
+ PostageContract postagecontract.Interface
+ Staking staking.Contract
+ Steward steward.Interface
+ SyncStatus func() (bool, error)
+ NodeStatus *status.Service
+ PinIntegrity PinIntegrity
+}
+
+func (s *Service) SetProbe(probe *Probe) {
+ s.probe = probe
+}
+
+func (s *Service) SetIsWarmingUp(v bool) {
+ s.isWarmingUp = v
+}
+
+// Close hangs up running websockets on shutdown.
+func (s *Service) Close() error {
+ s.logger.Info("api shutting down")
+ close(s.quit)
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ s.wsWg.Wait()
+ }()
+
+ select {
+ case <-done:
+ case <-time.After(1 * time.Second):
+ return errors.New("api shutting down with open websockets")
+ }
+
+ return nil
+}
+
+// getOrCreateSessionID attempts to get the session if a tag id is supplied, and returns an error
+// if it does not exist. If no id is supplied, it will attempt to create a new session and return it.
+func (s *Service) getOrCreateSessionID(tagUid uint64) (uint64, error) {
+ var (
+ tag storer.SessionInfo
+ err error
+ )
+ // if tag ID is not supplied, create a new tag
+ if tagUid == 0 {
+ tag, err = s.storer.NewSession()
+ } else {
+ tag, err = s.storer.Session(tagUid)
+ }
+ return tag.TagID, err
+}
+
+func (s *Service) resolveNameOrAddress(str string) (swarm.Address, error) {
+ // Try to parse the name as a bzz address.
+ addr, err := swarm.ParseHexAddress(str)
+ if err == nil {
+ s.loggerV1.Debug("resolve name: parsing bzz address successful", "string", str, "address", addr)
+ return addr, nil
+ }
+
+ // If no resolver is available, return an error.
+ if s.resolver == nil {
+ return swarm.ZeroAddress, errNoResolver
+ }
+
+ // Try and resolve the name using the provided resolver.
+ s.logger.Debug("resolve name: attempting to resolve string to address", "string", str)
+ addr, err = s.resolver.Resolve(str)
+ if err == nil {
+ s.loggerV1.Debug("resolve name: address resolved successfully", "string", str, "address", addr)
+ return addr, nil
+ }
+
+ return swarm.ZeroAddress, fmt.Errorf("%w: %w", errInvalidNameOrAddress, err)
+}
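The fast path never touches the resolver: any valid hex string parses directly as an overlay address, and only non-hex inputs (e.g. ENS names) fall through to resolution. A minimal sketch; the address value is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/ethersphere/bee/v2/pkg/swarm"
)

func main() {
	// 64 hex digits parse straight into a swarm address.
	addr, err := swarm.ParseHexAddress("1a0a8d7c6e1e5f63e34f8a23d4a6b1c2d3e4f5a6b7c8d9e0f1a2b3c4d5e6f708")
	if err != nil {
		panic(err)
	}
	fmt.Println(addr)
}
```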
+
+func (s *Service) newTracingHandler(spanName string) func(h http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx, err := s.tracer.WithContextFromHTTPHeaders(r.Context(), r.Header)
+ if err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
+ s.logger.Debug("extract tracing context failed", "span_name", spanName, "error", err)
+ // ignore
+ }
+
+ span, _, ctx := s.tracer.StartSpanFromContext(ctx, spanName, s.logger)
+ defer span.Finish()
+
+ err = s.tracer.AddContextHTTPHeader(ctx, r.Header)
+ if err != nil {
+ s.logger.Debug("inject tracing context failed", "span_name", spanName, "error", err)
+ // ignore
+ }
+
+ h.ServeHTTP(w, r.WithContext(ctx))
+ })
+ }
+}
+
+// gasConfigMiddleware can be used by the APIs that allow blockchain transactions to set
+// gas price and gas limit through the HTTP API headers.
+func (s *Service) gasConfigMiddleware(handlerName string) func(h http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ logger := s.logger.WithName(handlerName).Build()
+
+ headers := struct {
+ GasPrice *big.Int `map:"Gas-Price"`
+ GasLimit uint64 `map:"Gas-Limit"`
+ }{}
+ if response := s.mapStructure(r.Header, &headers); response != nil {
+ response("invalid header params", logger, w)
+ return
+ }
+ ctx := r.Context()
+ ctx = sctx.SetGasPrice(ctx, headers.GasPrice)
+ ctx = sctx.SetGasLimit(ctx, headers.GasLimit)
+
+ h.ServeHTTP(w, r.WithContext(ctx))
+ })
+ }
+}
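From the client side, the middleware is driven purely by headers. A sketch of a request that opts into custom gas settings; the endpoint, port, and values are plausible examples, not prescribed by this change:

```go
package main

import (
	"context"
	"net/http"
)

func main() {
	req, err := http.NewRequestWithContext(context.Background(), http.MethodPost,
		"http://localhost:1633/chequebook/withdraw?amount=1000", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Gas-Price", "1000000000") // parsed into a *big.Int by the middleware
	req.Header.Set("Gas-Limit", "90000")      // parsed into a uint64

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
```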
+
+// corsHandler sets CORS headers on the HTTP response if allowed origins are configured.
+func (s *Service) corsHandler(h http.Handler) http.Handler {
+ allowedHeaders := []string{
+ "User-Agent", "Accept", "X-Requested-With", "Access-Control-Request-Headers", "Access-Control-Request-Method", "Accept-Ranges", "Content-Encoding",
+ AuthorizationHeader, AcceptEncodingHeader, ContentTypeHeader, ContentDispositionHeader, RangeHeader, OriginHeader,
+ SwarmTagHeader, SwarmPinHeader, SwarmEncryptHeader, SwarmIndexDocumentHeader, SwarmErrorDocumentHeader, SwarmCollectionHeader,
+ SwarmPostageBatchIdHeader, SwarmPostageStampHeader, SwarmDeferredUploadHeader, SwarmRedundancyLevelHeader,
+ SwarmRedundancyStrategyHeader, SwarmRedundancyFallbackModeHeader, SwarmChunkRetrievalTimeoutHeader, SwarmLookAheadBufferSizeHeader,
+ SwarmFeedIndexHeader, SwarmFeedIndexNextHeader, SwarmSocSignatureHeader, SwarmOnlyRootChunk, GasPriceHeader, GasLimitHeader, ImmutableHeader,
+ SwarmActHeader, SwarmActTimestampHeader, SwarmActPublisherHeader, SwarmActHistoryAddressHeader,
+ }
+ allowedHeadersStr := strings.Join(allowedHeaders, ", ")
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if o := r.Header.Get(OriginHeader); o != "" && s.checkOrigin(r) {
+ w.Header().Set("Access-Control-Allow-Credentials", "true")
+ w.Header().Set("Access-Control-Allow-Origin", o)
+ w.Header().Set("Access-Control-Allow-Headers", allowedHeadersStr)
+ w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS, POST, PUT, DELETE")
+ w.Header().Set("Access-Control-Max-Age", "3600")
+ }
+ h.ServeHTTP(w, r)
+ })
+}
+
+// checkOrigin returns true if the origin is not set or is equal to the request host.
+func (s *Service) checkOrigin(r *http.Request) bool {
+ origin := r.Header[OriginHeader]
+ if len(origin) == 0 {
+ return true
+ }
+ scheme := "http"
+ if r.TLS != nil {
+ scheme = "https"
+ }
+ hosts := append(s.CORSAllowedOrigins, scheme+"://"+r.Host)
+ for _, v := range hosts {
+ if equalASCIIFold(origin[0], v) || v == "*" {
+ return true
+ }
+ }
+
+ return false
+}
+
+// validationError is a custom error type for validation errors.
+type validationError struct {
+ Entry string
+ Value interface{}
+ Cause error
+}
+
+// Error implements the error interface.
+func (e *validationError) Error() string {
+ return fmt.Sprintf("`%s=%v`: %v", e.Entry, e.Value, e.Cause)
+}
+
+// mapStructure maps the input into the output struct and validates the output.
+// It's a helper method for the handlers, which reduces the chattiness
+// of the code.
+func (s *Service) mapStructure(input, output interface{}) func(string, log.Logger, http.ResponseWriter) {
+ // response unifies the response format for parsing and validation errors.
+ response := func(err error) func(string, log.Logger, http.ResponseWriter) {
+ return func(msg string, logger log.Logger, w http.ResponseWriter) {
+ var merr *multierror.Error
+ if !errors.As(err, &merr) {
+ logger.Debug("mapping and validation failed", "error", err)
+ logger.Error(err, "mapping and validation failed")
+ jsonhttp.InternalServerError(w, err)
+ return
+ }
+
+ logger.Debug(msg, "error", err)
+ logger.Error(err, msg)
+
+ resp := jsonhttp.StatusResponse{
+ Message: msg,
+ Code: http.StatusBadRequest,
+ }
+ for _, err := range merr.Errors {
+ var perr *parseError
+ if errors.As(err, &perr) {
+ resp.Reasons = append(resp.Reasons, jsonhttp.Reason{
+ Field: perr.Entry,
+ Error: perr.Cause.Error(),
+ })
+ }
+ var verr *validationError
+ if errors.As(err, &verr) {
+ resp.Reasons = append(resp.Reasons, jsonhttp.Reason{
+ Field: verr.Entry,
+ Error: verr.Cause.Error(),
+ })
+ }
+ }
+ jsonhttp.BadRequest(w, resp)
+ }
+ }
+
+ if err := mapStructure(input, output, s.preMapHooks); err != nil {
+ return response(err)
+ }
+
+ if err := s.validate.Struct(output); err != nil {
+ var errs validator.ValidationErrors
+ if !errors.As(err, &errs) {
+ return response(err)
+ }
+
+ vErrs := &multierror.Error{ErrorFormat: flattenErrorsFormat}
+ for _, err := range errs {
+ val := err.Value()
+ switch v := err.Value().(type) {
+ case []byte:
+ val = string(v)
+ }
+ vErrs = multierror.Append(vErrs,
+ &validationError{
+ Entry: strings.ToLower(err.Field()),
+ Value: val,
+ Cause: fmt.Errorf("want %s:%s", err.Tag(), err.Param()),
+ })
+ }
+ return response(vErrs.ErrorOrNil())
+ }
+
+ return nil
+}
+
+// equalASCIIFold returns true if s is equal to t with ASCII case folding as
+// defined in RFC 4790.
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
+
+type putterOptions struct {
+ BatchID []byte
+ TagID uint64
+ Deferred bool
+ Pin bool
+}
+
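+// putterSessionWrapper stamps every chunk that passes through Put and
+// persists the stamp issuer state when the session ends: Done (success)
+// and Cleanup (failure) both invoke save.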
+type putterSessionWrapper struct {
+ storer.PutterSession
+ stamper postage.Stamper
+ save func() error
+}
+
+func (p *putterSessionWrapper) Put(ctx context.Context, chunk swarm.Chunk) error {
+ idAddress, err := storage.IdentityAddress(chunk)
+ if err != nil {
+ return err
+ }
+
+ stamp, err := p.stamper.Stamp(chunk.Address(), idAddress)
+ if err != nil {
+ return err
+ }
+ return p.PutterSession.Put(ctx, chunk.WithStamp(stamp))
+}
+
+func (p *putterSessionWrapper) Done(ref swarm.Address) error {
+ return errors.Join(p.PutterSession.Done(ref), p.save())
+}
+
+func (p *putterSessionWrapper) Cleanup() error {
+ return errors.Join(p.PutterSession.Cleanup(), p.save())
+}
+
+func (s *Service) getStamper(batchID []byte) (postage.Stamper, func() error, error) {
+ exists, err := s.batchStore.Exists(batchID)
+ if err != nil {
+ return nil, nil, fmt.Errorf("batch exists: %w", err)
+ }
+
+ issuer, save, err := s.post.GetStampIssuer(batchID)
+ if err != nil {
+ return nil, nil, fmt.Errorf("stamp issuer: %w", err)
+ }
+
+ if usable := exists && s.post.IssuerUsable(issuer); !usable {
+ return nil, nil, errBatchUnusable
+ }
+
+ return postage.NewStamper(s.stamperStore, issuer, s.signer), save, nil
+}
+
+func (s *Service) newStamperPutter(ctx context.Context, opts putterOptions) (storer.PutterSession, error) {
+ if !opts.Deferred && s.beeMode == DevMode {
+ return nil, errUnsupportedDevNodeOperation
+ }
+
+ stamper, save, err := s.getStamper(opts.BatchID)
+ if err != nil {
+ return nil, fmt.Errorf("get stamper: %w", err)
+ }
+
+ var session storer.PutterSession
+ if opts.Deferred || opts.Pin {
+ session, err = s.storer.Upload(ctx, opts.Pin, opts.TagID)
+ } else {
+ session = s.storer.DirectUpload()
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("failed creating session: %w", err)
+ }
+
+ return &putterSessionWrapper{
+ PutterSession: session,
+ stamper: stamper,
+ save: save,
+ }, nil
+}
+
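+// newStampedPutter is the counterpart of newStamperPutter for presigned
+// stamps: instead of signing fresh stamps with the node's own issuer it
+// reuses the caller-supplied stamp, so there is no issuer state to persist
+// and save is a no-op.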
+func (s *Service) newStampedPutter(ctx context.Context, opts putterOptions, stamp *postage.Stamp) (storer.PutterSession, error) {
+ if !opts.Deferred && s.beeMode == DevMode {
+ return nil, errUnsupportedDevNodeOperation
+ }
+
+ storedBatch, err := s.batchStore.Get(stamp.BatchID())
+ if err != nil {
+ return nil, errInvalidPostageBatch
+ }
+
+ var session storer.PutterSession
+ if opts.Deferred || opts.Pin {
+ session, err = s.storer.Upload(ctx, opts.Pin, opts.TagID)
+ if err != nil {
+ return nil, fmt.Errorf("failed creating session: %w", err)
+ }
+ } else {
+ session = s.storer.DirectUpload()
+ }
+
+ stamper := postage.NewPresignedStamper(stamp, storedBatch.Owner)
+
+ return &putterSessionWrapper{
+ PutterSession: session,
+ stamper: stamper,
+ save: func() error { return nil },
+ }, nil
+}
+
+type pipelineFunc func(context.Context, io.Reader) (swarm.Address, error)
+
+func requestPipelineFn(s storage.Putter, encrypt bool, rLevel redundancy.Level) pipelineFunc {
+ return func(ctx context.Context, r io.Reader) (swarm.Address, error) {
+ pipe := builder.NewPipelineBuilder(ctx, s, encrypt, rLevel)
+ return builder.FeedPipeline(ctx, pipe, r)
+ }
+}
+
+func requestPipelineFactory(ctx context.Context, s storage.Putter, encrypt bool, rLevel redundancy.Level) func() pipeline.Interface {
+ return func() pipeline.Interface {
+ return builder.NewPipelineBuilder(ctx, s, encrypt, rLevel)
+ }
+}
+
+type cleanupOnErrWriter struct {
+ http.ResponseWriter
+ logger log.Logger
+ onErr func() error
+}
+
+func (r *cleanupOnErrWriter) WriteHeader(statusCode int) {
+ // if an error status is returned, clean up first.
+ if statusCode >= http.StatusBadRequest {
+ err := r.onErr()
+ if err != nil {
+ r.logger.Debug("failed cleaning up", "err", err)
+ }
+ }
+ r.ResponseWriter.WriteHeader(statusCode)
+}
+
+// CalculateNumberOfChunks calculates the total number of chunks (data,
+// intermediate, and root) needed to store content of the given length.
+func CalculateNumberOfChunks(contentLength int64, isEncrypted bool) int64 {
+ if contentLength <= swarm.ChunkSize {
+ return 1
+ }
+ branchingFactor := swarm.Branches
+ if isEncrypted {
+ branchingFactor = swarm.EncryptedBranches
+ }
+
+ dataChunks := math.Ceil(float64(contentLength) / float64(swarm.ChunkSize))
+ totalChunks := dataChunks
+ intermediate := dataChunks / float64(branchingFactor)
+
+ for intermediate > 1 {
+ totalChunks += math.Ceil(intermediate)
+ intermediate = intermediate / float64(branchingFactor)
+ }
+
+ return int64(totalChunks) + 1
+}
+
+// defaultUploadMethod returns the deferred-upload flag, defaulting to true
+// when the Swarm-Deferred-Upload header is not present.
+func defaultUploadMethod(deferred *bool) bool {
+ if deferred == nil {
+ return true
+ }
+
+ return *deferred
+}
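To make the chunk accounting in CalculateNumberOfChunks concrete, here is a
minimal standalone sketch with the mainline constants inlined (chunk size
4096 bytes, branching factor 128); a 1 MiB payload needs 256 data chunks,
2 intermediate chunks and 1 root chunk:

package main

import (
	"fmt"
	"math"
)

// numberOfChunks mirrors CalculateNumberOfChunks above for the unencrypted
// case, with swarm.ChunkSize (4096) and swarm.Branches (128) inlined.
func numberOfChunks(contentLength int64) int64 {
	if contentLength <= 4096 {
		return 1
	}
	dataChunks := math.Ceil(float64(contentLength) / 4096)
	total := dataChunks
	intermediate := dataChunks / 128
	for intermediate > 1 {
		total += math.Ceil(intermediate)
		intermediate /= 128
	}
	return int64(total) + 1 // +1 for the root chunk
}

func main() {
	fmt.Println(numberOfChunks(4096))    // 1: fits in a single chunk
	fmt.Println(numberOfChunks(1 << 20)) // 259: 256 data + 2 intermediate + 1 root
}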
diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go
index 986bc663d3e..43a745f4224 100644
--- a/pkg/api/api_test.go
+++ b/pkg/api/api_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/bytes.go b/pkg/api/bytes.go
index 4c1cd891df0..1429a501783 100644
--- a/pkg/api/bytes.go
+++ b/pkg/api/bytes.go
@@ -1,34 +1,24 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package api
import (
- "encoding/binary"
"errors"
"fmt"
"net/http"
- "strconv"
"time"
"github.com/ethersphere/bee/v2/pkg/accesscontrol"
- "github.com/ethersphere/bee/v2/pkg/cac"
"github.com/ethersphere/bee/v2/pkg/file/redundancy"
"github.com/ethersphere/bee/v2/pkg/jsonhttp"
"github.com/ethersphere/bee/v2/pkg/postage"
"github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/swarm"
- "github.com/ethersphere/bee/v2/pkg/tracing"
- "github.com/gorilla/mux"
"github.com/opentracing/opentracing-go/ext"
olog "github.com/opentracing/opentracing-go/log"
)
-type bytesPostResponse struct {
- Reference swarm.Address `json:"reference"`
-}
-
// bytesUploadHandler handles upload of raw binary data of arbitrary length.
func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
span, logger, ctx := s.tracer.StartSpanFromContext(r.Context(), "post_bytes", s.logger.WithName("post_bytes").Build())
@@ -166,65 +156,3 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
Reference: encryptedReference,
})
}
-
-// bytesGetHandler handles retrieval of raw binary data of arbitrary length.
-func (s *Service) bytesGetHandler(w http.ResponseWriter, r *http.Request) {
- logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("get_bytes_by_address").Build())
-
- paths := struct {
- Address swarm.Address `map:"address,resolve" validate:"required"`
- }{}
- if response := s.mapStructure(mux.Vars(r), &paths); response != nil {
- response("invalid path params", logger, w)
- return
- }
-
- address := paths.Address
- if v := getAddressFromContext(r.Context()); !v.IsZero() {
- address = v
- }
-
- additionalHeaders := http.Header{
- ContentTypeHeader: {"application/octet-stream"},
- }
-
- s.downloadHandler(logger, w, r, address, additionalHeaders, true, false, nil)
-}
-
-func (s *Service) bytesHeadHandler(w http.ResponseWriter, r *http.Request) {
- logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("head_bytes_by_address").Build())
-
- paths := struct {
- Address swarm.Address `map:"address,resolve" validate:"required"`
- }{}
- if response := s.mapStructure(mux.Vars(r), &paths); response != nil {
- w.WriteHeader(http.StatusBadRequest)
- return
- }
-
- address := paths.Address
- if v := getAddressFromContext(r.Context()); !v.IsZero() {
- address = v
- }
-
- getter := s.storer.Download(true)
- ch, err := getter.Get(r.Context(), address)
- if err != nil {
- logger.Debug("get root chunk failed", "chunk_address", address, "error", err)
- logger.Error(nil, "get root chunk failed")
- w.WriteHeader(http.StatusNotFound)
- return
- }
-
- w.Header().Add(AccessControlExposeHeaders, "Accept-Ranges, Content-Encoding")
- w.Header().Add(ContentTypeHeader, "application/octet-stream")
- var span int64
-
- if cac.Valid(ch) {
- span = int64(binary.LittleEndian.Uint64(ch.Data()[:swarm.SpanSize]))
- } else {
- span = int64(len(ch.Data()))
- }
- w.Header().Set(ContentLengthHeader, strconv.FormatInt(span, 10))
- w.WriteHeader(http.StatusOK) // HEAD requests do not write a body
-}
diff --git a/pkg/api/bytes_js.go b/pkg/api/bytes_js.go
new file mode 100644
index 00000000000..5702c7ba5f8
--- /dev/null
+++ b/pkg/api/bytes_js.go
@@ -0,0 +1,155 @@
+//go:build js
+// +build js
+
+package api
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/ethersphere/bee/v2/pkg/accesscontrol"
+ "github.com/ethersphere/bee/v2/pkg/file/redundancy"
+ "github.com/ethersphere/bee/v2/pkg/jsonhttp"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
+)
+
+// bytesUploadHandler handles upload of raw binary data of arbitrary length.
+func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) {
+ span, logger, ctx := s.tracer.StartSpanFromContext(r.Context(), "post_bytes", s.logger.WithName("post_bytes").Build())
+ defer span.Finish()
+
+ headers := struct {
+ BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"`
+ SwarmTag uint64 `map:"Swarm-Tag"`
+ Pin bool `map:"Swarm-Pin"`
+ Deferred *bool `map:"Swarm-Deferred-Upload"`
+ Encrypt bool `map:"Swarm-Encrypt"`
+ RLevel redundancy.Level `map:"Swarm-Redundancy-Level"`
+ Act bool `map:"Swarm-Act"`
+ HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"`
+ }{}
+ if response := s.mapStructure(r.Header, &headers); response != nil {
+ response("invalid header params", logger, w)
+ return
+ }
+
+ var (
+ tag uint64
+ err error
+ deferred = defaultUploadMethod(headers.Deferred)
+ )
+
+ if deferred || headers.Pin {
+ tag, err = s.getOrCreateSessionID(headers.SwarmTag)
+ if err != nil {
+ logger.Debug("get or create tag failed", "error", err)
+ logger.Error(nil, "get or create tag failed")
+ switch {
+ case errors.Is(err, storage.ErrNotFound):
+ jsonhttp.NotFound(w, "tag not found")
+ default:
+ jsonhttp.InternalServerError(w, "cannot get or create tag")
+ }
+ ext.LogError(span, err, olog.String("action", "tag.create"))
+ return
+ }
+ span.SetTag("tagID", tag)
+ }
+
+ putter, err := s.newStamperPutter(ctx, putterOptions{
+ BatchID: headers.BatchID,
+ TagID: tag,
+ Pin: headers.Pin,
+ Deferred: deferred,
+ })
+ if err != nil {
+ logger.Debug("get putter failed", "error", err)
+ logger.Error(nil, "get putter failed")
+ switch {
+ case errors.Is(err, errBatchUnusable) || errors.Is(err, postage.ErrNotUsable):
+ jsonhttp.UnprocessableEntity(w, "batch not usable yet or does not exist")
+ case errors.Is(err, postage.ErrNotFound):
+ jsonhttp.NotFound(w, "batch with id not found")
+ case errors.Is(err, errInvalidPostageBatch):
+ jsonhttp.BadRequest(w, "invalid batch id")
+ case errors.Is(err, errUnsupportedDevNodeOperation):
+ jsonhttp.BadRequest(w, errUnsupportedDevNodeOperation)
+ default:
+ jsonhttp.BadRequest(w, nil)
+ }
+ ext.LogError(span, err, olog.String("action", "new.StamperPutter"))
+ return
+ }
+
+ ow := &cleanupOnErrWriter{
+ ResponseWriter: w,
+ onErr: putter.Cleanup,
+ logger: logger,
+ }
+
+ p := requestPipelineFn(putter, headers.Encrypt, headers.RLevel)
+ reference, err := p(ctx, r.Body)
+ if err != nil {
+ logger.Debug("split write all failed", "error", err)
+ logger.Error(nil, "split write all failed")
+ switch {
+ case errors.Is(err, postage.ErrBucketFull):
+ jsonhttp.PaymentRequired(ow, "batch is overissued")
+ default:
+ jsonhttp.InternalServerError(ow, "split write all failed")
+ }
+ ext.LogError(span, err, olog.String("action", "split.WriteAll"))
+ return
+ }
+
+ encryptedReference := reference
+ historyReference := swarm.ZeroAddress
+ if headers.Act {
+ encryptedReference, historyReference, err = s.actEncryptionHandler(r.Context(), putter, reference, headers.HistoryAddress)
+ if err != nil {
+ logger.Debug("access control upload failed", "error", err)
+ logger.Error(nil, "access control upload failed")
+ switch {
+ case errors.Is(err, accesscontrol.ErrNotFound):
+ jsonhttp.NotFound(w, "act or history entry not found")
+ case errors.Is(err, accesscontrol.ErrInvalidPublicKey) || errors.Is(err, accesscontrol.ErrSecretKeyInfinity):
+ jsonhttp.BadRequest(w, "invalid public key")
+ case errors.Is(err, accesscontrol.ErrUnexpectedType):
+ jsonhttp.BadRequest(w, "failed to create history")
+ default:
+ jsonhttp.InternalServerError(w, errActUpload)
+ }
+ return
+ }
+ }
+ span.SetTag("root_address", encryptedReference)
+
+ err = putter.Done(reference)
+ if err != nil {
+ logger.Debug("done split failed", "error", err)
+ logger.Error(nil, "done split failed")
+ jsonhttp.InternalServerError(ow, "done split failed")
+ ext.LogError(span, err, olog.String("action", "putter.Done"))
+ return
+ }
+
+ if tag != 0 {
+ w.Header().Set(SwarmTagHeader, fmt.Sprint(tag))
+ }
+
+ span.LogFields(olog.Bool("success", true))
+
+ w.Header().Set(AccessControlExposeHeaders, SwarmTagHeader)
+ if headers.Act {
+ w.Header().Set(SwarmActHistoryAddressHeader, historyReference.String())
+ w.Header().Add(AccessControlExposeHeaders, SwarmActHistoryAddressHeader)
+ }
+ jsonhttp.Created(w, bytesPostResponse{
+ Reference: encryptedReference,
+ })
+}
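As a usage reference for the handler above, a minimal client sketch against a
node's local API (the address is the default API listen address and the batch
ID is a placeholder; both are assumptions of this sketch):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := bytes.NewReader([]byte("hello swarm"))
	req, err := http.NewRequest(http.MethodPost, "http://localhost:1633/bytes", body)
	if err != nil {
		panic(err)
	}
	// Required by the handler; the value below is a placeholder, not a real batch.
	req.Header.Set("Swarm-Postage-Batch-Id", "<64-hex-char-batch-id>")
	// Omitting Swarm-Deferred-Upload defaults to a deferred upload
	// (see defaultUploadMethod).

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out)) // expect 201 Created and {"reference":"..."}
}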
diff --git a/pkg/api/bytes_shared.go b/pkg/api/bytes_shared.go
new file mode 100644
index 00000000000..fb4b932803c
--- /dev/null
+++ b/pkg/api/bytes_shared.go
@@ -0,0 +1,82 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package api
+
+import (
+ "encoding/binary"
+ "net/http"
+ "strconv"
+
+ "github.com/ethersphere/bee/v2/pkg/cac"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/gorilla/mux"
+)
+
+type bytesPostResponse struct {
+ Reference swarm.Address `json:"reference"`
+}
+
+// bytesGetHandler handles retrieval of raw binary data of arbitrary length.
+func (s *Service) bytesGetHandler(w http.ResponseWriter, r *http.Request) {
+ logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("get_bytes_by_address").Build())
+
+ paths := struct {
+ Address swarm.Address `map:"address,resolve" validate:"required"`
+ }{}
+ if response := s.mapStructure(mux.Vars(r), &paths); response != nil {
+ response("invalid path params", logger, w)
+ return
+ }
+
+ address := paths.Address
+ if v := getAddressFromContext(r.Context()); !v.IsZero() {
+ address = v
+ }
+
+ additionalHeaders := http.Header{
+ ContentTypeHeader: {"application/octet-stream"},
+ }
+
+ s.downloadHandler(logger, w, r, address, additionalHeaders, true, false, nil)
+}
+
+func (s *Service) bytesHeadHandler(w http.ResponseWriter, r *http.Request) {
+ logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("head_bytes_by_address").Build())
+
+ paths := struct {
+ Address swarm.Address `map:"address,resolve" validate:"required"`
+ }{}
+ if response := s.mapStructure(mux.Vars(r), &paths); response != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ address := paths.Address
+ if v := getAddressFromContext(r.Context()); !v.IsZero() {
+ address = v
+ }
+
+ getter := s.storer.Download(true)
+ ch, err := getter.Get(r.Context(), address)
+ if err != nil {
+ logger.Debug("get root chunk failed", "chunk_address", address, "error", err)
+ logger.Error(nil, "get root chunk failed")
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+
+ w.Header().Add(AccessControlExposeHeaders, "Accept-Ranges, Content-Encoding")
+ w.Header().Add(ContentTypeHeader, "application/octet-stream")
+ var span int64
+
+ if cac.Valid(ch) {
+ span = int64(binary.LittleEndian.Uint64(ch.Data()[:swarm.SpanSize]))
+ } else {
+ span = int64(len(ch.Data()))
+ }
+ w.Header().Set(ContentLengthHeader, strconv.FormatInt(span, 10))
+ w.WriteHeader(http.StatusOK) // HEAD requests do not write a body
+}
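The Content-Length reported by bytesHeadHandler above is the span read from
the first 8 bytes of the root chunk; a standalone sketch of that decoding
(swarm.SpanSize is 8, little-endian):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("hello swarm")
	// A valid content-addressed chunk prefixes its payload with an 8-byte
	// little-endian span recording the length of the data it covers.
	chunkData := binary.LittleEndian.AppendUint64(nil, uint64(len(payload)))
	chunkData = append(chunkData, payload...)

	span := int64(binary.LittleEndian.Uint64(chunkData[:8]))
	fmt.Println(span) // 11
}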
diff --git a/pkg/api/bzz.go b/pkg/api/bzz.go
index c05a73d177c..bc436755672 100644
--- a/pkg/api/bzz.go
+++ b/pkg/api/bzz.go
@@ -1,65 +1,22 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package api
import (
- "context"
- "encoding/hex"
"errors"
- "fmt"
"net/http"
- "path"
- "path/filepath"
- "strconv"
- "strings"
"time"
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
- olog "github.com/opentracing/opentracing-go/log"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethersphere/bee/v2/pkg/accesscontrol"
- "github.com/ethersphere/bee/v2/pkg/feeds"
- "github.com/ethersphere/bee/v2/pkg/file"
- "github.com/ethersphere/bee/v2/pkg/file/joiner"
- "github.com/ethersphere/bee/v2/pkg/file/loadsave"
"github.com/ethersphere/bee/v2/pkg/file/redundancy"
- "github.com/ethersphere/bee/v2/pkg/file/redundancy/getter"
"github.com/ethersphere/bee/v2/pkg/jsonhttp"
- "github.com/ethersphere/bee/v2/pkg/log"
- "github.com/ethersphere/bee/v2/pkg/manifest"
"github.com/ethersphere/bee/v2/pkg/postage"
"github.com/ethersphere/bee/v2/pkg/storage"
- "github.com/ethersphere/bee/v2/pkg/storer"
"github.com/ethersphere/bee/v2/pkg/swarm"
- "github.com/ethersphere/bee/v2/pkg/topology"
- "github.com/ethersphere/bee/v2/pkg/tracing"
- "github.com/ethersphere/langos"
- "github.com/gorilla/mux"
-)
-
-// The size of buffer used for prefetching content with Langos when not using erasure coding
-// Warning: This value influences the number of chunk requests and chunker join goroutines
-// per file request.
-// Recommended value is 8 or 16 times the io.Copy default buffer value which is 32kB, depending
-// on the file size. Use lookaheadBufferSize() to get the correct buffer size for the request.
-const (
- smallFileBufferSize = 8 * 32 * 1024
- largeFileBufferSize = 16 * 32 * 1024
-
- largeBufferFilesizeThreshold = 10 * 1000000 // ten megs
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
)
-func lookaheadBufferSize(size int64) int {
- if size <= largeBufferFilesizeThreshold {
- return smallFileBufferSize
- }
- return largeFileBufferSize
-}
-
func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
span, logger, ctx := s.tracer.StartSpanFromContext(r.Context(), "post_bzz", s.logger.WithName("post_bzz").Build())
defer span.Finish()
@@ -143,567 +100,3 @@ func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
}
s.fileUploadHandler(ctx, logger, span, ow, r, putter, headers.Encrypt, tag, headers.RLevel, headers.Act, headers.HistoryAddress)
}
-
-// bzzUploadResponse is returned when an HTTP request to upload a file is successful
-type bzzUploadResponse struct {
- Reference swarm.Address `json:"reference"`
-}
-
-// fileUploadHandler uploads the file and its metadata supplied in the file body and
-// the headers
-func (s *Service) fileUploadHandler(
- ctx context.Context,
- logger log.Logger,
- span opentracing.Span,
- w http.ResponseWriter,
- r *http.Request,
- putter storer.PutterSession,
- encrypt bool,
- tagID uint64,
- rLevel redundancy.Level,
- act bool,
- historyAddress swarm.Address,
-) {
- queries := struct {
- FileName string `map:"name" validate:"startsnotwith=/"`
- }{}
- if response := s.mapStructure(r.URL.Query(), &queries); response != nil {
- response("invalid query params", logger, w)
- return
- }
-
- p := requestPipelineFn(putter, encrypt, rLevel)
-
- // first store the file and get its reference
- fr, err := p(ctx, r.Body)
- if err != nil {
- logger.Debug("file store failed", "file_name", queries.FileName, "error", err)
- logger.Error(nil, "file store failed", "file_name", queries.FileName)
- switch {
- case errors.Is(err, postage.ErrBucketFull):
- jsonhttp.PaymentRequired(w, "batch is overissued")
- default:
- jsonhttp.InternalServerError(w, errFileStore)
- }
- ext.LogError(span, err, olog.String("action", "file.store"))
- return
- }
-
- // If filename is still empty, use the file hash as the filename
- if queries.FileName == "" {
- queries.FileName = fr.String()
- if err := s.validate.Struct(queries); err != nil {
- verr := &validationError{
- Entry: "file hash",
- Value: queries.FileName,
- Cause: err,
- }
- logger.Debug("invalid body filename", "error", verr)
- logger.Error(nil, "invalid body filename")
- jsonhttp.BadRequest(w, jsonhttp.StatusResponse{
- Message: "invalid body params",
- Code: http.StatusBadRequest,
- Reasons: []jsonhttp.Reason{{
- Field: "file hash",
- Error: verr.Error(),
- }},
- })
- return
- }
- }
-
- factory := requestPipelineFactory(ctx, putter, encrypt, rLevel)
- l := loadsave.New(s.storer.ChunkStore(), s.storer.Cache(), factory, rLevel)
-
- m, err := manifest.NewDefaultManifest(l, encrypt)
- if err != nil {
- logger.Debug("create manifest failed", "file_name", queries.FileName, "error", err)
- logger.Error(nil, "create manifest failed", "file_name", queries.FileName)
- switch {
- case errors.Is(err, manifest.ErrInvalidManifestType):
- jsonhttp.BadRequest(w, "create manifest failed")
- default:
- jsonhttp.InternalServerError(w, nil)
- }
- return
- }
-
- rootMetadata := map[string]string{
- manifest.WebsiteIndexDocumentSuffixKey: queries.FileName,
- }
- err = m.Add(ctx, manifest.RootPath, manifest.NewEntry(swarm.ZeroAddress, rootMetadata))
- if err != nil {
- logger.Debug("adding metadata to manifest failed", "file_name", queries.FileName, "error", err)
- logger.Error(nil, "adding metadata to manifest failed", "file_name", queries.FileName)
- jsonhttp.InternalServerError(w, "add metadata failed")
- return
- }
-
- fileMtdt := map[string]string{
- manifest.EntryMetadataContentTypeKey: r.Header.Get(ContentTypeHeader), // Content-Type has already been validated.
- manifest.EntryMetadataFilenameKey: queries.FileName,
- }
-
- err = m.Add(ctx, queries.FileName, manifest.NewEntry(fr, fileMtdt))
- if err != nil {
- logger.Debug("adding file to manifest failed", "file_name", queries.FileName, "error", err)
- logger.Error(nil, "adding file to manifest failed", "file_name", queries.FileName)
- jsonhttp.InternalServerError(w, "add file failed")
- return
- }
-
- logger.Debug("info", "encrypt", encrypt, "file_name", queries.FileName, "hash", fr, "metadata", fileMtdt)
-
- manifestReference, err := m.Store(ctx)
- if err != nil {
- logger.Debug("manifest store failed", "file_name", queries.FileName, "error", err)
- logger.Error(nil, "manifest store failed", "file_name", queries.FileName)
- switch {
- case errors.Is(err, postage.ErrBucketFull):
- jsonhttp.PaymentRequired(w, "batch is overissued")
- default:
- jsonhttp.InternalServerError(w, "manifest store failed")
- }
- return
- }
- logger.Debug("store", "manifest_reference", manifestReference)
-
- reference := manifestReference
- historyReference := swarm.ZeroAddress
- if act {
- reference, historyReference, err = s.actEncryptionHandler(r.Context(), putter, reference, historyAddress)
- if err != nil {
- logger.Debug("access control upload failed", "error", err)
- logger.Error(nil, "access control upload failed")
- switch {
- case errors.Is(err, accesscontrol.ErrNotFound):
- jsonhttp.NotFound(w, "act or history entry not found")
- case errors.Is(err, accesscontrol.ErrInvalidPublicKey) || errors.Is(err, accesscontrol.ErrSecretKeyInfinity):
- jsonhttp.BadRequest(w, "invalid public key")
- case errors.Is(err, accesscontrol.ErrUnexpectedType):
- jsonhttp.BadRequest(w, "failed to create history")
- default:
- jsonhttp.InternalServerError(w, errActUpload)
- }
- return
- }
- }
-
- err = putter.Done(manifestReference)
- if err != nil {
- logger.Debug("done split failed", "reference", manifestReference, "error", err)
- logger.Error(nil, "done split failed")
- jsonhttp.InternalServerError(w, "done split failed")
- ext.LogError(span, err, olog.String("action", "putter.Done"))
- return
- }
- span.LogFields(olog.Bool("success", true))
- span.SetTag("root_address", reference)
-
- if tagID != 0 {
- w.Header().Set(SwarmTagHeader, fmt.Sprint(tagID))
- span.SetTag("tagID", tagID)
- }
- w.Header().Set(ETagHeader, fmt.Sprintf("%q", reference.String()))
- w.Header().Set(AccessControlExposeHeaders, SwarmTagHeader)
- if act {
- w.Header().Set(SwarmActHistoryAddressHeader, historyReference.String())
- w.Header().Add(AccessControlExposeHeaders, SwarmActHistoryAddressHeader)
- }
-
- jsonhttp.Created(w, bzzUploadResponse{
- Reference: reference,
- })
-}
-
-func (s *Service) bzzDownloadHandler(w http.ResponseWriter, r *http.Request) {
- logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("get_bzz_by_path").Build())
-
- paths := struct {
- Address swarm.Address `map:"address,resolve" validate:"required"`
- Path string `map:"path"`
- }{}
- if response := s.mapStructure(mux.Vars(r), &paths); response != nil {
- response("invalid path params", logger, w)
- return
- }
-
- address := paths.Address
- if v := getAddressFromContext(r.Context()); !v.IsZero() {
- address = v
- }
-
- if strings.HasSuffix(paths.Path, "/") {
- paths.Path = strings.TrimRight(paths.Path, "/") + "/" // NOTE: leave one slash if there was some.
- }
-
- s.serveReference(logger, address, paths.Path, w, r, false)
-}
-
-func (s *Service) bzzHeadHandler(w http.ResponseWriter, r *http.Request) {
- logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("head_bzz_by_path").Build())
-
- paths := struct {
- Address swarm.Address `map:"address,resolve" validate:"required"`
- Path string `map:"path"`
- }{}
- if response := s.mapStructure(mux.Vars(r), &paths); response != nil {
- response("invalid path params", logger, w)
- return
- }
-
- address := paths.Address
- if v := getAddressFromContext(r.Context()); !v.IsZero() {
- address = v
- }
-
- if strings.HasSuffix(paths.Path, "/") {
- paths.Path = strings.TrimRight(paths.Path, "/") + "/" // NOTE: leave one slash if there was some.
- }
-
- s.serveReference(logger, address, paths.Path, w, r, true)
-}
-
-func (s *Service) serveReference(logger log.Logger, address swarm.Address, pathVar string, w http.ResponseWriter, r *http.Request, headerOnly bool) {
- loggerV1 := logger.V(1).Build()
-
- headers := struct {
- Cache *bool `map:"Swarm-Cache"`
- Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"`
- FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"`
- RLevel *redundancy.Level `map:"Swarm-Redundancy-Level"`
- ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"`
- }{}
-
- if response := s.mapStructure(r.Header, &headers); response != nil {
- response("invalid header params", logger, w)
- return
- }
- cache := true
- if headers.Cache != nil {
- cache = *headers.Cache
- }
-
- rLevel := redundancy.DefaultLevel
- if headers.RLevel != nil {
- rLevel = *headers.RLevel
- }
-
- ctx := r.Context()
- ls := loadsave.NewReadonly(s.storer.Download(cache), s.storer.Cache(), redundancy.DefaultLevel)
- feedDereferenced := false
-
- ctx, err := getter.SetConfigInContext(ctx, headers.Strategy, headers.FallbackMode, headers.ChunkRetrievalTimeout, logger)
- if err != nil {
- logger.Error(err, err.Error())
- jsonhttp.BadRequest(w, "could not parse headers")
- return
- }
-
-FETCH:
- // read manifest entry
- m, err := manifest.NewDefaultManifestReference(
- address,
- ls,
- )
- if err != nil {
- logger.Debug("bzz download: not manifest", "address", address, "error", err)
- logger.Error(nil, "not manifest")
- jsonhttp.NotFound(w, nil)
- return
- }
-
- // there's a possible ambiguity here, right now the data which was
- // read can be an entry.Entry or a mantaray feed manifest. Try to
- // unmarshal as mantaray first and possibly resolve the feed, otherwise
- // go on normally.
- if !feedDereferenced {
- if l, err := s.manifestFeed(ctx, m); err == nil {
- // we have a feed manifest here
- ch, cur, _, err := l.At(ctx, time.Now().Unix(), 0)
- if err != nil {
- logger.Debug("bzz download: feed lookup failed", "error", err)
- logger.Error(nil, "bzz download: feed lookup failed")
- jsonhttp.NotFound(w, "feed not found")
- return
- }
- if ch == nil {
- logger.Debug("bzz download: feed lookup: no updates")
- logger.Error(nil, "bzz download: feed lookup")
- jsonhttp.NotFound(w, "no update found")
- return
- }
- wc, err := feeds.GetWrappedChunk(ctx, s.storer.Download(cache), ch, false)
- if err != nil {
- logger.Debug("bzz download: mapStructure feed update failed", "error", err)
- logger.Error(nil, "bzz download: mapStructure feed update failed")
- jsonhttp.InternalServerError(w, "mapStructure feed update")
- return
- }
- address = wc.Address()
- // modify ls and init with non-existing wrapped chunk
- ls = loadsave.NewReadonlyWithRootCh(s.storer.Download(cache), s.storer.Cache(), wc, rLevel)
-
- feedDereferenced = true
- curBytes, err := cur.MarshalBinary()
- if err != nil {
- s.logger.Debug("bzz download: marshal feed index failed", "error", err)
- s.logger.Error(nil, "bzz download: marshal index failed")
- jsonhttp.InternalServerError(w, "marshal index")
- return
- }
-
- w.Header().Set(SwarmFeedIndexHeader, hex.EncodeToString(curBytes))
- // this header might be overriding others. handle with care. in the future
- // we should implement an append functionality for this specific header,
- // since different parts of handlers might be overriding others' values
- // resulting in inconsistent headers in the response.
- w.Header().Set(AccessControlExposeHeaders, SwarmFeedIndexHeader)
- goto FETCH
- }
- }
-
- if pathVar == "" {
- loggerV1.Debug("bzz download: handle empty path", "address", address)
-
- if indexDocumentSuffixKey, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteIndexDocumentSuffixKey); ok {
- pathWithIndex := path.Join(pathVar, indexDocumentSuffixKey)
- indexDocumentManifestEntry, err := m.Lookup(ctx, pathWithIndex)
- if err == nil {
- // index document exists
- logger.Debug("bzz download: serving path", "path", pathWithIndex)
-
- s.serveManifestEntry(logger, w, r, indexDocumentManifestEntry, !feedDereferenced, headerOnly)
- return
- }
- }
- logger.Debug("bzz download: address not found or incorrect", "address", address, "path", pathVar)
- logger.Error(nil, "address not found or incorrect")
- jsonhttp.NotFound(w, "address not found or incorrect")
- return
- }
- me, err := m.Lookup(ctx, pathVar)
- if err != nil {
- loggerV1.Debug("bzz download: invalid path", "address", address, "path", pathVar, "error", err)
- logger.Error(nil, "bzz download: invalid path")
-
- if errors.Is(err, manifest.ErrNotFound) {
-
- if !strings.HasPrefix(pathVar, "/") {
- // check for directory
- dirPath := pathVar + "/"
- exists, err := m.HasPrefix(ctx, dirPath)
- if err == nil && exists {
- // redirect to directory
- u := r.URL
- u.Path += "/"
- redirectURL := u.String()
-
- logger.Debug("bzz download: redirecting failed", "url", redirectURL, "error", err)
-
- http.Redirect(w, r, redirectURL, http.StatusPermanentRedirect)
- return
- }
- }
-
- // check index suffix path
- if indexDocumentSuffixKey, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteIndexDocumentSuffixKey); ok {
- if !strings.HasSuffix(pathVar, indexDocumentSuffixKey) {
- // check if path is directory with index
- pathWithIndex := path.Join(pathVar, indexDocumentSuffixKey)
- indexDocumentManifestEntry, err := m.Lookup(ctx, pathWithIndex)
- if err == nil {
- // index document exists
- logger.Debug("bzz download: serving path", "path", pathWithIndex)
-
- s.serveManifestEntry(logger, w, r, indexDocumentManifestEntry, !feedDereferenced, headerOnly)
- return
- }
- }
- }
-
- // check if error document is to be shown
- if errorDocumentPath, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteErrorDocumentPathKey); ok {
- if pathVar != errorDocumentPath {
- errorDocumentManifestEntry, err := m.Lookup(ctx, errorDocumentPath)
- if err == nil {
- // error document exists
- logger.Debug("bzz download: serving path", "path", errorDocumentPath)
-
- s.serveManifestEntry(logger, w, r, errorDocumentManifestEntry, !feedDereferenced, headerOnly)
- return
- }
- }
- }
-
- jsonhttp.NotFound(w, "path address not found")
- } else {
- jsonhttp.NotFound(w, nil)
- }
- return
- }
-
- // serve requested path
- s.serveManifestEntry(logger, w, r, me, !feedDereferenced, headerOnly)
-}
-
-func (s *Service) serveManifestEntry(
- logger log.Logger,
- w http.ResponseWriter,
- r *http.Request,
- manifestEntry manifest.Entry,
- etag, headersOnly bool,
-) {
- additionalHeaders := http.Header{}
- mtdt := manifestEntry.Metadata()
- if fname, ok := mtdt[manifest.EntryMetadataFilenameKey]; ok {
- fname = filepath.Base(fname) // only keep the file name
- additionalHeaders[ContentDispositionHeader] = []string{fmt.Sprintf("inline; filename=\"%s\"", escapeQuotes(fname))}
- }
- if mimeType, ok := mtdt[manifest.EntryMetadataContentTypeKey]; ok {
- additionalHeaders[ContentTypeHeader] = []string{mimeType}
- }
-
- s.downloadHandler(logger, w, r, manifestEntry.Reference(), additionalHeaders, etag, headersOnly, nil)
-}
-
-// downloadHandler contains common logic for downloading Swarm file from API
-func (s *Service) downloadHandler(logger log.Logger, w http.ResponseWriter, r *http.Request, reference swarm.Address, additionalHeaders http.Header, etag, headersOnly bool, rootCh swarm.Chunk) {
- headers := struct {
- Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"`
- RLevel *redundancy.Level `map:"Swarm-Redundancy-Level"`
- FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"`
- ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"`
- LookaheadBufferSize *int `map:"Swarm-Lookahead-Buffer-Size"`
- Cache *bool `map:"Swarm-Cache"`
- }{}
-
- if response := s.mapStructure(r.Header, &headers); response != nil {
- response("invalid header params", logger, w)
- return
- }
- cache := true
- if headers.Cache != nil {
- cache = *headers.Cache
- }
-
- ctx := r.Context()
- ctx, err := getter.SetConfigInContext(ctx, headers.Strategy, headers.FallbackMode, headers.ChunkRetrievalTimeout, logger)
- if err != nil {
- logger.Error(err, err.Error())
- jsonhttp.BadRequest(w, "could not parse headers")
- return
- }
- rLevel := redundancy.DefaultLevel
- if headers.RLevel != nil {
- rLevel = *headers.RLevel
- }
-
- var (
- reader file.Joiner
- l int64
- )
- if rootCh != nil {
- reader, l, err = joiner.NewJoiner(ctx, s.storer.Download(cache), s.storer.Cache(), reference, rootCh)
- } else {
- reader, l, err = joiner.New(ctx, s.storer.Download(cache), s.storer.Cache(), reference, rLevel)
- }
- if err != nil {
- if errors.Is(err, storage.ErrNotFound) || errors.Is(err, topology.ErrNotFound) {
- logger.Debug("api download: not found ", "address", reference, "error", err)
- logger.Error(nil, err.Error())
- jsonhttp.NotFound(w, nil)
- return
- }
- logger.Debug("api download: unexpected error", "address", reference, "error", err)
- logger.Error(nil, "api download: unexpected error")
- jsonhttp.InternalServerError(w, "joiner failed")
- return
- }
-
- // include additional headers
- for name, values := range additionalHeaders {
- for _, value := range values {
- w.Header().Add(name, value)
- }
- }
- if etag {
- w.Header().Set(ETagHeader, fmt.Sprintf("%q", reference))
- }
- w.Header().Set(ContentLengthHeader, strconv.FormatInt(l, 10))
- w.Header().Add(AccessControlExposeHeaders, ContentDispositionHeader)
-
- if headersOnly {
- w.WriteHeader(http.StatusOK)
- return
- }
-
- bufSize := lookaheadBufferSize(l)
- if headers.LookaheadBufferSize != nil {
- bufSize = *(headers.LookaheadBufferSize)
- }
- if bufSize > 0 {
- http.ServeContent(w, r, "", time.Now(), langos.NewBufferedLangos(reader, bufSize))
- return
- }
- http.ServeContent(w, r, "", time.Now(), reader)
-}
-
-// manifestMetadataLoad returns the value for a key stored in the metadata of
-// manifest path, or empty string if no value is present.
-// The ok result indicates whether value was found in the metadata.
-func manifestMetadataLoad(
- ctx context.Context,
- manifest manifest.Interface,
- path, metadataKey string,
-) (string, bool) {
- me, err := manifest.Lookup(ctx, path)
- if err != nil {
- return "", false
- }
-
- manifestRootMetadata := me.Metadata()
- if val, ok := manifestRootMetadata[metadataKey]; ok {
- return val, ok
- }
-
- return "", false
-}
-
-func (s *Service) manifestFeed(
- ctx context.Context,
- m manifest.Interface,
-) (feeds.Lookup, error) {
- e, err := m.Lookup(ctx, "/")
- if err != nil {
- return nil, fmt.Errorf("node lookup: %w", err)
- }
- var (
- owner, topic []byte
- t = new(feeds.Type)
- )
- meta := e.Metadata()
- if e := meta[feedMetadataEntryOwner]; e != "" {
- owner, err = hex.DecodeString(e)
- if err != nil {
- return nil, err
- }
- }
- if e := meta[feedMetadataEntryTopic]; e != "" {
- topic, err = hex.DecodeString(e)
- if err != nil {
- return nil, err
- }
- }
- if e := meta[feedMetadataEntryType]; e != "" {
- err := t.FromString(e)
- if err != nil {
- return nil, err
- }
- }
- if len(owner) == 0 || len(topic) == 0 {
- return nil, fmt.Errorf("node lookup: %s", "feed metadata absent")
- }
- f := feeds.New(topic, common.BytesToAddress(owner))
- return s.feedFactory.NewLookup(*t, f)
-}
diff --git a/pkg/api/bzz_js.go b/pkg/api/bzz_js.go
new file mode 100644
index 00000000000..d8af4d278cc
--- /dev/null
+++ b/pkg/api/bzz_js.go
@@ -0,0 +1,100 @@
+//go:build js
+// +build js
+
+package api
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/ethersphere/bee/v2/pkg/file/redundancy"
+ "github.com/ethersphere/bee/v2/pkg/jsonhttp"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
+)
+
+func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) {
+ span, logger, ctx := s.tracer.StartSpanFromContext(r.Context(), "post_bzz", s.logger.WithName("post_bzz").Build())
+ defer span.Finish()
+
+ headers := struct {
+ ContentType string `map:"Content-Type,mimeMediaType" validate:"required"`
+ BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"`
+ SwarmTag uint64 `map:"Swarm-Tag"`
+ Pin bool `map:"Swarm-Pin"`
+ Deferred *bool `map:"Swarm-Deferred-Upload"`
+ Encrypt bool `map:"Swarm-Encrypt"`
+ IsDir bool `map:"Swarm-Collection"`
+ RLevel redundancy.Level `map:"Swarm-Redundancy-Level"`
+ Act bool `map:"Swarm-Act"`
+ HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"`
+ }{}
+ if response := s.mapStructure(r.Header, &headers); response != nil {
+ response("invalid header params", logger, w)
+ return
+ }
+
+ var (
+ tag uint64
+ err error
+ deferred = defaultUploadMethod(headers.Deferred)
+ )
+
+ if deferred || headers.Pin {
+ tag, err = s.getOrCreateSessionID(headers.SwarmTag)
+ if err != nil {
+ logger.Debug("get or create tag failed", "error", err)
+ logger.Error(nil, "get or create tag failed")
+ switch {
+ case errors.Is(err, storage.ErrNotFound):
+ jsonhttp.NotFound(w, "tag not found")
+ default:
+ jsonhttp.InternalServerError(w, "cannot get or create tag")
+ }
+ ext.LogError(span, err, olog.String("action", "tag.create"))
+ return
+ }
+ span.SetTag("tagID", tag)
+ }
+
+ putter, err := s.newStamperPutter(ctx, putterOptions{
+ BatchID: headers.BatchID,
+ TagID: tag,
+ Pin: headers.Pin,
+ Deferred: deferred,
+ })
+ if err != nil {
+ logger.Debug("putter failed", "error", err)
+ logger.Error(nil, "putter failed")
+ switch {
+ case errors.Is(err, errBatchUnusable) || errors.Is(err, postage.ErrNotUsable):
+ jsonhttp.UnprocessableEntity(w, "batch not usable yet or does not exist")
+ case errors.Is(err, postage.ErrNotFound):
+ jsonhttp.NotFound(w, "batch with id not found")
+ case errors.Is(err, errInvalidPostageBatch):
+ jsonhttp.BadRequest(w, "invalid batch id")
+ case errors.Is(err, errUnsupportedDevNodeOperation):
+ jsonhttp.BadRequest(w, errUnsupportedDevNodeOperation)
+ default:
+ jsonhttp.BadRequest(w, nil)
+ }
+ ext.LogError(span, err, olog.String("action", "new.StamperPutter"))
+ return
+ }
+
+ ow := &cleanupOnErrWriter{
+ ResponseWriter: w,
+ onErr: putter.Cleanup,
+ logger: logger,
+ }
+
+ if headers.IsDir || headers.ContentType == multiPartFormData {
+ s.dirUploadHandler(ctx, logger, span, ow, r, putter, r.Header.Get(ContentTypeHeader), headers.Encrypt, tag, headers.RLevel, headers.Act, headers.HistoryAddress)
+ return
+ }
+ s.fileUploadHandler(ctx, logger, span, ow, r, putter, headers.Encrypt, tag, headers.RLevel, headers.Act, headers.HistoryAddress)
+}
diff --git a/pkg/api/bzz_shared.go b/pkg/api/bzz_shared.go
new file mode 100644
index 00000000000..3559785743d
--- /dev/null
+++ b/pkg/api/bzz_shared.go
@@ -0,0 +1,625 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package api
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/http"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethersphere/bee/v2/pkg/accesscontrol"
+ "github.com/ethersphere/bee/v2/pkg/feeds"
+ "github.com/ethersphere/bee/v2/pkg/file"
+ "github.com/ethersphere/bee/v2/pkg/file/joiner"
+ "github.com/ethersphere/bee/v2/pkg/file/loadsave"
+ "github.com/ethersphere/bee/v2/pkg/file/redundancy"
+ "github.com/ethersphere/bee/v2/pkg/file/redundancy/getter"
+ "github.com/ethersphere/bee/v2/pkg/jsonhttp"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/manifest"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storer"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/ethersphere/langos"
+ "github.com/gorilla/mux"
+)
+
+// The sizes of the buffers used for prefetching content with langos when not
+// using erasure coding.
+// Warning: these values influence the number of chunk requests and chunker
+// join goroutines per file request.
+// The recommended value is 8 or 16 times the io.Copy default buffer size of
+// 32 KiB, depending on the file size. Use lookaheadBufferSize() to get the
+// correct buffer size for the request.
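+// Worked numbers: smallFileBufferSize is 8*32 KiB = 256 KiB and
+// largeFileBufferSize is 16*32 KiB = 512 KiB, so for example a 1 MB file is
+// served with 256 KiB of lookahead and a 20 MB file with 512 KiB.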
+const (
+ smallFileBufferSize = 8 * 32 * 1024
+ largeFileBufferSize = 16 * 32 * 1024
+
+ largeBufferFilesizeThreshold = 10 * 1000000 // ten megs
+)
+
+func lookaheadBufferSize(size int64) int {
+ if size <= largeBufferFilesizeThreshold {
+ return smallFileBufferSize
+ }
+ return largeFileBufferSize
+}
+
+// bzzUploadResponse is returned when an HTTP request to upload a file is successful
+type bzzUploadResponse struct {
+ Reference swarm.Address `json:"reference"`
+}
+
+// fileUploadHandler uploads the file and its metadata supplied in the file body and
+// the headers
+func (s *Service) fileUploadHandler(
+ ctx context.Context,
+ logger log.Logger,
+ span opentracing.Span,
+ w http.ResponseWriter,
+ r *http.Request,
+ putter storer.PutterSession,
+ encrypt bool,
+ tagID uint64,
+ rLevel redundancy.Level,
+ act bool,
+ historyAddress swarm.Address,
+) {
+ queries := struct {
+ FileName string `map:"name" validate:"startsnotwith=/"`
+ }{}
+ if response := s.mapStructure(r.URL.Query(), &queries); response != nil {
+ response("invalid query params", logger, w)
+ return
+ }
+
+ p := requestPipelineFn(putter, encrypt, rLevel)
+
+ // first store the file and get its reference
+ fr, err := p(ctx, r.Body)
+ if err != nil {
+ logger.Debug("file store failed", "file_name", queries.FileName, "error", err)
+ logger.Error(nil, "file store failed", "file_name", queries.FileName)
+ switch {
+ case errors.Is(err, postage.ErrBucketFull):
+ jsonhttp.PaymentRequired(w, "batch is overissued")
+ default:
+ jsonhttp.InternalServerError(w, errFileStore)
+ }
+ ext.LogError(span, err, olog.String("action", "file.store"))
+ return
+ }
+
+ // If filename is still empty, use the file hash as the filename
+ if queries.FileName == "" {
+ queries.FileName = fr.String()
+ if err := s.validate.Struct(queries); err != nil {
+ verr := &validationError{
+ Entry: "file hash",
+ Value: queries.FileName,
+ Cause: err,
+ }
+ logger.Debug("invalid body filename", "error", verr)
+ logger.Error(nil, "invalid body filename")
+ jsonhttp.BadRequest(w, jsonhttp.StatusResponse{
+ Message: "invalid body params",
+ Code: http.StatusBadRequest,
+ Reasons: []jsonhttp.Reason{{
+ Field: "file hash",
+ Error: verr.Error(),
+ }},
+ })
+ return
+ }
+ }
+
+ factory := requestPipelineFactory(ctx, putter, encrypt, rLevel)
+ l := loadsave.New(s.storer.ChunkStore(), s.storer.Cache(), factory, rLevel)
+
+ m, err := manifest.NewDefaultManifest(l, encrypt)
+ if err != nil {
+ logger.Debug("create manifest failed", "file_name", queries.FileName, "error", err)
+ logger.Error(nil, "create manifest failed", "file_name", queries.FileName)
+ switch {
+ case errors.Is(err, manifest.ErrInvalidManifestType):
+ jsonhttp.BadRequest(w, "create manifest failed")
+ default:
+ jsonhttp.InternalServerError(w, nil)
+ }
+ return
+ }
+
+ rootMetadata := map[string]string{
+ manifest.WebsiteIndexDocumentSuffixKey: queries.FileName,
+ }
+ err = m.Add(ctx, manifest.RootPath, manifest.NewEntry(swarm.ZeroAddress, rootMetadata))
+ if err != nil {
+ logger.Debug("adding metadata to manifest failed", "file_name", queries.FileName, "error", err)
+ logger.Error(nil, "adding metadata to manifest failed", "file_name", queries.FileName)
+ jsonhttp.InternalServerError(w, "add metadata failed")
+ return
+ }
+
+ fileMtdt := map[string]string{
+ manifest.EntryMetadataContentTypeKey: r.Header.Get(ContentTypeHeader), // Content-Type has already been validated.
+ manifest.EntryMetadataFilenameKey: queries.FileName,
+ }
+
+ err = m.Add(ctx, queries.FileName, manifest.NewEntry(fr, fileMtdt))
+ if err != nil {
+ logger.Debug("adding file to manifest failed", "file_name", queries.FileName, "error", err)
+ logger.Error(nil, "adding file to manifest failed", "file_name", queries.FileName)
+ jsonhttp.InternalServerError(w, "add file failed")
+ return
+ }
+
+ logger.Debug("info", "encrypt", encrypt, "file_name", queries.FileName, "hash", fr, "metadata", fileMtdt)
+
+ manifestReference, err := m.Store(ctx)
+ if err != nil {
+ logger.Debug("manifest store failed", "file_name", queries.FileName, "error", err)
+ logger.Error(nil, "manifest store failed", "file_name", queries.FileName)
+ switch {
+ case errors.Is(err, postage.ErrBucketFull):
+ jsonhttp.PaymentRequired(w, "batch is overissued")
+ default:
+ jsonhttp.InternalServerError(w, "manifest store failed")
+ }
+ return
+ }
+ logger.Debug("store", "manifest_reference", manifestReference)
+
+ reference := manifestReference
+ historyReference := swarm.ZeroAddress
+ if act {
+ reference, historyReference, err = s.actEncryptionHandler(r.Context(), putter, reference, historyAddress)
+ if err != nil {
+ logger.Debug("access control upload failed", "error", err)
+ logger.Error(nil, "access control upload failed")
+ switch {
+ case errors.Is(err, accesscontrol.ErrNotFound):
+ jsonhttp.NotFound(w, "act or history entry not found")
+ case errors.Is(err, accesscontrol.ErrInvalidPublicKey) || errors.Is(err, accesscontrol.ErrSecretKeyInfinity):
+ jsonhttp.BadRequest(w, "invalid public key")
+ case errors.Is(err, accesscontrol.ErrUnexpectedType):
+ jsonhttp.BadRequest(w, "failed to create history")
+ default:
+ jsonhttp.InternalServerError(w, errActUpload)
+ }
+ return
+ }
+ }
+
+ err = putter.Done(manifestReference)
+ if err != nil {
+ logger.Debug("done split failed", "reference", manifestReference, "error", err)
+ logger.Error(nil, "done split failed")
+ jsonhttp.InternalServerError(w, "done split failed")
+ ext.LogError(span, err, olog.String("action", "putter.Done"))
+ return
+ }
+ span.LogFields(olog.Bool("success", true))
+ span.SetTag("root_address", reference)
+
+ if tagID != 0 {
+ w.Header().Set(SwarmTagHeader, fmt.Sprint(tagID))
+ span.SetTag("tagID", tagID)
+ }
+ w.Header().Set(ETagHeader, fmt.Sprintf("%q", reference.String()))
+ w.Header().Set(AccessControlExposeHeaders, SwarmTagHeader)
+ if act {
+ w.Header().Set(SwarmActHistoryAddressHeader, historyReference.String())
+ w.Header().Add(AccessControlExposeHeaders, SwarmActHistoryAddressHeader)
+ }
+
+ jsonhttp.Created(w, bzzUploadResponse{
+ Reference: reference,
+ })
+}
+
+func (s *Service) bzzDownloadHandler(w http.ResponseWriter, r *http.Request) {
+ logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("get_bzz_by_path").Build())
+
+ paths := struct {
+ Address swarm.Address `map:"address,resolve" validate:"required"`
+ Path string `map:"path"`
+ }{}
+ if response := s.mapStructure(mux.Vars(r), &paths); response != nil {
+ response("invalid path params", logger, w)
+ return
+ }
+
+ address := paths.Address
+ if v := getAddressFromContext(r.Context()); !v.IsZero() {
+ address = v
+ }
+
+ if strings.HasSuffix(paths.Path, "/") {
+ paths.Path = strings.TrimRight(paths.Path, "/") + "/" // NOTE: keep a single trailing slash if there was one.
+ }
+
+ s.serveReference(logger, address, paths.Path, w, r, false)
+}
+
+func (s *Service) bzzHeadHandler(w http.ResponseWriter, r *http.Request) {
+ logger := tracing.NewLoggerWithTraceID(r.Context(), s.logger.WithName("head_bzz_by_path").Build())
+
+ paths := struct {
+ Address swarm.Address `map:"address,resolve" validate:"required"`
+ Path string `map:"path"`
+ }{}
+ if response := s.mapStructure(mux.Vars(r), &paths); response != nil {
+ response("invalid path params", logger, w)
+ return
+ }
+
+ address := paths.Address
+ if v := getAddressFromContext(r.Context()); !v.IsZero() {
+ address = v
+ }
+
+ if strings.HasSuffix(paths.Path, "/") {
+ paths.Path = strings.TrimRight(paths.Path, "/") + "/" // NOTE: keep a single trailing slash if there was one.
+ }
+
+ s.serveReference(logger, address, paths.Path, w, r, true)
+}
+
+func (s *Service) serveReference(logger log.Logger, address swarm.Address, pathVar string, w http.ResponseWriter, r *http.Request, headerOnly bool) {
+ loggerV1 := logger.V(1).Build()
+
+ headers := struct {
+ Cache *bool `map:"Swarm-Cache"`
+ Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"`
+ FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"`
+ RLevel *redundancy.Level `map:"Swarm-Redundancy-Level"`
+ ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"`
+ }{}
+
+ if response := s.mapStructure(r.Header, &headers); response != nil {
+ response("invalid header params", logger, w)
+ return
+ }
+ cache := true
+ if headers.Cache != nil {
+ cache = *headers.Cache
+ }
+
+ rLevel := redundancy.DefaultLevel
+ if headers.RLevel != nil {
+ rLevel = *headers.RLevel
+ }
+
+ ctx := r.Context()
+ ls := loadsave.NewReadonly(s.storer.Download(cache), s.storer.Cache(), redundancy.DefaultLevel)
+ feedDereferenced := false
+
+ ctx, err := getter.SetConfigInContext(ctx, headers.Strategy, headers.FallbackMode, headers.ChunkRetrievalTimeout, logger)
+ if err != nil {
+ logger.Error(err, err.Error())
+ jsonhttp.BadRequest(w, "could not parse headers")
+ return
+ }
+
+FETCH:
+ // read manifest entry
+ m, err := manifest.NewDefaultManifestReference(
+ address,
+ ls,
+ )
+ if err != nil {
+ logger.Debug("bzz download: not manifest", "address", address, "error", err)
+ logger.Error(nil, "not manifest")
+ jsonhttp.NotFound(w, nil)
+ return
+ }
+
+ // There is a possible ambiguity here: the data that was read can be an
+ // entry.Entry or a mantaray feed manifest. Try to unmarshal it as
+ // mantaray first and possibly resolve the feed; otherwise continue
+ // normally.
+ if !feedDereferenced {
+ if l, err := s.manifestFeed(ctx, m); err == nil {
+ // we have a feed manifest here
+ ch, cur, _, err := l.At(ctx, time.Now().Unix(), 0)
+ if err != nil {
+ logger.Debug("bzz download: feed lookup failed", "error", err)
+ logger.Error(nil, "bzz download: feed lookup failed")
+ jsonhttp.NotFound(w, "feed not found")
+ return
+ }
+ if ch == nil {
+ logger.Debug("bzz download: feed lookup: no updates")
+ logger.Error(nil, "bzz download: feed lookup")
+ jsonhttp.NotFound(w, "no update found")
+ return
+ }
+ wc, err := feeds.GetWrappedChunk(ctx, s.storer.Download(cache), ch, false)
+ if err != nil {
+ logger.Debug("bzz download: mapStructure feed update failed", "error", err)
+ logger.Error(nil, "bzz download: mapStructure feed update failed")
+ jsonhttp.InternalServerError(w, "mapStructure feed update")
+ return
+ }
+ address = wc.Address()
+ // reinitialize ls, seeding it with the wrapped chunk, which may not exist in the store
+ ls = loadsave.NewReadonlyWithRootCh(s.storer.Download(cache), s.storer.Cache(), wc, rLevel)
+
+ feedDereferenced = true
+ curBytes, err := cur.MarshalBinary()
+ if err != nil {
+ s.logger.Debug("bzz download: marshal feed index failed", "error", err)
+ s.logger.Error(nil, "bzz download: marshal index failed")
+ jsonhttp.InternalServerError(w, "marshal index")
+ return
+ }
+
+ w.Header().Set(SwarmFeedIndexHeader, hex.EncodeToString(curBytes))
+ // this header might be overriding others. handle with care. in the future
+ // we should implement an append functionality for this specific header,
+ // since different parts of handlers might be overriding others' values
+ // resulting in inconsistent headers in the response.
+ w.Header().Set(AccessControlExposeHeaders, SwarmFeedIndexHeader)
+ goto FETCH
+ }
+ }
+
+ if pathVar == "" {
+ loggerV1.Debug("bzz download: handle empty path", "address", address)
+
+ if indexDocumentSuffixKey, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteIndexDocumentSuffixKey); ok {
+ pathWithIndex := path.Join(pathVar, indexDocumentSuffixKey)
+ indexDocumentManifestEntry, err := m.Lookup(ctx, pathWithIndex)
+ if err == nil {
+ // index document exists
+ logger.Debug("bzz download: serving path", "path", pathWithIndex)
+
+ s.serveManifestEntry(logger, w, r, indexDocumentManifestEntry, !feedDereferenced, headerOnly)
+ return
+ }
+ }
+ logger.Debug("bzz download: address not found or incorrect", "address", address, "path", pathVar)
+ logger.Error(nil, "address not found or incorrect")
+ jsonhttp.NotFound(w, "address not found or incorrect")
+ return
+ }
+ me, err := m.Lookup(ctx, pathVar)
+ if err != nil {
+ loggerV1.Debug("bzz download: invalid path", "address", address, "path", pathVar, "error", err)
+ logger.Error(nil, "bzz download: invalid path")
+
+ if errors.Is(err, manifest.ErrNotFound) {
+
+ if !strings.HasPrefix(pathVar, "/") {
+ // check for directory
+ dirPath := pathVar + "/"
+ exists, err := m.HasPrefix(ctx, dirPath)
+ if err == nil && exists {
+ // redirect to directory
+ u := r.URL
+ u.Path += "/"
+ redirectURL := u.String()
+
+ logger.Debug("bzz download: redirecting failed", "url", redirectURL, "error", err)
+
+ http.Redirect(w, r, redirectURL, http.StatusPermanentRedirect)
+ return
+ }
+ }
+
+ // check index suffix path
+ if indexDocumentSuffixKey, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteIndexDocumentSuffixKey); ok {
+ if !strings.HasSuffix(pathVar, indexDocumentSuffixKey) {
+ // check if path is directory with index
+ pathWithIndex := path.Join(pathVar, indexDocumentSuffixKey)
+ indexDocumentManifestEntry, err := m.Lookup(ctx, pathWithIndex)
+ if err == nil {
+ // index document exists
+ logger.Debug("bzz download: serving path", "path", pathWithIndex)
+
+ s.serveManifestEntry(logger, w, r, indexDocumentManifestEntry, !feedDereferenced, headerOnly)
+ return
+ }
+ }
+ }
+
+ // check if error document is to be shown
+ if errorDocumentPath, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteErrorDocumentPathKey); ok {
+ if pathVar != errorDocumentPath {
+ errorDocumentManifestEntry, err := m.Lookup(ctx, errorDocumentPath)
+ if err == nil {
+ // error document exists
+ logger.Debug("bzz download: serving path", "path", errorDocumentPath)
+
+ s.serveManifestEntry(logger, w, r, errorDocumentManifestEntry, !feedDereferenced, headerOnly)
+ return
+ }
+ }
+ }
+
+ jsonhttp.NotFound(w, "path address not found")
+ } else {
+ jsonhttp.NotFound(w, nil)
+ }
+ return
+ }
+
+ // serve requested path
+ s.serveManifestEntry(logger, w, r, me, !feedDereferenced, headerOnly)
+}
+
+func (s *Service) serveManifestEntry(
+ logger log.Logger,
+ w http.ResponseWriter,
+ r *http.Request,
+ manifestEntry manifest.Entry,
+ etag, headersOnly bool,
+) {
+ additionalHeaders := http.Header{}
+ mtdt := manifestEntry.Metadata()
+ if fname, ok := mtdt[manifest.EntryMetadataFilenameKey]; ok {
+ fname = filepath.Base(fname) // only keep the file name
+ additionalHeaders[ContentDispositionHeader] = []string{fmt.Sprintf("inline; filename=\"%s\"", escapeQuotes(fname))}
+ }
+ if mimeType, ok := mtdt[manifest.EntryMetadataContentTypeKey]; ok {
+ additionalHeaders[ContentTypeHeader] = []string{mimeType}
+ }
+
+ s.downloadHandler(logger, w, r, manifestEntry.Reference(), additionalHeaders, etag, headersOnly, nil)
+}
+
+// downloadHandler contains the common logic for downloading a Swarm file via the API
+func (s *Service) downloadHandler(logger log.Logger, w http.ResponseWriter, r *http.Request, reference swarm.Address, additionalHeaders http.Header, etag, headersOnly bool, rootCh swarm.Chunk) {
+ headers := struct {
+ Strategy *getter.Strategy `map:"Swarm-Redundancy-Strategy"`
+ RLevel *redundancy.Level `map:"Swarm-Redundancy-Level"`
+ FallbackMode *bool `map:"Swarm-Redundancy-Fallback-Mode"`
+ ChunkRetrievalTimeout *string `map:"Swarm-Chunk-Retrieval-Timeout"`
+ LookaheadBufferSize *int `map:"Swarm-Lookahead-Buffer-Size"`
+ Cache *bool `map:"Swarm-Cache"`
+ }{}
+
+ if response := s.mapStructure(r.Header, &headers); response != nil {
+ response("invalid header params", logger, w)
+ return
+ }
+ cache := true
+ if headers.Cache != nil {
+ cache = *headers.Cache
+ }
+
+ ctx := r.Context()
+ ctx, err := getter.SetConfigInContext(ctx, headers.Strategy, headers.FallbackMode, headers.ChunkRetrievalTimeout, logger)
+ if err != nil {
+ logger.Error(err, err.Error())
+ jsonhttp.BadRequest(w, "could not parse headers")
+ return
+ }
+ rLevel := redundancy.DefaultLevel
+ if headers.RLevel != nil {
+ rLevel = *headers.RLevel
+ }
+
+ var (
+ reader file.Joiner
+ l int64
+ )
+ if rootCh != nil {
+ reader, l, err = joiner.NewJoiner(ctx, s.storer.Download(cache), s.storer.Cache(), reference, rootCh)
+ } else {
+ reader, l, err = joiner.New(ctx, s.storer.Download(cache), s.storer.Cache(), reference, rLevel)
+ }
+ if err != nil {
+ if errors.Is(err, storage.ErrNotFound) || errors.Is(err, topology.ErrNotFound) {
+ logger.Debug("api download: not found ", "address", reference, "error", err)
+ logger.Error(nil, err.Error())
+ jsonhttp.NotFound(w, nil)
+ return
+ }
+ logger.Debug("api download: unexpected error", "address", reference, "error", err)
+ logger.Error(nil, "api download: unexpected error")
+ jsonhttp.InternalServerError(w, "joiner failed")
+ return
+ }
+
+ // include additional headers
+ for name, values := range additionalHeaders {
+ for _, value := range values {
+ w.Header().Add(name, value)
+ }
+ }
+ if etag {
+ w.Header().Set(ETagHeader, fmt.Sprintf("%q", reference))
+ }
+ w.Header().Set(ContentLengthHeader, strconv.FormatInt(l, 10))
+ w.Header().Add(AccessControlExposeHeaders, ContentDispositionHeader)
+
+ if headersOnly {
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+
+ bufSize := lookaheadBufferSize(l)
+ if headers.LookaheadBufferSize != nil {
+ bufSize = *(headers.LookaheadBufferSize)
+ }
+ if bufSize > 0 {
+ http.ServeContent(w, r, "", time.Now(), langos.NewBufferedLangos(reader, bufSize))
+ return
+ }
+ http.ServeContent(w, r, "", time.Now(), reader)
+}
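+
+// A client can tune retrieval via the headers parsed above. An illustrative
+// request against a node's default API port (the reference is a placeholder):
+//
+//	curl -H "Swarm-Redundancy-Strategy: 1" \
+//	     -H "Swarm-Cache: false" \
+//	     http://localhost:1633/bzz/<reference>/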
+
+// manifestMetadataLoad returns the value for a key stored in the metadata of
+// manifest path, or empty string if no value is present.
+// The ok result indicates whether value was found in the metadata.
+func manifestMetadataLoad(
+ ctx context.Context,
+ manifest manifest.Interface,
+ path, metadataKey string,
+) (string, bool) {
+ me, err := manifest.Lookup(ctx, path)
+ if err != nil {
+ return "", false
+ }
+
+ manifestRootMetadata := me.Metadata()
+ if val, ok := manifestRootMetadata[metadataKey]; ok {
+ return val, ok
+ }
+
+ return "", false
+}
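+
+// For example, the bzz handler above uses it to read the website index
+// document suffix from the manifest root metadata:
+//
+//	suffix, ok := manifestMetadataLoad(ctx, m, manifest.RootPath, manifest.WebsiteIndexDocumentSuffixKey)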
+
+func (s *Service) manifestFeed(
+ ctx context.Context,
+ m manifest.Interface,
+) (feeds.Lookup, error) {
+ e, err := m.Lookup(ctx, "/")
+ if err != nil {
+ return nil, fmt.Errorf("node lookup: %w", err)
+ }
+ var (
+ owner, topic []byte
+ t = new(feeds.Type)
+ )
+ meta := e.Metadata()
+ if e := meta[feedMetadataEntryOwner]; e != "" {
+ owner, err = hex.DecodeString(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if e := meta[feedMetadataEntryTopic]; e != "" {
+ topic, err = hex.DecodeString(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if e := meta[feedMetadataEntryType]; e != "" {
+ err := t.FromString(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(owner) == 0 || len(topic) == 0 {
+ return nil, fmt.Errorf("node lookup: %s", "feed metadata absent")
+ }
+ f := feeds.New(topic, common.BytesToAddress(owner))
+ return s.feedFactory.NewLookup(*t, f)
+}
diff --git a/pkg/api/chequebook.go b/pkg/api/chequebook.go
index 33cd75d9767..73d59874806 100644
--- a/pkg/api/chequebook.go
+++ b/pkg/api/chequebook.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/chunk_stream.go b/pkg/api/chunk_stream.go
index 2f91939f21a..ded9c4cf97a 100644
--- a/pkg/api/chunk_stream.go
+++ b/pkg/api/chunk_stream.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/chunk_stream_test.go b/pkg/api/chunk_stream_test.go
index 47c8e860b51..a44dc9316d8 100644
--- a/pkg/api/chunk_stream_test.go
+++ b/pkg/api/chunk_stream_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/gsoc.go b/pkg/api/gsoc.go
index 60d048ffdc0..f4c1a24619d 100644
--- a/pkg/api/gsoc.go
+++ b/pkg/api/gsoc.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2024 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/gsoc_test.go b/pkg/api/gsoc_test.go
index edef7a39842..3138f18245a 100644
--- a/pkg/api/gsoc_test.go
+++ b/pkg/api/gsoc_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2024 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/metrics.go b/pkg/api/metrics.go
index 4f27142dfab..aadfc0273c4 100644
--- a/pkg/api/metrics.go
+++ b/pkg/api/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/postage.go b/pkg/api/postage.go
index 9d6aa2bcc11..31f026594a9 100644
--- a/pkg/api/postage.go
+++ b/pkg/api/postage.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/pss.go b/pkg/api/pss.go
index ef1c3a84d47..8a7dd8f7a8e 100644
--- a/pkg/api/pss.go
+++ b/pkg/api/pss.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/pss_test.go b/pkg/api/pss_test.go
index d78f8ea668f..17e8c7f9933 100644
--- a/pkg/api/pss_test.go
+++ b/pkg/api/pss_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/rchash.go b/pkg/api/rchash.go
index d0870fde9b8..9ebaff24edd 100644
--- a/pkg/api/rchash.go
+++ b/pkg/api/rchash.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/redistribution.go b/pkg/api/redistribution.go
index bce920e1d72..fbea2a15b76 100644
--- a/pkg/api/redistribution.go
+++ b/pkg/api/redistribution.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/router.go b/pkg/api/router.go
index 714217c9c11..cf5d56767bb 100644
--- a/pkg/api/router.go
+++ b/pkg/api/router.go
@@ -1,6 +1,5 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package api
@@ -16,40 +15,10 @@ import (
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/felixge/fgprof"
"github.com/gorilla/handlers"
- "github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus/promhttp"
"resenje.org/web"
)
-const (
- apiVersion = "v1" // Only one api version exists, this should be configurable with more.
- rootPath = "/" + apiVersion
-)
-
-func (s *Service) Mount() {
- if s == nil {
- return
- }
-
- router := mux.NewRouter()
-
- router.NotFoundHandler = http.HandlerFunc(jsonhttp.NotFoundHandler)
-
- s.router = router
-
- s.mountTechnicalDebug()
- s.mountBusinessDebug()
- s.mountAPI()
-
- s.Handler = web.ChainHandlers(
- httpaccess.NewHTTPAccessLogHandler(s.logger, s.tracer, "api access"),
- handlers.CompressHandler,
- s.corsHandler,
- web.NoCacheHeadersHandler,
- web.FinalHandler(router),
- )
-}
-
// EnableFullAPI will enable all available endpoints, because some endpoints are not available during syncing.
func (s *Service) EnableFullAPI() {
if s == nil {
@@ -104,110 +73,6 @@ func (s *Service) EnableFullAPI() {
)
}
-func (s *Service) mountTechnicalDebug() {
- s.router.Handle("/node", jsonhttp.MethodHandler{
- "GET": http.HandlerFunc(s.nodeGetHandler),
- })
-
- s.router.Handle("/addresses", jsonhttp.MethodHandler{
- "GET": http.HandlerFunc(s.addressesHandler),
- })
-
- s.router.Handle("/chainstate", jsonhttp.MethodHandler{
- "GET": http.HandlerFunc(s.chainStateHandler),
- })
-
- s.router.Handle("/debugstore", jsonhttp.MethodHandler{
- "GET": web.ChainHandlers(
- httpaccess.NewHTTPAccessSuppressLogHandler(),
- web.FinalHandlerFunc(s.debugStorage),
- ),
- })
-
- s.router.Path("/metrics").Handler(web.ChainHandlers(
- httpaccess.NewHTTPAccessSuppressLogHandler(),
- web.FinalHandler(promhttp.InstrumentMetricHandler(
- s.metricsRegistry,
- promhttp.HandlerFor(s.metricsRegistry, promhttp.HandlerOpts{}),
- )),
- ))
-
- s.router.Handle("/debug/pprof", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- u := r.URL
- u.Path += "/"
- http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
- }))
-
- s.router.Handle("/debug/fgprof", fgprof.Handler())
- s.router.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
- s.router.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
- s.router.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
- s.router.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
- s.router.PathPrefix("/debug/pprof/").Handler(http.HandlerFunc(pprof.Index))
- s.router.Handle("/debug/vars", expvar.Handler())
-
- s.router.Handle("/loggers", jsonhttp.MethodHandler{
- "GET": web.ChainHandlers(
- httpaccess.NewHTTPAccessSuppressLogHandler(),
- web.FinalHandlerFunc(s.loggerGetHandler),
- ),
- })
-
- s.router.Handle("/loggers/{exp}", jsonhttp.MethodHandler{
- "GET": web.ChainHandlers(
- httpaccess.NewHTTPAccessSuppressLogHandler(),
- web.FinalHandlerFunc(s.loggerGetHandler),
- ),
- })
-
- s.router.Handle("/loggers/{exp}/{verbosity}", jsonhttp.MethodHandler{
- "PUT": web.ChainHandlers(
- httpaccess.NewHTTPAccessSuppressLogHandler(),
- web.FinalHandlerFunc(s.loggerSetVerbosityHandler),
- ),
- })
-
- s.router.Handle("/readiness", web.ChainHandlers(
- httpaccess.NewHTTPAccessSuppressLogHandler(),
- web.FinalHandlerFunc(s.readinessHandler),
- ))
-
- s.router.Handle("/health", web.ChainHandlers(
- httpaccess.NewHTTPAccessSuppressLogHandler(),
- web.FinalHandlerFunc(s.healthHandler),
- ))
-}
-
-func (s *Service) checkRouteAvailability(handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if !s.fullAPIEnabled {
- jsonhttp.ServiceUnavailable(w, "Node is syncing. This endpoint is unavailable. Try again later.")
- return
- }
- handler.ServeHTTP(w, r)
- })
-}
-
-func (s *Service) checkSwapAvailability(handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if !s.swapEnabled {
- jsonhttp.NotImplemented(w, "Swap is disabled. This endpoint is unavailable.")
- return
- }
- handler.ServeHTTP(w, r)
- })
-}
-
-func (s *Service) checkChequebookAvailability(handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if !s.chequebookEnabled {
- jsonhttp.NotImplemented(w, "Chequebook is disabled. This endpoint is unavailable.")
- return
- }
- handler.ServeHTTP(w, r)
- })
-}
-
func (s *Service) mountAPI() {
subdomainRouter := s.router.Host("{subdomain:.*}.swarm.localhost").Subrouter()
@@ -386,6 +251,80 @@ func (s *Service) mountAPI() {
})
}
+func (s *Service) mountTechnicalDebug() {
+ s.router.Handle("/node", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.nodeGetHandler),
+ })
+
+ s.router.Handle("/addresses", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.addressesHandler),
+ })
+
+ s.router.Handle("/chainstate", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.chainStateHandler),
+ })
+
+ s.router.Handle("/debugstore", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.debugStorage),
+ ),
+ })
+
+ s.router.Path("/metrics").Handler(web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandler(promhttp.InstrumentMetricHandler(
+ s.metricsRegistry,
+ promhttp.HandlerFor(s.metricsRegistry, promhttp.HandlerOpts{}),
+ )),
+ ))
+
+ s.router.Handle("/debug/pprof", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ u := r.URL
+ u.Path += "/"
+ http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
+ }))
+
+ s.router.Handle("/debug/fgprof", fgprof.Handler())
+ s.router.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
+ s.router.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
+ s.router.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
+ s.router.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
+ s.router.PathPrefix("/debug/pprof/").Handler(http.HandlerFunc(pprof.Index))
+ s.router.Handle("/debug/vars", expvar.Handler())
+
+ s.router.Handle("/loggers", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.loggerGetHandler),
+ ),
+ })
+
+ s.router.Handle("/loggers/{exp}", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.loggerGetHandler),
+ ),
+ })
+
+ s.router.Handle("/loggers/{exp}/{verbosity}", jsonhttp.MethodHandler{
+ "PUT": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.loggerSetVerbosityHandler),
+ ),
+ })
+
+ s.router.Handle("/readiness", web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.readinessHandler),
+ ))
+
+ s.router.Handle("/health", web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.healthHandler),
+ ))
+}
+
func (s *Service) mountBusinessDebug() {
handle := func(path string, handler http.Handler) {
routeHandler := s.checkRouteAvailability(handler)
diff --git a/pkg/api/router_js.go b/pkg/api/router_js.go
new file mode 100644
index 00000000000..552bed76967
--- /dev/null
+++ b/pkg/api/router_js.go
@@ -0,0 +1,363 @@
+//go:build js
+// +build js
+
+package api
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/ethersphere/bee/v2/pkg/jsonhttp"
+ "github.com/ethersphere/bee/v2/pkg/log/httpaccess"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/gorilla/handlers"
+ "resenje.org/web"
+)
+
+// EnableFullAPI will enable all available endpoints, because some endpoints are not available during syncing.
+func (s *Service) EnableFullAPI() {
+ if s == nil {
+ return
+ }
+
+ s.fullAPIEnabled = true
+
+ compressHandler := func(h http.Handler) http.Handler {
+ downloadEndpoints := []string{
+ "/bzz",
+ "/bytes",
+ "/chunks",
+ "/feeds",
+ "/soc",
+ rootPath + "/bzz",
+ rootPath + "/bytes",
+ rootPath + "/chunks",
+ rootPath + "/feeds",
+ rootPath + "/soc",
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Skip compression for GET requests on download endpoints.
+ // This preserves the Content-Length header in the response,
+ // because CompressHandler always removes it.
+ if r.Method == http.MethodGet {
+ for _, endpoint := range downloadEndpoints {
+ if strings.HasPrefix(r.URL.Path, endpoint) {
+ h.ServeHTTP(w, r)
+ return
+ }
+ }
+ }
+
+ if r.Method == http.MethodHead {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ handlers.CompressHandler(h).ServeHTTP(w, r)
+ })
+ }
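+
+ // For example, a GET on "/bzz/<reference>/" is served uncompressed so the
+ // client can rely on Content-Length, while other responses are compressed.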
+
+ s.Handler = web.ChainHandlers(
+ httpaccess.NewHTTPAccessLogHandler(s.logger, s.tracer, "api access"),
+ compressHandler,
+ s.corsHandler,
+ web.FinalHandler(s.router),
+ )
+}
+
+func (s *Service) mountAPI() {
+ subdomainRouter := s.router.Host("{subdomain:.*}.swarm.localhost").Subrouter()
+
+ subdomainRouter.Handle("/{path:.*}", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ web.FinalHandlerFunc(s.subdomainHandler),
+ ),
+ })
+
+ s.router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, "Ethereum Swarm Bee")
+ })
+
+ s.router.HandleFunc("/robots.txt", func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, "User-agent: *\nDisallow: /")
+ })
+
+ // handle is a helper closure which simplifies the router setup.
+ handle := func(path string, handler http.Handler) {
+ routeHandler := s.checkRouteAvailability(handler)
+ s.router.Handle(path, routeHandler)
+ s.router.Handle(rootPath+path, routeHandler)
+ }
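+
+ // For example, handle("/bytes", h) registers h at both "/bytes" and
+ // rootPath+"/bytes", i.e. "/v1/bytes".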
+
+ handle("/bytes", jsonhttp.MethodHandler{
+ "POST": web.ChainHandlers(
+ web.FinalHandlerFunc(s.bytesUploadHandler),
+ ),
+ })
+
+ handle("/bytes/{address}", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ s.actDecryptionHandler(),
+ web.FinalHandlerFunc(s.bytesGetHandler),
+ ),
+ "HEAD": web.ChainHandlers(
+ s.actDecryptionHandler(),
+ web.FinalHandlerFunc(s.bytesHeadHandler),
+ ),
+ })
+
+ handle("/chunks", jsonhttp.MethodHandler{
+ "POST": web.ChainHandlers(
+ jsonhttp.NewMaxBodyBytesHandler(swarm.SocMaxChunkSize),
+ web.FinalHandlerFunc(s.chunkUploadHandler),
+ ),
+ })
+
+ handle("/chunks/{address}", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ s.actDecryptionHandler(),
+ web.FinalHandlerFunc(s.chunkGetHandler),
+ ),
+ "HEAD": web.ChainHandlers(
+ s.actDecryptionHandler(),
+ web.FinalHandlerFunc(s.hasChunkHandler),
+ ),
+ })
+
+ handle("/envelope/{address}", jsonhttp.MethodHandler{
+ "POST": http.HandlerFunc(s.envelopePostHandler),
+ })
+
+ handle("/soc/{owner}/{id}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.socGetHandler),
+ "POST": web.ChainHandlers(
+ jsonhttp.NewMaxBodyBytesHandler(swarm.ChunkWithSpanSize),
+ web.FinalHandlerFunc(s.socUploadHandler),
+ ),
+ })
+
+ handle("/feeds/{owner}/{topic}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.feedGetHandler),
+ "POST": web.ChainHandlers(
+ jsonhttp.NewMaxBodyBytesHandler(swarm.ChunkWithSpanSize),
+ web.FinalHandlerFunc(s.feedPostHandler),
+ ),
+ })
+
+ handle("/bzz", jsonhttp.MethodHandler{
+ "POST": web.ChainHandlers(
+ web.FinalHandlerFunc(s.bzzUploadHandler),
+ ),
+ })
+
+ handle("/grantee", jsonhttp.MethodHandler{
+ "POST": http.HandlerFunc(s.actCreateGranteesHandler),
+ })
+
+ handle("/grantee/{address}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.actListGranteesHandler),
+ "PATCH": http.HandlerFunc(s.actGrantRevokeHandler),
+ })
+
+ handle("/bzz/{address}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ u := r.URL
+ u.Path += "/"
+ http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
+ }))
+
+ handle("/bzz/{address}/{path:.*}", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ s.actDecryptionHandler(),
+ web.FinalHandlerFunc(s.bzzDownloadHandler),
+ ),
+ "HEAD": web.ChainHandlers(
+ s.actDecryptionHandler(),
+ web.FinalHandlerFunc(s.bzzHeadHandler),
+ ),
+ })
+
+ handle("/tags", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.listTagsHandler),
+ "POST": web.ChainHandlers(
+ jsonhttp.NewMaxBodyBytesHandler(1024),
+ web.FinalHandlerFunc(s.createTagHandler),
+ ),
+ })
+
+ handle("/tags/{id}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.getTagHandler),
+ "DELETE": http.HandlerFunc(s.deleteTagHandler),
+ "PATCH": web.ChainHandlers(
+ jsonhttp.NewMaxBodyBytesHandler(1024),
+ web.FinalHandlerFunc(s.doneSplitHandler),
+ ),
+ })
+
+ handle("/pins", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.listPinnedRootHashes),
+ })
+
+ handle("/pins/check", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.pinIntegrityHandler),
+ })
+
+ handle("/pins/{reference}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.getPinnedRootHash),
+ "POST": http.HandlerFunc(s.pinRootHash),
+ "DELETE": http.HandlerFunc(s.unpinRootHash),
+ },
+ )
+
+ handle("/stewardship/{address}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.stewardshipGetHandler),
+ "PUT": http.HandlerFunc(s.stewardshipPutHandler),
+ })
+}
+
+func (s *Service) mountTechnicalDebug() {
+ s.router.Handle("/node", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.nodeGetHandler),
+ })
+
+ s.router.Handle("/addresses", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.addressesHandler),
+ })
+
+ s.router.Handle("/debugstore", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.debugStorage),
+ ),
+ })
+
+ s.router.Handle("/loggers", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.loggerGetHandler),
+ ),
+ })
+
+ s.router.Handle("/loggers/{exp}", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.loggerGetHandler),
+ ),
+ })
+
+ s.router.Handle("/loggers/{exp}/{verbosity}", jsonhttp.MethodHandler{
+ "PUT": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.loggerSetVerbosityHandler),
+ ),
+ })
+
+ s.router.Handle("/readiness", web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.readinessHandler),
+ ))
+
+ s.router.Handle("/health", web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.healthHandler),
+ ))
+}
+
+func (s *Service) mountBusinessDebug() {
+ handle := func(path string, handler http.Handler) {
+ routeHandler := s.checkRouteAvailability(handler)
+ s.router.Handle(path, routeHandler)
+ s.router.Handle(rootPath+path, routeHandler)
+ }
+
+ handle("/peers", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.peersHandler),
+ })
+
+ handle("/pingpong/{address}", jsonhttp.MethodHandler{
+ "POST": http.HandlerFunc(s.pingpongHandler),
+ })
+
+ handle("/connect/{multi-address:.+}", jsonhttp.MethodHandler{
+ "POST": http.HandlerFunc(s.peerConnectHandler),
+ })
+
+ handle("/blocklist", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.blocklistedPeersHandler),
+ })
+
+ handle("/peers/{address}", jsonhttp.MethodHandler{
+ "DELETE": http.HandlerFunc(s.peerDisconnectHandler),
+ })
+
+ handle("/topology", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.topologyHandler),
+ })
+
+ handle("/welcome-message", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.getWelcomeMessageHandler),
+ "POST": web.ChainHandlers(
+ jsonhttp.NewMaxBodyBytesHandler(welcomeMessageMaxRequestSize),
+ web.FinalHandlerFunc(s.setWelcomeMessageHandler),
+ ),
+ })
+
+ handle("/balances", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.compensatedBalancesHandler),
+ })
+
+ handle("/balances/{peer}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.compensatedPeerBalanceHandler),
+ })
+
+ handle("/consumed", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.balancesHandler),
+ })
+
+ handle("/consumed/{peer}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.peerBalanceHandler),
+ })
+
+ handle("/timesettlements", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.settlementsHandlerPseudosettle),
+ })
+
+ handle("/settlements", web.ChainHandlers(
+ s.checkSwapAvailability,
+ web.FinalHandler(jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.settlementsHandler),
+ }),
+ ))
+
+ handle("/settlements/{peer}", web.ChainHandlers(
+ s.checkSwapAvailability,
+ web.FinalHandler(jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.peerSettlementsHandler),
+ }),
+ ))
+
+ handle("/status", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ web.FinalHandlerFunc(s.statusGetHandler),
+ ),
+ })
+
+ handle("/status/peers", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ s.statusAccessHandler,
+ web.FinalHandlerFunc(s.statusGetPeersHandler),
+ ),
+ })
+
+ handle("/status/neighborhoods", jsonhttp.MethodHandler{
+ "GET": web.ChainHandlers(
+ httpaccess.NewHTTPAccessSuppressLogHandler(),
+ s.statusAccessHandler,
+ web.FinalHandlerFunc(s.statusGetNeighborhoods),
+ ),
+ })
+}
diff --git a/pkg/api/router_shared.go b/pkg/api/router_shared.go
new file mode 100644
index 00000000000..1b922aeb6d9
--- /dev/null
+++ b/pkg/api/router_shared.go
@@ -0,0 +1,74 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package api
+
+import (
+ "net/http"
+
+ "github.com/ethersphere/bee/v2/pkg/jsonhttp"
+ "github.com/ethersphere/bee/v2/pkg/log/httpaccess"
+ "github.com/gorilla/handlers"
+ "github.com/gorilla/mux"
+ "resenje.org/web"
+)
+
+const (
+ apiVersion = "v1" // Only one api version exists, this should be configurable with more.
+ rootPath = "/" + apiVersion
+)
+
+func (s *Service) Mount() {
+ if s == nil {
+ return
+ }
+
+ router := mux.NewRouter()
+
+ router.NotFoundHandler = http.HandlerFunc(jsonhttp.NotFoundHandler)
+
+ s.router = router
+
+ s.mountTechnicalDebug()
+ s.mountBusinessDebug()
+ s.mountAPI()
+
+ s.Handler = web.ChainHandlers(
+ httpaccess.NewHTTPAccessLogHandler(s.logger, s.tracer, "api access"),
+ handlers.CompressHandler,
+ s.corsHandler,
+ web.NoCacheHeadersHandler,
+ web.FinalHandler(router),
+ )
+}
+
+func (s *Service) checkRouteAvailability(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !s.fullAPIEnabled {
+ jsonhttp.ServiceUnavailable(w, "Node is syncing. This endpoint is unavailable. Try again later.")
+ return
+ }
+ handler.ServeHTTP(w, r)
+ })
+}
+
+func (s *Service) checkSwapAvailability(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !s.swapEnabled {
+ jsonhttp.NotImplemented(w, "Swap is disabled. This endpoint is unavailable.")
+ return
+ }
+ handler.ServeHTTP(w, r)
+ })
+}
+
+func (s *Service) checkChequebookAvailability(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !s.chequebookEnabled {
+ jsonhttp.NotImplemented(w, "Chequebook is disabled. This endpoint is unavailable.")
+ return
+ }
+ handler.ServeHTTP(w, r)
+ })
+}
diff --git a/pkg/api/router_test.go b/pkg/api/router_test.go
index 22ba15c8cde..71d3c8b56ce 100644
--- a/pkg/api/router_test.go
+++ b/pkg/api/router_test.go
@@ -6,7 +6,6 @@ package api_test
import (
"net/http"
- "slices"
"strings"
"testing"
@@ -428,7 +427,7 @@ func TestEndpointOptions(t *testing.T) {
actualMethods := strings.Split(allowHeader, ", ")
for _, expectedMethod := range tt.expectedMethods {
- if !slices.Contains(actualMethods, expectedMethod) {
+ if !contains(actualMethods, expectedMethod) {
t.Errorf("expected method %s not found for route %s", expectedMethod, tt.route)
}
}
@@ -437,3 +436,12 @@ func TestEndpointOptions(t *testing.T) {
})
}
}
+
+func contains(slice []string, item string) bool {
+ for _, s := range slice {
+ if s == item {
+ return true
+ }
+ }
+ return false
+}
diff --git a/pkg/api/staking.go b/pkg/api/staking.go
index 2f957e4532a..410b670dd1f 100644
--- a/pkg/api/staking.go
+++ b/pkg/api/staking.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2022 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/transaction.go b/pkg/api/transaction.go
index 5be66c7fb42..c9001e865b5 100644
--- a/pkg/api/transaction.go
+++ b/pkg/api/transaction.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/api/wallet.go b/pkg/api/wallet.go
index 5cdd0de087d..bf3b6a6f2c3 100644
--- a/pkg/api/wallet.go
+++ b/pkg/api/wallet.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2022 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/bzz/address.go b/pkg/bzz/address.go
index 42fa8fd90b4..0f3f25cd4fa 100644
--- a/pkg/bzz/address.go
+++ b/pkg/bzz/address.go
@@ -113,11 +113,14 @@ func (a *Address) Equal(b *Address) bool {
}
func multiaddrEqual(a, b ma.Multiaddr) bool {
- if a == nil || b == nil {
- return a == b
+ switch {
+ case a == nil && b == nil:
+ return true
+ case a == nil || b == nil:
+ return false
+ default:
+ return a.Equal(b)
}
-
- return a.Equal(b)
}
func (a *Address) MarshalJSON() ([]byte, error) {
diff --git a/pkg/crypto/signer.go b/pkg/crypto/signer.go
index 150a05ef953..d47b27a0efc 100644
--- a/pkg/crypto/signer.go
+++ b/pkg/crypto/signer.go
@@ -140,10 +140,7 @@ func (d *defaultSigner) SignTypedData(typedData *eip712.TypedData) ([]byte, erro
// sign the provided hash and convert it to the ethereum (r,s,v) format.
func (d *defaultSigner) sign(sighash []byte, isCompressedKey bool) ([]byte, error) {
pvk, _ := btcec.PrivKeyFromBytes(d.key.D.Bytes())
- signature, err := btcecdsa.SignCompact(pvk, sighash, false)
- if err != nil {
- return nil, err
- }
+ signature := btcecdsa.SignCompact(pvk, sighash, false)
// Convert to Ethereum signature format with 'recovery id' v at the end.
v := signature[0]
diff --git a/pkg/fs/fs.go b/pkg/fs/fs.go
new file mode 100644
index 00000000000..7eaebc80fb8
--- /dev/null
+++ b/pkg/fs/fs.go
@@ -0,0 +1,40 @@
+//go:build !js
+// +build !js
+
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package fs
+
+import (
+ "os"
+)
+
+// osOpen calls os.Open.
+func osOpen(name string) (OsFile, error) {
+ return os.Open(name)
+}
+
+// osOpenFile calls os.OpenFile.
+func osOpenFile(name string, flag int, perm os.FileMode) (OsFile, error) {
+ return os.OpenFile(name, flag, perm)
+}
+
+// osMkdirAll calls os.MkdirAll.
+func osMkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+// writeFile calls os.WriteFile.
+func writeFile(filename string, data []byte, perm os.FileMode) error {
+ return os.WriteFile(filename, data, perm)
+}
+
+// fsStat calls os.Stat.
+func fsStat(name string) (os.FileInfo, error) {
+ return os.Stat(name)
+}
+
+// osRemove calls os.Remove.
+func osRemove(name string) error {
+ return os.Remove(name)
+}
diff --git a/pkg/fs/fs_js.go b/pkg/fs/fs_js.go
new file mode 100644
index 00000000000..c5ee8122caa
--- /dev/null
+++ b/pkg/fs/fs_js.go
@@ -0,0 +1,572 @@
+//go:build js && wasm
+// +build js,wasm
+
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package fs
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+ "syscall"
+ "syscall/js"
+ "time"
+)
+
+var _ os.FileInfo = fsFileInfo{}
+
+// fsFileInfo is an implementation of os.FileInfo for JavaScript/Wasm.
+// It is backed by the ZenFS JavaScript filesystem.
+type fsFileInfo struct {
+ js.Value
+ path string
+}
+
+// Name returns the base name of the file.
+func (fi fsFileInfo) Name() string {
+ return filepath.Base(fi.path)
+}
+
+// Size returns the length of the file in bytes.
+func (fi fsFileInfo) Size() int64 {
+ return int64(fi.Value.Get("size").Int())
+}
+
+// Mode returns the file mode bits.
+func (fi fsFileInfo) Mode() os.FileMode {
+ return os.FileMode(fi.Value.Get("size").Int())
+}
+
+// ModTime returns the modification time.
+func (fi fsFileInfo) ModTime() time.Time {
+ modifiedTimeString := fi.Value.Get("mtime").String()
+ modifiedTime, err := time.Parse(time.RFC3339, modifiedTimeString)
+ if err != nil {
+ panic(fmt.Errorf("could not convert string mtime (%q) to time.Time: %s", modifiedTimeString, err.Error()))
+ }
+ return modifiedTime
+}
+
+// IsDir is an abbreviation for Mode().IsDir().
+func (fi fsFileInfo) IsDir() bool {
+ return fi.Value.Call("isDirectory").Bool()
+}
+
+// Sys returns the underlying data source (always returns nil for wasm/js).
+func (fi fsFileInfo) Sys() interface{} {
+ return nil
+}
+
+var _ OsFile = &fsFile{}
+
+// fsFile is an implementation of OsFile for JavaScript/Wasm. It is backed
+// by the ZenFS JavaScript filesystem.
+type fsFile struct {
+ // name is the name of the file (including path)
+ path string
+ // fd is a file descriptor used as a reference to the file.
+ fd int
+ // currOffset is the current value of the offset used for reading or writing.
+ currOffset int64
+}
+
+// Stat returns the FileInfo structure describing file. If there is an error,
+// it will be of type *PathError.
+func (f fsFile) Stat() (os.FileInfo, error) {
+ return fsStat(f.path)
+}
+
+// Read reads up to len(b) bytes from the File. It returns the number of bytes
+// read and any error encountered. At end of file, Read returns 0, io.EOF.
+func (f *fsFile) Read(b []byte) (n int, err error) {
+ bytesRead, err := f.read(b, f.currOffset)
+ if err != nil {
+ return bytesRead, err
+ }
+ if bytesRead == 0 {
+ return 0, io.EOF
+ }
+ f.currOffset += int64(bytesRead)
+ return bytesRead, nil
+}
+
+// ReadAt reads len(b) bytes from the File starting at byte offset off. It
+// returns the number of bytes read and the error, if any. ReadAt always
+// returns a non-nil error when n < len(b). At end of file, that error is
+// io.EOF.
+func (f fsFile) ReadAt(b []byte, off int64) (n int, err error) {
+ bytesRead, err := f.read(b, off)
+ if err != nil {
+ return bytesRead, err
+ }
+ if bytesRead < len(b) {
+ return bytesRead, io.EOF
+ }
+ return bytesRead, nil
+}
+
+func (f fsFile) read(b []byte, off int64) (n int, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ // The JavaScript API expects a Uint8Array, which is copied back into b.
+ buffer := js.Global().Get("Uint8Array").New(len(b))
+ rawBytesRead := jsReadSync(f.fd, buffer, 0, len(b), int(off))
+ bytesRead := rawBytesRead.Int()
+ js.CopyBytesToGo(b[:bytesRead], buffer)
+ return bytesRead, nil
+}
+
+// Write writes len(b) bytes to the File. It returns the number of bytes
+// written and an error, if any. Write returns a non-nil error when n !=
+// len(b).
+func (f *fsFile) Write(b []byte) (n int, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ uint8arr := js.Global().Get("Uint8Array").New(len(b))
+ js.CopyBytesToJS(uint8arr, b)
+ rawBytesWritten := jsWriteSync(f.fd, uint8arr, 0, len(b), int(f.currOffset))
+ bytesWritten := rawBytesWritten.Int()
+ f.currOffset += int64(bytesWritten)
+ if bytesWritten != len(b) {
+ return bytesWritten, io.ErrShortWrite
+ }
+ if err := f.Sync(); err != nil {
+ return bytesWritten, err
+ }
+ return bytesWritten, nil
+}
+
+// Seek sets the offset for the next Read or Write on file to offset,
+// interpreted according to whence: 0 means relative to the origin of the
+// file, 1 means relative to the current offset, and 2 means relative to the
+// end. It returns the new offset and an error, if any. The behavior of Seek
+// on a file opened with O_APPEND is not specified.
+func (f *fsFile) Seek(offset int64, whence int) (ret int64, err error) {
+ switch whence {
+ case io.SeekStart:
+ f.currOffset = offset
+ return f.currOffset, nil
+ case io.SeekCurrent:
+ f.currOffset += offset
+ return f.currOffset, nil
+ case io.SeekEnd:
+ info, err := f.Stat()
+ if err != nil {
+ return 0, err
+ }
+ f.currOffset = info.Size() + offset
+ return f.currOffset, nil
+ }
+ return 0, fmt.Errorf("Seek: unexpected whence value: %d", whence)
+}
+
+// Sync commits the current contents of the file to stable storage. Typically,
+// this means flushing the file system's in-memory copy of recently written
+// data to disk.
+func (f fsFile) Sync() (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ jsFsyncSync(f.fd)
+ return nil
+}
+
+// Close closes the File, rendering it unusable for I/O. On files that support
+// SetDeadline, any pending I/O operations will be canceled and return
+// immediately with an error.
+func (f fsFile) Close() (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ jsCloseSync(f.fd)
+ return nil
+}
+
+func fsStat(path string) (fileInfo os.FileInfo, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ rawFileInfo := jsStatSync(path)
+ return fsFileInfo{Value: rawFileInfo, path: path}, nil
+}
+
+func osOpen(path string) (OsFile, error) {
+ if isfsSupported() {
+ return fsOpenFile(path, os.O_RDONLY, os.ModePerm)
+ }
+ return os.Open(path)
+}
+
+func fsOpenFile(path string, flag int, perm os.FileMode) (file OsFile, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ jsFlag, err := toJSFlag(flag)
+ if err != nil {
+ return nil, err
+ }
+ rawFD := jsOpenSync(path, jsFlag, int(perm))
+ return &fsFile{path: path, fd: rawFD.Int()}, nil
+}
+
+func toJSFlag(flag int) (string, error) {
+ // O_APPEND takes precedence
+ if flag&os.O_APPEND != 0 {
+ return "a", nil
+ }
+ // O_CREATE + O_RDWR
+ if flag&os.O_CREATE != 0 && flag&os.O_RDWR != 0 {
+ return "w+", nil // create if not exist, read/write, truncate
+ }
+ // O_CREATE + O_WRONLY
+ if flag&os.O_CREATE != 0 && flag&os.O_WRONLY != 0 {
+ return "w", nil // create if not exist, write only, truncate
+ }
+ // O_RDWR (no create)
+ if flag&os.O_RDWR != 0 {
+ return "r+", nil // read/write, fail if not exist
+ }
+ // O_WRONLY (no create)
+ if flag&os.O_WRONLY != 0 {
+ return "w", nil // write only, truncate, fail if not exist
+ }
+ // O_RDONLY
+ return "r", nil // read only
+}
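+
+// For illustration, how toJSFlag maps common flag combinations (assuming the
+// Node-style flag strings accepted by the ZenFS "open" call):
+//
+//	toJSFlag(os.O_RDONLY)             // "r", nil
+//	toJSFlag(os.O_WRONLY|os.O_CREATE) // "w", nil
+//	toJSFlag(os.O_RDWR|os.O_CREATE)   // "w+", nil
+//	toJSFlag(os.O_APPEND|os.O_WRONLY) // "a", nil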
+
+func Readdirnames(path string, n int) ([]string, error) {
+ if isfsSupported() {
+ return fsReaddirnames(path, n)
+ }
+ // In Go, this requires two steps. Open the dir, then call Readdirnames.
+ dir, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ return dir.Readdirnames(n)
+}
+
+func fsReaddirnames(path string, n int) ([]string, error) {
+ rawNames := jsReaddirSync(path)
+ length := rawNames.Get("length").Int()
+ if n != 0 && length > n {
+ // If n > 0, only return up to n names.
+ length = n
+ }
+ names := make([]string, length)
+ for i := 0; i < length; i++ {
+ names[i] = rawNames.Index(i).String()
+ }
+ return names, nil
+}
+
+func osMkdirAll(path string, perm os.FileMode) error {
+ if isfsSupported() {
+ return fsMkdirAll(path, perm)
+ }
+ return os.MkdirAll(path, perm)
+}
+
+func fsMkdirAll(path string, perm os.FileMode) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ // Note: mkdirAll is not supported by fs so we have to manually create
+ // each directory.
+ names := strings.Split(path, string(os.PathSeparator))
+ for i := range names {
+ partialPath := filepath.Join(names[:i+1]...)
+ if err := fsMkdir(partialPath, perm); err != nil {
+ if os.IsExist(err) {
+ // If the directory already exists, that's fine.
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
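+
+// For example, fsMkdirAll("a/b/c", perm) issues fsMkdir("a"), fsMkdir("a/b")
+// and fsMkdir("a/b/c") in turn, treating EEXIST at any step as success.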
+
+func fsMkdir(dir string, perm os.FileMode) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ jsMkdirSync(dir, int(perm))
+ return nil
+}
+
+func fsRename(oldpath, newpath string) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ jsRenameSync(oldpath, newpath)
+ return nil
+}
+
+// isfsSupported returns true if a compatible filesystem is available. It does
+// this by checking for the global "ZenFS" object.
+func isfsSupported() bool {
+ zenFS := js.Global().Get("ZenFS")
+ return !zenFS.IsNull() && !zenFS.IsUndefined()
+}
+
+// convertJSError converts an error returned by the fs API into a Go
+// error. This is important because Go expects certain types of errors to be
+// returned (e.g. ENOENT when a file doesn't exist) and programs often change
+// their behavior depending on the type of error.
+func convertJSError(err js.Error) error {
+ if err.Value.IsUndefined() || err.Value.IsNull() {
+ return nil // No error
+ }
+ // There is an error, check the code
+ if code := err.Value.Get("code"); !code.IsUndefined() && !code.IsNull() {
+ switch code.String() {
+ case "ENOENT":
+ return os.ErrNotExist
+ case "EISDIR":
+ return syscall.EISDIR
+ case "EEXIST":
+ return os.ErrExist
+ }
+ }
+ return err
+}
+
+// Note: JavaScript doesn't have a flock syscall so we have to fake it. This
+// won't work if another process tries to read/write to the same file. It only
+// works in the context of this process, but is safe with multiple goroutines.
+
+// locksMu protects access to readLocks and writeLocks
+var locksMu = sync.Mutex{}
+
+// readLocks is a map of path to the number of readers.
+var readLocks = map[string]uint{}
+
+// writeLocks keeps track of files which are locked for writing.
+var writeLocks = map[string]struct{}{}
+
+type fsFileLock struct {
+ path string
+ readOnly bool
+ file OsFile
+}
+
+func isErrInvalid(err error) bool {
+ if err == os.ErrInvalid {
+ return true
+ }
+ // Go >= 1.8 returns *os.PathError instead
+ if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
+ return true
+ }
+ return false
+}
+
+func jsReadSync(fd int, buffer js.Value, offset, length, position int) js.Value {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("read", fd, buffer, offset, length, position, callback)
+ return waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsWriteSync(fd int, data js.Value, offset, length, position int) js.Value {
+ return js.Global().Get("ZenFS").Call("writeSync", fd, data, offset, length, position)
+}
+
+func jsFsyncSync(fd int) {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("fsync", fd, callback)
+ waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsCloseSync(fd int) {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("close", fd, callback)
+ waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsStatSync(path string) js.Value {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("stat", path, callback)
+ return waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsOpenSync(path string, flags string, mode int) js.Value {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("open", path, flags, mode, callback)
+ return waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsUnlinkSync(path string) {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("unlink", path, callback)
+ waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsReaddirSync(path string) js.Value {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("readdir", path, callback)
+ return waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsMkdirSync(path string, mode int) {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("mkdir", path, mode, callback)
+ waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsRenameSync(oldPath string, newPath string) {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ js.Global().Get("ZenFS").Call("rename", oldPath, newPath, callback)
+ waitForCallbackResults(resultsChan, errChan)
+}
+
+func jsWriteFileSync(filename string, data []byte, perm os.FileMode) {
+ callback, resultsChan, errChan := makeAutoReleaseCallback()
+ uint8arr := js.Global().Get("Uint8Array").New(len(data))
+ js.CopyBytesToJS(uint8arr, data)
+ opts := js.Global().Get("Object").New()
+ opts.Set("mode", int(perm))
+ js.Global().Get("ZenFS").Call("writeFile", filename, uint8arr, opts, callback)
+ waitForCallbackResults(resultsChan, errChan)
+}
+
+// makeAutoReleaseCallback creates and returns a js.Func that can be used as a
+// callback. The callback will be released immediately after being called. The
+// callback assumes the JavaScript callback convention and accepts two
+// arguments: (err, result). If err is not null or undefined, it will send err
+// through errChan. Otherwise, it will send result through resultsChan.
+func makeAutoReleaseCallback() (callback js.Func, resultsChan chan js.Value, errChan chan error) {
+ resultsChan = make(chan js.Value, 1)
+ errChan = make(chan error, 1)
+ callback = js.FuncOf(func(this js.Value, args []js.Value) interface{} {
+ defer callback.Release()
+ go func() {
+ if len(args) == 0 {
+ resultsChan <- js.Undefined()
+ return
+ }
+ err := args[0]
+ if !err.IsUndefined() && !err.IsNull() {
+ errChan <- js.Error{Value: err}
+ return
+ }
+ if len(args) >= 2 {
+ resultsChan <- args[1]
+ } else {
+ resultsChan <- js.Undefined()
+ }
+ }()
+ return nil
+ })
+ return callback, resultsChan, errChan
+}
+
+// waitForCallbackResults blocks until receiving from either resultsChan or
+// errChan. If it receives from resultsChan first, it will return the result. If
+// it receives from errChan first, it will panic with the error.
+func waitForCallbackResults(resultsChan chan js.Value, errChan chan error) js.Value {
+ select {
+ case result := <-resultsChan:
+ return result
+ case err := <-errChan:
+ // Expected to be recovered up the call stack.
+ panic(err)
+ }
+}
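+
+// Together, makeAutoReleaseCallback and waitForCallbackResults turn the
+// Node-style async callback convention into synchronous calls. A typical use,
+// mirroring the js*Sync wrappers above, looks like:
+//
+//	callback, resultsChan, errChan := makeAutoReleaseCallback()
+//	js.Global().Get("ZenFS").Call("stat", "/some/path", callback)
+//	result := waitForCallbackResults(resultsChan, errChan) // panics on JS error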
+
+func writeFile(filename string, data []byte, perm os.FileMode) error {
+ jsWriteFileSync(filename, data, perm)
+ return nil
+}
+
+func osOpenFile(name string, flag int, perm os.FileMode) (OsFile, error) {
+ if isfsSupported() {
+ return fsOpenFile(name, flag, perm)
+ }
+ return os.OpenFile(name, flag, perm)
+}
+
+func (f *fsFile) WriteAt(b []byte, off int64) (n int, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ uint8arr := js.Global().Get("Uint8Array").New(len(b))
+ js.CopyBytesToJS(uint8arr, b)
+ rawBytesWritten := jsWriteSync(f.fd, uint8arr, 0, len(b), int(off))
+ bytesWritten := rawBytesWritten.Int()
+ if bytesWritten != len(b) {
+ return bytesWritten, io.ErrShortWrite
+ }
+ if err := f.Sync(); err != nil {
+ return bytesWritten, err
+ }
+ return bytesWritten, nil
+}
+func (f *fsFile) Truncate(size int64) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ js.Global().Get("ZenFS").Call("ftruncateSync", f.fd, size)
+ return nil
+}
+
+func osRemove(name string) error {
+ if isfsSupported() {
+ return fsRemove(name)
+ }
+ return os.Remove(name)
+}
+
+func fsRemove(name string) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if jsErr, ok := e.(js.Error); ok {
+ err = convertJSError(jsErr)
+ }
+ }
+ }()
+ jsUnlinkSync(name)
+ return nil
+}
+
+func (f *fsFile) WriteString(s string) (n int, err error) {
+ return f.Write([]byte(s))
+}
diff --git a/pkg/fs/fs_shared.go b/pkg/fs/fs_shared.go
new file mode 100644
index 00000000000..08a492a36e7
--- /dev/null
+++ b/pkg/fs/fs_shared.go
@@ -0,0 +1,89 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package fs
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// OsFile is an interface for interacting with a file. In Go, the underlying
+// type of OsFile is always simply *os.File. In JavaScript/Wasm, the underlying
+// type is a wrapper type which mimics the functionality of os.File.
+type OsFile interface {
+ // Stat returns the FileInfo structure describing file. If there is an error,
+ // it will be of type *PathError.
+ Stat() (os.FileInfo, error)
+ // Read reads up to len(b) bytes from the File. It returns the number of bytes
+ // read and any error encountered. At end of file, Read returns 0, io.EOF.
+ Read(b []byte) (n int, err error)
+ // ReadAt reads len(b) bytes from the File starting at byte offset off. It
+ // returns the number of bytes read and the error, if any. ReadAt always
+ // returns a non-nil error when n < len(b). At end of file, that error is
+ // io.EOF.
+ ReadAt(b []byte, off int64) (n int, err error)
+ // Write writes len(b) bytes to the File. It returns the number of bytes
+ // written and an error, if any. Write returns a non-nil error when n !=
+ // len(b).
+ Write(b []byte) (n int, err error)
+ // Seek sets the offset for the next Read or Write on file to offset,
+ // interpreted according to whence: 0 means relative to the origin of the
+ // file, 1 means relative to the current offset, and 2 means relative to the
+ // end. It returns the new offset and an error, if any. The behavior of Seek
+ // on a file opened with O_APPEND is not specified.
+ Seek(offset int64, whence int) (ret int64, err error)
+ // Sync commits the current contents of the file to stable storage. Typically,
+ // this means flushing the file system's in-memory copy of recently written
+ // data to disk.
+ Sync() error
+ // Close closes the File, rendering it unusable for I/O. On files that support
+ // SetDeadline, any pending I/O operations will be canceled and return
+ // immediately with an error.
+ Close() error
+
+ WriteAt([]byte, int64) (int, error)
+ Truncate(size int64) error
+ WriteString(s string) (n int, err error)
+}
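+
+// *os.File satisfies OsFile directly, which is what the native (non-js) build
+// returns from the helpers below. As a compile-time sketch:
+//
+//	var _ OsFile = (*os.File)(nil)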
+
+func ReadFile(filename string) ([]byte, error) {
+ f, err := osOpen(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return io.ReadAll(f)
+}
+
+func MkdirAll(path string, perm os.FileMode) error {
+ return osMkdirAll(path, perm)
+}
+
+func Open(name string) (OsFile, error) {
+ return osOpen(name)
+}
+
+func OpenFile(name string, flag int, perm os.FileMode) (OsFile, error) {
+ return osOpenFile(name, flag, perm)
+}
+
+func WriteFile(filename string, data []byte, perm os.FileMode) error {
+ if err := MkdirAll(filepath.Dir(filename), 0o755); err != nil {
+ return err
+ }
+ return writeFile(filename, data, perm)
+}
+
+func Stat(name string) (os.FileInfo, error) {
+ return fsStat(name)
+}
+
+func Remove(name string) error {
+ return osRemove(name)
+}
diff --git a/pkg/hive/hive.go b/pkg/hive/hive.go
index 27858cdcf87..dcefc899143 100644
--- a/pkg/hive/hive.go
+++ b/pkg/hive/hive.go
@@ -1,13 +1,6 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package hive exposes the hive protocol implementation
-// which is the discovery protocol used to inform and be
-// informed about other peers in the network. It gossips
-// about all peers by default and performs no specific
-// prioritization about which peers are gossipped to
-// others.
+//go:build !js
+// +build !js
+
package hive
import (
@@ -18,8 +11,6 @@ import (
"sync"
"time"
- "golang.org/x/sync/semaphore"
-
"github.com/ethersphere/bee/v2/pkg/addressbook"
"github.com/ethersphere/bee/v2/pkg/bzz"
"github.com/ethersphere/bee/v2/pkg/hive/pb"
@@ -30,26 +21,7 @@ import (
"github.com/ethersphere/bee/v2/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
-)
-
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "hive"
-
-const (
- protocolName = "hive"
- protocolVersion = "1.1.0"
- peersStreamName = "peers"
- messageTimeout = 1 * time.Minute // maximum allowed time for a message to be read or written.
- maxBatchSize = 30
- pingTimeout = time.Second * 15 // time to wait for ping to succeed
- batchValidationTimeout = 5 * time.Minute // prevent lock contention on peer validation
-)
-
-var (
- limitBurst = 4 * int(swarm.MaxBins)
- limitRate = time.Minute
-
- ErrRateLimitExceeded = errors.New("rate limit exceeded")
+ "golang.org/x/sync/semaphore"
)
type Service struct {
@@ -92,23 +64,6 @@ func New(streamer p2p.StreamerPinger, addressbook addressbook.GetPutter, network
return svc
}
-func (s *Service) Protocol() p2p.ProtocolSpec {
- return p2p.ProtocolSpec{
- Name: protocolName,
- Version: protocolVersion,
- StreamSpecs: []p2p.StreamSpec{
- {
- Name: peersStreamName,
- Handler: s.peersHandler,
- },
- },
- DisconnectIn: s.disconnect,
- DisconnectOut: s.disconnect,
- }
-}
-
-var ErrShutdownInProgress = errors.New("shutdown in progress")
-
func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, peers ...swarm.Address) error {
maxSize := maxBatchSize
s.metrics.BroadcastPeers.Inc()
@@ -140,27 +95,6 @@ func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, p
return nil
}
-func (s *Service) SetAddPeersHandler(h func(addr ...swarm.Address)) {
- s.addPeersHandler = h
-}
-
-func (s *Service) Close() error {
- close(s.quit)
-
- stopped := make(chan struct{})
- go func() {
- defer close(stopped)
- s.wg.Wait()
- }()
-
- select {
- case <-stopped:
- return nil
- case <-time.After(time.Second * 5):
- return errors.New("hive: waited 5 seconds to close active goroutines")
- }
-}
-
func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swarm.Address) (err error) {
s.metrics.BroadcastPeersSends.Inc()
stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, peersStreamName)
@@ -187,7 +121,7 @@ func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swa
}
if !s.allowPrivateCIDRs && manet.IsPrivateAddr(addr.Underlay) {
- continue // Don't advertise private CIDRs to the public network.
+ // continue // Don't advertise private CIDRs to the public network.
}
peersRequest.Peers = append(peersRequest.Peers, &pb.BzzAddress{
@@ -241,41 +175,6 @@ func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.St
return nil
}
-func (s *Service) disconnect(peer p2p.Peer) error {
- s.inLimiter.Clear(peer.Address.ByteString())
- s.outLimiter.Clear(peer.Address.ByteString())
- return nil
-}
-
-func (s *Service) startCheckPeersHandler() {
- ctx, cancel := context.WithCancel(context.Background())
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- <-s.quit
- cancel()
- }()
-
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- for {
- select {
- case <-ctx.Done():
- return
- case newPeers := <-s.peersChan:
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- cctx, cancel := context.WithTimeout(ctx, batchValidationTimeout)
- defer cancel()
- s.checkAndAddPeers(cctx, newPeers)
- }()
- }
- }
- }()
-}
-
func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) {
var peersToAdd []swarm.Address
mtx := sync.Mutex{}
diff --git a/pkg/hive/hive_js.go b/pkg/hive/hive_js.go
new file mode 100644
index 00000000000..a50d9062be9
--- /dev/null
+++ b/pkg/hive/hive_js.go
@@ -0,0 +1,278 @@
+//go:build js
+// +build js
+
+package hive
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "sync"
+
+ ma "github.com/multiformats/go-multiaddr"
+
+ "github.com/ethersphere/bee/v2/pkg/addressbook"
+ "github.com/ethersphere/bee/v2/pkg/bzz"
+ "github.com/ethersphere/bee/v2/pkg/hive/pb"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/ratelimit"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "golang.org/x/sync/semaphore"
+)
+
+type Service struct {
+ streamer p2p.StreamerPinger
+ addressBook addressbook.GetPutter
+ addPeersHandler func(...swarm.Address)
+ networkID uint64
+ logger log.Logger
+ inLimiter *ratelimit.Limiter
+ outLimiter *ratelimit.Limiter
+ quit chan struct{}
+ wg sync.WaitGroup
+ peersChan chan pb.Peers
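+ // sem bounds the number of concurrent reachability pings performed
+ // while validating advertised peers in checkAndAddPeers.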
+ sem *semaphore.Weighted
+ bootnode bool
+ allowPrivateCIDRs bool
+}
+
+func New(streamer p2p.StreamerPinger, addressbook addressbook.GetPutter, networkID uint64, bootnode bool, allowPrivateCIDRs bool, logger log.Logger) *Service {
+ svc := &Service{
+ streamer: streamer,
+ logger: logger.WithName(loggerName).Register(),
+ addressBook: addressbook,
+ networkID: networkID,
+ inLimiter: ratelimit.New(limitRate, limitBurst),
+ outLimiter: ratelimit.New(limitRate, limitBurst),
+ quit: make(chan struct{}),
+ peersChan: make(chan pb.Peers),
+ sem: semaphore.NewWeighted(int64(swarm.MaxBins)),
+ bootnode: bootnode,
+ allowPrivateCIDRs: allowPrivateCIDRs,
+ }
+
+ if !bootnode {
+ svc.startCheckPeersHandler()
+ }
+
+ return svc
+}
+
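+// BroadcastPeers advertises the given peers to the addressee in chunks of
+// at most maxBatchSize, returning early when the outbound rate limit for
+// that addressee is exhausted.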
+func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, peers ...swarm.Address) error {
+ maxSize := maxBatchSize
+
+ for len(peers) > 0 {
+ if maxSize > len(peers) {
+ maxSize = len(peers)
+ }
+
+ // If broadcasting limit is exceeded, return early
+ if !s.outLimiter.Allow(addressee.ByteString(), maxSize) {
+ return nil
+ }
+
+ select {
+ case <-s.quit:
+ return ErrShutdownInProgress
+ default:
+ }
+
+ if err := s.sendPeers(ctx, addressee, peers[:maxSize]); err != nil {
+ return err
+ }
+
+ peers = peers[maxSize:]
+ }
+
+ return nil
+}
+
+func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swarm.Address) (err error) {
+
+ stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, peersStreamName)
+ if err != nil {
+ return fmt.Errorf("new stream: %w", err)
+ }
+ defer func() {
+ if err != nil {
+ _ = stream.Reset()
+ } else {
+ _ = stream.Close()
+ }
+ }()
+ w, _ := protobuf.NewWriterAndReader(stream)
+ var peersRequest pb.Peers
+ for _, p := range peers {
+ addr, err := s.addressBook.Get(p)
+ if err != nil {
+ if errors.Is(err, addressbook.ErrNotFound) {
+ s.logger.Debug("broadcast peers; peer not found in the addressbook, skipping...", "peer_address", p)
+ continue
+ }
+ return err
+ }
+
+ if !s.allowPrivateCIDRs && manet.IsPrivateAddr(addr.Underlay) {
+ // continue // Don't advertise private CIDRs to the public network.
+ // NOTE: the skip is disabled here as well, so peers with private
+ // underlays are still advertised.
+ }
+
+ peersRequest.Peers = append(peersRequest.Peers, &pb.BzzAddress{
+ Overlay: addr.Overlay.Bytes(),
+ Underlay: addr.Underlay.Bytes(),
+ Signature: addr.Signature,
+ Nonce: addr.Nonce,
+ })
+ }
+
+ if err := w.WriteMsgWithContext(ctx, &peersRequest); err != nil {
+ return fmt.Errorf("write Peers message: %w", err)
+ }
+
+ return nil
+}
+
+func (s *Service) peersHandler(ctx context.Context, peer p2p.Peer, stream p2p.Stream) error {
+
+ _, r := protobuf.NewWriterAndReader(stream)
+ ctx, cancel := context.WithTimeout(ctx, messageTimeout)
+ defer cancel()
+ var peersReq pb.Peers
+ if err := r.ReadMsgWithContext(ctx, &peersReq); err != nil {
+ _ = stream.Reset()
+ return fmt.Errorf("read requestPeers message: %w", err)
+ }
+
+ if !s.inLimiter.Allow(peer.Address.ByteString(), len(peersReq.Peers)) {
+ _ = stream.Reset()
+ return ErrRateLimitExceeded
+ }
+
+ // Close the stream before processing in order to unblock the sending side.
+ // FullClose is called asynchronously because there is no need to wait for
+ // confirmation, but we still want to handle an unclosed stream from the
+ // other side to avoid zombie streams.
+ go stream.FullClose()
+
+ if s.bootnode {
+ return nil
+ }
+
+ select {
+ case s.peersChan <- peersReq:
+ case <-s.quit:
+ return errors.New("failed to process peers, shutting down hive")
+ }
+
+ return nil
+}
+
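+// checkAndAddPeers validates the advertised underlays concurrently and
+// adds the reachable peers to the addressbook, finally notifying the
+// registered addPeersHandler with the overlays that were added.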
+func (s *Service) checkAndAddPeers(ctx context.Context, peers pb.Peers) {
+ var peersToAdd []swarm.Address
+ mtx := sync.Mutex{}
+ wg := sync.WaitGroup{}
+
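+ // addPeer rewrites the advertised underlay into a websocket multiaddr,
+ // since a browser node can only dial /ws transports: any existing
+ // ws/wss components are stripped, the base address is re-encapsulated
+ // with /ws (and the original /p2p id), pinged for reachability and,
+ // if reachable, stored in the addressbook.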
+ addPeer := func(newPeer *pb.BzzAddress, multiUnderlay ma.Multiaddr) {
+ parts := ma.Split(multiUnderlay)
+
+ var baseParts []ma.Component
+ var p2pID string
+
+ for _, c := range parts {
+ switch c.Protocol().Name {
+ case "ws", "wss":
+ continue // drop any existing ws/wss component; /ws is re-added below
+ case "p2p":
+ p2pID = c.Value()
+ default:
+ baseParts = append(baseParts, c)
+ }
+ }
+
+ baseAddr, _ := ma.NewMultiaddr("") // empty accumulator; parse error ignored
+ for _, c := range baseParts {
+ componentStr := "/" + c.Protocol().Name + "/" + c.Value()
+ component, err := ma.NewMultiaddr(componentStr)
+ if err != nil {
+ s.logger.Error(errors.New("invalid multiaddr component"), "component", componentStr, "error", err)
+ return
+ }
+ baseAddr = baseAddr.Encapsulate(component)
+ }
+
+ // Add /ws and /p2p
+ baseAddr = baseAddr.Encapsulate(ma.StringCast("/ws"))
+ if p2pID != "" {
+ baseAddr = baseAddr.Encapsulate(ma.StringCast("/p2p/" + p2pID))
+ }
+
+ s.logger.Debug("rewritten multiaddr to use websocket", "peer_address", hex.EncodeToString(newPeer.Overlay), "ws_underlay", baseAddr.String())
+
+ err := s.sem.Acquire(ctx, 1)
+ if err != nil {
+ return
+ }
+
+ wg.Add(1)
+ go func() {
+
+ defer func() {
+ s.sem.Release(1)
+ wg.Done()
+ }()
+
+ ctx, cancel := context.WithTimeout(ctx, pingTimeout)
+ defer cancel()
+
+ // check if the underlay is usable by doing a raw ping using libp2p
+ if _, err := s.streamer.Ping(ctx, baseAddr); err != nil {
+
+ s.logger.Debug("unreachable peer underlay", "peer_address", hex.EncodeToString(newPeer.Overlay), "underlay", multiUnderlay, "error", err)
+ return
+ }
+
+ bzzAddress := bzz.Address{
+ Overlay: swarm.NewAddress(newPeer.Overlay),
+ Underlay: baseAddr,
+ Signature: newPeer.Signature,
+ Nonce: newPeer.Nonce,
+ }
+
+ err := s.addressBook.Put(bzzAddress.Overlay, bzzAddress)
+ if err != nil {
+ s.logger.Warning("skipping peer in response", "peer_address", newPeer.String(), "error", err)
+ return
+ }
+
+ mtx.Lock()
+ peersToAdd = append(peersToAdd, bzzAddress.Overlay)
+ mtx.Unlock()
+ }()
+ }
+
+ for _, p := range peers.Peers {
+
+ multiUnderlay, err := ma.NewMultiaddrBytes(p.Underlay)
+ if err != nil {
+ s.logger.Debug("multi address underlay", "error", err)
+ continue
+ }
+
+ // if peer exists already in the addressBook
+ // and if the underlays match, skip
+ addr, err := s.addressBook.Get(swarm.NewAddress(p.Overlay))
+ if err == nil && addr.Underlay.Equal(multiUnderlay) {
+ continue
+ }
+
+ // the peer is not yet in the addressbook (or its underlay changed);
+ // validate and add it
+ addPeer(p, multiUnderlay)
+ }
+ wg.Wait()
+
+ if s.addPeersHandler != nil && len(peersToAdd) > 0 {
+ s.addPeersHandler(peersToAdd...)
+ }
+}
diff --git a/pkg/hive/hive_shared.go b/pkg/hive/hive_shared.go
new file mode 100644
index 00000000000..cd449a924a5
--- /dev/null
+++ b/pkg/hive/hive_shared.go
@@ -0,0 +1,113 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hive exposes the hive protocol implementation
+// which is the discovery protocol used to inform and be
+// informed about other peers in the network. It gossips
+// about all peers by default and performs no specific
+// prioritization about which peers are gossiped to
+// others.
+package hive
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "hive"
+
+const (
+ protocolName = "hive"
+ protocolVersion = "1.1.0"
+ peersStreamName = "peers"
+ messageTimeout = 1 * time.Minute // maximum allowed time for a message to be read or written.
+ maxBatchSize = 30
+ pingTimeout = time.Second * 15 // time to wait for ping to succeed
+ batchValidationTimeout = 5 * time.Minute // prevent lock contention on peer validation
+)
+
+var (
+ limitBurst = 4 * int(swarm.MaxBins)
+ limitRate = time.Minute
+
+ ErrRateLimitExceeded = errors.New("rate limit exceeded")
+)
+
+func (s *Service) Protocol() p2p.ProtocolSpec {
+ return p2p.ProtocolSpec{
+ Name: protocolName,
+ Version: protocolVersion,
+ StreamSpecs: []p2p.StreamSpec{
+ {
+ Name: peersStreamName,
+ Handler: s.peersHandler,
+ },
+ },
+ DisconnectIn: s.disconnect,
+ DisconnectOut: s.disconnect,
+ }
+}
+
+var ErrShutdownInProgress = errors.New("shutdown in progress")
+
+func (s *Service) SetAddPeersHandler(h func(addr ...swarm.Address)) {
+ s.addPeersHandler = h
+}
+
+func (s *Service) Close() error {
+ close(s.quit)
+
+ stopped := make(chan struct{})
+ go func() {
+ defer close(stopped)
+ s.wg.Wait()
+ }()
+
+ select {
+ case <-stopped:
+ return nil
+ case <-time.After(time.Second * 5):
+ return errors.New("hive: waited 5 seconds to close active goroutines")
+ }
+}
+
+func (s *Service) disconnect(peer p2p.Peer) error {
+ s.inLimiter.Clear(peer.Address.ByteString())
+ s.outLimiter.Clear(peer.Address.ByteString())
+ return nil
+}
+
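+// startCheckPeersHandler spawns a background worker that drains peersChan
+// and validates each received batch with a bounded timeout until the
+// service is closed.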
+func (s *Service) startCheckPeersHandler() {
+ ctx, cancel := context.WithCancel(context.Background())
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ <-s.quit
+ cancel()
+ }()
+
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case newPeers := <-s.peersChan:
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ cctx, cancel := context.WithTimeout(ctx, batchValidationTimeout)
+ defer cancel()
+ s.checkAndAddPeers(cctx, newPeers)
+ }()
+ }
+ }
+ }()
+}
diff --git a/pkg/hive/metrics.go b/pkg/hive/metrics.go
index 62e9f5e482b..72edc349fa0 100644
--- a/pkg/hive/metrics.go
+++ b/pkg/hive/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/keystore/file/service.go b/pkg/keystore/file/service.go
index 01a5db72aeb..c2698421019 100644
--- a/pkg/keystore/file/service.go
+++ b/pkg/keystore/file/service.go
@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
+ fs "github.com/ethersphere/bee/v2/pkg/fs"
"github.com/ethersphere/bee/v2/pkg/keystore"
)
@@ -29,7 +30,7 @@ func New(dir string) *Service {
func (s *Service) Exists(name string) (bool, error) {
filename := s.keyFilename(name)
- data, err := os.ReadFile(filename)
+ data, err := fs.ReadFile(filename)
if err != nil && !os.IsNotExist(err) {
return false, fmt.Errorf("read private key: %w", err)
}
@@ -53,11 +54,11 @@ func (s *Service) SetKey(name, password string, edg keystore.EDG) (*ecdsa.Privat
filename := s.keyFilename(name)
- if err := os.MkdirAll(filepath.Dir(filename), 0700); err != nil {
+ if err := fs.MkdirAll(filepath.Dir(filename), 0700); err != nil {
return nil, err
}
- if err := os.WriteFile(filename, d, 0600); err != nil {
+ if err := fs.WriteFile(filename, d, 0600); err != nil {
return nil, err
}
@@ -67,7 +68,7 @@ func (s *Service) SetKey(name, password string, edg keystore.EDG) (*ecdsa.Privat
func (s *Service) Key(name, password string, edg keystore.EDG) (pk *ecdsa.PrivateKey, created bool, err error) {
filename := s.keyFilename(name)
- data, err := os.ReadFile(filename)
+ data, err := fs.ReadFile(filename)
if err != nil && !os.IsNotExist(err) {
return nil, false, fmt.Errorf("read private key: %w", err)
}
diff --git a/pkg/log/log.go b/pkg/log/log.go
index 5a517d02a33..6664b2f7d0a 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -190,15 +190,6 @@ func (ls *lockWriter) Write(bs []byte) (int, error) {
return n, err
}
-// Options specifies parameters that affect logger behavior.
-type Options struct {
- sink io.Writer
- verbosity Level
- levelHooks levelHooks
- fmtOptions fmtOptions
- logMetrics *metrics
-}
-
// Option represent Options parameters modifier.
type Option func(*Options)
@@ -288,14 +279,3 @@ func WithLevelHooks(l Level, hooks ...Hook) Option {
}
}
}
-
-// WithLogMetrics tells the logger to collect metrics about log messages.
-func WithLogMetrics() Option {
- return func(opts *Options) {
- if opts.logMetrics != nil {
- return
- }
- opts.logMetrics = newLogMetrics()
- WithLevelHooks(VerbosityAll, opts.logMetrics)(opts)
- }
-}
diff --git a/pkg/log/logger.go b/pkg/log/logger.go
index f40ffd59916..b7bf31d4f03 100644
--- a/pkg/log/logger.go
+++ b/pkg/log/logger.go
@@ -1,142 +1,15 @@
-// Copyright 2022 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package log
import (
- "bytes"
- "fmt"
"io"
- "os"
- "reflect"
- "strings"
- "time"
m "github.com/ethersphere/bee/v2/pkg/metrics"
- "github.com/hashicorp/go-multierror"
"github.com/prometheus/client_golang/prometheus"
)
-var _ Logger = (*logger)(nil)
-
-// levelHooks is a helper type for storing and
-// help triggering the hooks on a logger instance.
-type levelHooks map[Level][]Hook
-
-// fire triggers all the hooks for the given level.
-// If level V is enabled in debug verbosity, then
-// the VerbosityAll hooks are triggered.
-func (lh levelHooks) fire(level Level) error {
- if level > VerbosityDebug {
- level = VerbosityAll
- }
- for _, hook := range lh[level] {
- if err := hook.Fire(level); err != nil {
- return err
- }
- }
- return nil
-}
-
-type builder struct {
- l *logger
-
- // clone indicates whether this builder was cloned.
- cloned bool
-
- // v level represents the granularity of debug calls.
- v uint
-
- // names represents a path in the tree,
- // element 0 is the root of the tree.
- names []string
-
- // namesStr is a cache of render names slice, so
- // we don't have to render them on each Build call.
- namesStr string
-
- // values holds additional key/value pairs
- // that are included on every log call.
- values []interface{}
-
- // valuesStr is a cache of render values slice, so
- // we don't have to render them on each Build call.
- valuesStr string
-}
-
-// V implements the Builder interface V method.
-func (b *builder) V(level uint) Builder {
- if level > 0 {
- c := b.clone()
- c.v += level
- return c
- }
- return b
-}
-
-// WithName implements the Builder interface WithName method.
-func (b *builder) WithName(name string) Builder {
- c := b.clone()
- c.names = append(c.names, name)
- return c
-}
-
-// WithValues implements the Builder interface WithValues method.
-func (b *builder) WithValues(keysAndValues ...interface{}) Builder {
- c := b.clone()
- c.values = append(c.values, keysAndValues...)
- return c
-}
-
-// Build implements the Builder interface Build method.
-func (b *builder) Build() Logger {
- if !b.cloned && b.l.id != "" {
- return b.l
- }
-
- b.namesStr = strings.Join(b.names, "/")
- // ~5 is the average length of an English word; 4 is the rune size.
- bufCap := nextPowOf2(uint64(5 * 4 * len(b.values)))
- buf := bytes.NewBuffer(make([]byte, 0, bufCap))
- b.l.formatter.flatten(buf, b.values, false, false)
- b.valuesStr = buf.String()
-
- key := hash(b.namesStr, b.v, b.valuesStr, b.l.sink)
- if i, ok := loggers.Load(key); ok {
- // Nothing to build, the instance exists.
- return i.(*logger)
- }
- // A new child instance.
- c := *b.l
- b.l = &c
- c.builder = b
- c.cloned = false
- c.id = key
-
- return &c
-}
-
-// Register implements the Builder interface Register method.
-func (b *builder) Register() Logger {
- val := b.Build()
- key := hash(b.namesStr, b.v, b.valuesStr, b.l.sink)
- res, _ := loggers.LoadOrStore(key, val)
- return res.(*logger)
-}
-
-func (b *builder) clone() *builder {
- if b.cloned {
- return b
- }
-
- c := *b
- c.cloned = true
- c.names = append(make([]string, 0, len(c.names)), c.names...)
- c.values = append(make([]interface{}, 0, len(c.values)), c.values...)
- return &c
-}
-
// logger implements the Logger interface.
type logger struct {
*builder
@@ -169,119 +42,3 @@ type logger struct {
func (l *logger) Metrics() []prometheus.Collector {
return m.PrometheusCollectorsFromFields(l.metrics)
}
-
-// Verbosity implements the Logger interface Verbosity method.
-func (l *logger) Verbosity() Level {
- return l.verbosity.get()
-}
-
-// Debug implements the Logger interface Debug method.
-func (l *logger) Debug(msg string, keysAndValues ...interface{}) {
- if int(l.verbosity.get()) >= int(l.v) {
- if err := l.log(VerbosityDebug, CategoryDebug, nil, msg, keysAndValues...); err != nil {
- fmt.Fprintln(os.Stderr, err)
- }
- }
-}
-
-// Info implements the Logger interface Info method.
-func (l *logger) Info(msg string, keysAndValues ...interface{}) {
- if l.verbosity.get() >= VerbosityInfo {
- if err := l.log(VerbosityInfo, CategoryInfo, nil, msg, keysAndValues...); err != nil {
- fmt.Fprintln(os.Stderr, err)
- }
- }
-}
-
-// Warning implements the Logger interface Warning method.
-func (l *logger) Warning(msg string, keysAndValues ...interface{}) {
- if l.verbosity.get() >= VerbosityWarning {
- if err := l.log(VerbosityWarning, CategoryWarning, nil, msg, keysAndValues...); err != nil {
- fmt.Fprintln(os.Stderr, err)
- }
- }
-}
-
-// Error implements the Logger interface Error method.
-func (l *logger) Error(err error, msg string, keysAndValues ...interface{}) {
- if l.verbosity.get() >= VerbosityError {
- if err := l.log(VerbosityError, CategoryError, err, msg, keysAndValues...); err != nil {
- fmt.Fprintln(os.Stderr, err)
- }
- }
-}
-
-// setVerbosity changes the verbosity level or the logger.
-func (l *logger) setVerbosity(v Level) {
- l.verbosity.set(v)
-}
-
-// log logs the given msg and key-value pairs with the given level
-// and the given message category caller (if enabled) to the sink.
-func (l *logger) log(vl Level, mc MessageCategory, err error, msg string, keysAndValues ...interface{}) error {
- base := make([]interface{}, 0, 14+len(keysAndValues))
- if l.formatter.opts.logTimestamp {
- base = append(base, "time", time.Now().Format(l.formatter.opts.timestampLayout))
- }
- base = append(base, "level", vl.String(), "logger", l.namesStr)
- if vl == VerbosityDebug && l.v > 0 {
- base = append(base, "v", l.v)
- }
- if policy := l.formatter.opts.caller; policy == CategoryAll || policy == mc {
- base = append(base, "caller", l.formatter.caller())
- }
- base = append(base, "msg", msg)
- if vl == VerbosityError {
- if err != nil {
- base = append(base, "error", err.Error())
- }
- }
- if len(l.values) > 0 {
- base = append(base, l.values...)
- }
- buf := l.formatter.render(base, keysAndValues)
-
- var merr *multierror.Error
- if _, err = l.sink.Write(buf); err != nil {
- merr = multierror.Append(
- merr,
- fmt.Errorf("log %s: failed to write message: %w", vl, err),
- )
- }
- if err := l.levelHooks.fire(vl + Level(l.v)); err != nil {
- merr = multierror.Append(
- merr,
- fmt.Errorf("log %s: failed to fire hooks: %w", vl, err),
- )
- }
- return merr.ErrorOrNil()
-}
-
-// hash is a hashing function for creating unique identifiers.
-func hash(prefix string, v uint, values string, w io.Writer) string {
- var sink uintptr
- if reflect.ValueOf(w).Kind() == reflect.Ptr {
- sink = reflect.ValueOf(w).Pointer()
- } else {
- sink = reflect.ValueOf(&w).Pointer()
- }
- return fmt.Sprintf("%s[%d][%s]>>%d", prefix, v, values, sink)
-}
-
-// nextPowOf2 rounds up n to the next highest power of 2.
-// See: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
-func nextPowOf2(n uint64) uint64 {
- n--
- n |= n >> 1
- n |= n >> 2
- n |= n >> 4
- n |= n >> 8
- n |= n >> 16
- n |= n >> 32
- n++
- return n
-}
-
-// TODO:
-// - Implement the HTTP log middleware
-// - Write benchmarks and do optimizations; consider `func (l *VLogger) getBuffer() *buffer` from glog
diff --git a/pkg/log/logger_js.go b/pkg/log/logger_js.go
new file mode 100644
index 00000000000..7fd8174eeb6
--- /dev/null
+++ b/pkg/log/logger_js.go
@@ -0,0 +1,31 @@
+//go:build js
+// +build js
+
+package log
+
+import "io"
+
+// logger implements the Logger interface.
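+// Unlike the !js variant it carries no metrics field, as the Prometheus
+// instrumentation is only compiled for !js builds (see metrics.go).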
+type logger struct {
+ *builder
+
+ // id is the unique identifier of a logger.
+ // It identifies the instance of a logger in the logger registry.
+ id string
+
+ // formatter formats log messages before they are written to the sink.
+ formatter *formatter
+
+ // verbosity represents the current verbosity level.
+ // This variable is used to modify the verbosity of the logger instance.
+ // Higher values enable more logs. Logs at or below this level
+ // will be written, while logs above this level will be discarded.
+ verbosity Level
+
+ // sink represents the stream where the logs are written.
+ sink io.Writer
+
+ // levelHooks allow triggering of registered hooks
+ // on their associated severity log levels.
+ levelHooks levelHooks
+}
diff --git a/pkg/log/logger_shared.go b/pkg/log/logger_shared.go
new file mode 100644
index 00000000000..d7a0384bc49
--- /dev/null
+++ b/pkg/log/logger_shared.go
@@ -0,0 +1,252 @@
+// Copyright 2022 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+var _ Logger = (*logger)(nil)
+
+// levelHooks is a helper type for storing and
+// triggering the hooks on a logger instance.
+type levelHooks map[Level][]Hook
+
+// fire triggers all the hooks for the given level.
+// If level V is enabled in debug verbosity, then
+// the VerbosityAll hooks are triggered.
+func (lh levelHooks) fire(level Level) error {
+ if level > VerbosityDebug {
+ level = VerbosityAll
+ }
+ for _, hook := range lh[level] {
+ if err := hook.Fire(level); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type builder struct {
+ l *logger
+
+ // clone indicates whether this builder was cloned.
+ cloned bool
+
+ // v level represents the granularity of debug calls.
+ v uint
+
+ // names represents a path in the tree,
+ // element 0 is the root of the tree.
+ names []string
+
+ // namesStr is a cache of the rendered names slice, so
+ // we don't have to render it on each Build call.
+ namesStr string
+
+ // values holds additional key/value pairs
+ // that are included on every log call.
+ values []interface{}
+
+ // valuesStr is a cache of the rendered values slice, so
+ // we don't have to render it on each Build call.
+ valuesStr string
+}
+
+// V implements the Builder interface V method.
+func (b *builder) V(level uint) Builder {
+ if level > 0 {
+ c := b.clone()
+ c.v += level
+ return c
+ }
+ return b
+}
+
+// WithName implements the Builder interface WithName method.
+func (b *builder) WithName(name string) Builder {
+ c := b.clone()
+ c.names = append(c.names, name)
+ return c
+}
+
+// WithValues implements the Builder interface WithValues method.
+func (b *builder) WithValues(keysAndValues ...interface{}) Builder {
+ c := b.clone()
+ c.values = append(c.values, keysAndValues...)
+ return c
+}
+
+// Build implements the Builder interface Build method.
+func (b *builder) Build() Logger {
+ if !b.cloned && b.l.id != "" {
+ return b.l
+ }
+
+ b.namesStr = strings.Join(b.names, "/")
+ // ~5 is the average length of an English word; 4 is the rune size.
+ bufCap := nextPowOf2(uint64(5 * 4 * len(b.values)))
+ buf := bytes.NewBuffer(make([]byte, 0, bufCap))
+ b.l.formatter.flatten(buf, b.values, false, false)
+ b.valuesStr = buf.String()
+
+ key := hash(b.namesStr, b.v, b.valuesStr, b.l.sink)
+ if i, ok := loggers.Load(key); ok {
+ // Nothing to build, the instance exists.
+ return i.(*logger)
+ }
+ // A new child instance.
+ c := *b.l
+ b.l = &c
+ c.builder = b
+ c.cloned = false
+ c.id = key
+
+ return &c
+}
+
+// Register implements the Builder interface Register method.
+func (b *builder) Register() Logger {
+ val := b.Build()
+ key := hash(b.namesStr, b.v, b.valuesStr, b.l.sink)
+ res, _ := loggers.LoadOrStore(key, val)
+ return res.(*logger)
+}
+
+func (b *builder) clone() *builder {
+ if b.cloned {
+ return b
+ }
+
+ c := *b
+ c.cloned = true
+ c.names = append(make([]string, 0, len(c.names)), c.names...)
+ c.values = append(make([]interface{}, 0, len(c.values)), c.values...)
+ return &c
+}
+
+// Verbosity implements the Logger interface Verbosity method.
+func (l *logger) Verbosity() Level {
+ return l.verbosity.get()
+}
+
+// Debug implements the Logger interface Debug method.
+func (l *logger) Debug(msg string, keysAndValues ...interface{}) {
+ if int(l.verbosity.get()) >= int(l.v) {
+ if err := l.log(VerbosityDebug, CategoryDebug, nil, msg, keysAndValues...); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ }
+}
+
+// Info implements the Logger interface Info method.
+func (l *logger) Info(msg string, keysAndValues ...interface{}) {
+ if l.verbosity.get() >= VerbosityInfo {
+ if err := l.log(VerbosityInfo, CategoryInfo, nil, msg, keysAndValues...); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ }
+}
+
+// Warning implements the Logger interface Warning method.
+func (l *logger) Warning(msg string, keysAndValues ...interface{}) {
+ if l.verbosity.get() >= VerbosityWarning {
+ if err := l.log(VerbosityWarning, CategoryWarning, nil, msg, keysAndValues...); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ }
+}
+
+// Error implements the Logger interface Error method.
+func (l *logger) Error(err error, msg string, keysAndValues ...interface{}) {
+ if l.verbosity.get() >= VerbosityError {
+ if err := l.log(VerbosityError, CategoryError, err, msg, keysAndValues...); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ }
+}
+
+// setVerbosity changes the verbosity level of the logger.
+func (l *logger) setVerbosity(v Level) {
+ l.verbosity.set(v)
+}
+
+// log logs the given msg and key-value pairs with the given level
+// and the given message category caller (if enabled) to the sink.
+func (l *logger) log(vl Level, mc MessageCategory, err error, msg string, keysAndValues ...interface{}) error {
+ base := make([]interface{}, 0, 14+len(keysAndValues))
+ if l.formatter.opts.logTimestamp {
+ base = append(base, "time", time.Now().Format(l.formatter.opts.timestampLayout))
+ }
+ base = append(base, "level", vl.String(), "logger", l.namesStr)
+ if vl == VerbosityDebug && l.v > 0 {
+ base = append(base, "v", l.v)
+ }
+ if policy := l.formatter.opts.caller; policy == CategoryAll || policy == mc {
+ base = append(base, "caller", l.formatter.caller())
+ }
+ base = append(base, "msg", msg)
+ if vl == VerbosityError {
+ if err != nil {
+ base = append(base, "error", err.Error())
+ }
+ }
+ if len(l.values) > 0 {
+ base = append(base, l.values...)
+ }
+ buf := l.formatter.render(base, keysAndValues)
+
+ var merr *multierror.Error
+ if _, err = l.sink.Write(buf); err != nil {
+ merr = multierror.Append(
+ merr,
+ fmt.Errorf("log %s: failed to write message: %w", vl, err),
+ )
+ }
+ if err := l.levelHooks.fire(vl + Level(l.v)); err != nil {
+ merr = multierror.Append(
+ merr,
+ fmt.Errorf("log %s: failed to fire hooks: %w", vl, err),
+ )
+ }
+ return merr.ErrorOrNil()
+}
+
+// hash is a hashing function for creating unique identifiers.
+func hash(prefix string, v uint, values string, w io.Writer) string {
+ var sink uintptr
+ if reflect.ValueOf(w).Kind() == reflect.Ptr {
+ sink = reflect.ValueOf(w).Pointer()
+ } else {
+ sink = reflect.ValueOf(&w).Pointer()
+ }
+ return fmt.Sprintf("%s[%d][%s]>>%d", prefix, v, values, sink)
+}
+
+// nextPowOf2 rounds up n to the next highest power of 2.
+// See: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+func nextPowOf2(n uint64) uint64 {
+ n--
+ n |= n >> 1
+ n |= n >> 2
+ n |= n >> 4
+ n |= n >> 8
+ n |= n >> 16
+ n |= n >> 32
+ n++
+ return n
+}
+
+// TODO:
+// - Implement the HTTP log middleware
+// - Write benchmarks and do optimizations; consider `func (l *VLogger) getBuffer() *buffer` from glog
diff --git a/pkg/log/metrics.go b/pkg/log/metrics.go
index 2691868921a..0046950a7de 100644
--- a/pkg/log/metrics.go
+++ b/pkg/log/metrics.go
@@ -1,14 +1,39 @@
-// Copyright 2022 The Swarm Authors. All rights reserved.
+//go:build !js
+// +build !js
+
+// Copyright 2022 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package log
import (
+ "io"
+
m "github.com/ethersphere/bee/v2/pkg/metrics"
"github.com/prometheus/client_golang/prometheus"
)
+// Options specifies parameters that affect logger behavior.
+type Options struct {
+ sink io.Writer
+ verbosity Level
+ levelHooks levelHooks
+ fmtOptions fmtOptions
+ logMetrics *metrics
+}
+
+// WithLogMetrics tells the logger to collect metrics about log messages.
+func WithLogMetrics() Option {
+ return func(opts *Options) {
+ if opts.logMetrics != nil {
+ return
+ }
+ opts.logMetrics = newLogMetrics()
+ WithLevelHooks(VerbosityAll, opts.logMetrics)(opts)
+ }
+}
+
// metrics groups various metrics counters for statistical reasons.
type metrics struct {
ErrorCount prometheus.Counter
diff --git a/pkg/log/metrics_js.go b/pkg/log/metrics_js.go
new file mode 100644
index 00000000000..5364d089081
--- /dev/null
+++ b/pkg/log/metrics_js.go
@@ -0,0 +1,18 @@
+//go:build js
+// +build js
+
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import "io"
+
+// Options specifies parameters that affect logger behavior.
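+// The js variant omits the logMetrics field, since Prometheus-backed log
+// metrics are only available in !js builds (see metrics.go).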
+type Options struct {
+ sink io.Writer
+ verbosity Level
+ levelHooks levelHooks
+ fmtOptions fmtOptions
+}
diff --git a/pkg/log/registry.go b/pkg/log/registry.go
index a467cfc39cf..11fb264007c 100644
--- a/pkg/log/registry.go
+++ b/pkg/log/registry.go
@@ -1,52 +1,9 @@
-// Copyright 2022 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package log
-import (
- "fmt"
- "io"
- "os"
- "regexp"
- "sync"
-
- "github.com/hashicorp/go-multierror"
-)
-
-// defaults specifies the default global options for log
-// package which every new logger will inherit on its creation.
-var defaults = struct {
- pin sync.Once // pin pins the options and formatter settings.
- options *Options
- formatter *formatter
-}{
- options: &Options{
- sink: os.Stderr,
- verbosity: VerbosityDebug,
- fmtOptions: fmtOptions{
- timestampLayout: "2006-01-02 15:04:05.000000",
- maxLogDepth: 16,
- },
- },
-}
-
-// ModifyDefaults modifies the global default options for this log package
-// that each new logger inherits when it is created. The default values can
-// be modified only once, so further calls to this function will be ignored.
-// This function should be called before the first call to the NewLogger
-// factory constructor, otherwise it will have no effect.
-func ModifyDefaults(opts ...Option) {
- defaults.pin.Do(func() {
- for _, modify := range opts {
- modify(defaults.options)
- }
- defaults.formatter = newFormatter(defaults.options.fmtOptions)
- })
-}
-
-// loggers is the central register for Logger instances.
-var loggers = new(sync.Map)
+import "io"
// NewLogger is a factory constructor which returns a new logger instance
// based on the given name. If such an instance already exists in the
@@ -91,52 +48,3 @@ func NewLogger(name string, opts ...Option) Logger {
}
return l
}
-
-// SetVerbosity sets the level
-// of verbosity of the given logger.
-func SetVerbosity(l Logger, v Level) error {
- bl := l.(*logger)
- switch newLvl, maxValue := v.get(), Level(bl.v); {
- case newLvl == VerbosityAll:
- bl.setVerbosity(maxValue)
- case newLvl > maxValue:
- return fmt.Errorf("maximum verbosity %d exceeded for logger: %s", bl.v, bl.id)
- default:
- bl.setVerbosity(newLvl)
- }
- return nil
-}
-
-// SetVerbosityByExp sets all loggers to the given
-// verbosity level v that match the given expression
-// e, which can be a logger id or a regular expression.
-// An error is returned if e fails to compile.
-func SetVerbosityByExp(e string, v Level) error {
- val, ok := loggers.Load(e)
- if ok {
- val.(*logger).setVerbosity(v)
- return nil
- }
-
- rex, err := regexp.Compile(e)
- if err != nil {
- return err
- }
-
- var merr *multierror.Error
- loggers.Range(func(key, val interface{}) bool {
- if rex.MatchString(key.(string)) {
- merr = multierror.Append(merr, SetVerbosity(val.(*logger), v))
- }
- return true
- })
- return merr.ErrorOrNil()
-}
-
-// RegistryIterate iterates through all registered loggers.
-func RegistryIterate(fn func(id, path string, verbosity Level, v uint) (next bool)) {
- loggers.Range(func(_, val interface{}) bool {
- l := val.(*logger)
- return fn(l.id, l.namesStr, l.verbosity.get(), l.v)
- })
-}
diff --git a/pkg/log/registry_js.go b/pkg/log/registry_js.go
new file mode 100644
index 00000000000..bcb114eb125
--- /dev/null
+++ b/pkg/log/registry_js.go
@@ -0,0 +1,49 @@
+//go:build js
+// +build js
+
+package log
+
+import "io"
+
+// NewLogger is a factory constructor which returns a new logger instance
+// based on the given name. If such an instance already exists in the
+// logger registry, then this existing instance is returned instead.
+// The given options take precedence over the default options set
+// by the ModifyDefaults function.
+func NewLogger(name string, opts ...Option) Logger {
+ // Pin the default settings if
+ // they are not already pinned.
+ ModifyDefaults()
+
+ options := *defaults.options
+ for _, modify := range opts {
+ modify(&options)
+ }
+
+ if options.sink == io.Discard {
+ return Noop
+ }
+
+ formatter := defaults.formatter
+ if options.fmtOptions != defaults.options.fmtOptions {
+ formatter = newFormatter(options.fmtOptions)
+ }
+
+ val, ok := loggers.Load(hash(name, 0, "", options.sink))
+ if ok {
+ return val.(*logger)
+ }
+
+ l := &logger{
+ formatter: formatter,
+ verbosity: options.verbosity,
+ sink: options.sink,
+ levelHooks: options.levelHooks,
+ }
+ l.builder = &builder{
+ l: l,
+ names: []string{name},
+ namesStr: name,
+ }
+ return l
+}
diff --git a/pkg/log/registry_shared.go b/pkg/log/registry_shared.go
new file mode 100644
index 00000000000..8d44e77095d
--- /dev/null
+++ b/pkg/log/registry_shared.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+// defaults specifies the default global options for log
+// package which every new logger will inherit on its creation.
+var defaults = struct {
+ pin sync.Once // pin pins the options and formatter settings.
+ options *Options
+ formatter *formatter
+}{
+ options: &Options{
+ sink: os.Stderr,
+ verbosity: VerbosityDebug,
+ fmtOptions: fmtOptions{
+ timestampLayout: "2006-01-02 15:04:05.000000",
+ maxLogDepth: 16,
+ },
+ },
+}
+
+// ModifyDefaults modifies the global default options for this log package
+// that each new logger inherits when it is created. The default values can
+// be modified only once, so further calls to this function will be ignored.
+// This function should be called before the first call to the NewLogger
+// factory constructor, otherwise it will have no effect.
+func ModifyDefaults(opts ...Option) {
+ defaults.pin.Do(func() {
+ for _, modify := range opts {
+ modify(defaults.options)
+ }
+ defaults.formatter = newFormatter(defaults.options.fmtOptions)
+ })
+}
+
+// loggers is the central register for Logger instances.
+var loggers = new(sync.Map)
+
+// SetVerbosity sets the level
+// of verbosity of the given logger.
+func SetVerbosity(l Logger, v Level) error {
+ bl := l.(*logger)
+ switch newLvl, maxValue := v.get(), Level(bl.v); {
+ case newLvl == VerbosityAll:
+ bl.setVerbosity(maxValue)
+ case newLvl > maxValue:
+ return fmt.Errorf("maximum verbosity %d exceeded for logger: %s", bl.v, bl.id)
+ default:
+ bl.setVerbosity(newLvl)
+ }
+ return nil
+}
+
+// SetVerbosityByExp sets all loggers to the given
+// verbosity level v that match the given expression
+// e, which can be a logger id or a regular expression.
+// An error is returned if e fails to compile.
+func SetVerbosityByExp(e string, v Level) error {
+ val, ok := loggers.Load(e)
+ if ok {
+ val.(*logger).setVerbosity(v)
+ return nil
+ }
+
+ rex, err := regexp.Compile(e)
+ if err != nil {
+ return err
+ }
+
+ var merr *multierror.Error
+ loggers.Range(func(key, val interface{}) bool {
+ if rex.MatchString(key.(string)) {
+ merr = multierror.Append(merr, SetVerbosity(val.(*logger), v))
+ }
+ return true
+ })
+ return merr.ErrorOrNil()
+}
+
+// RegistryIterate iterates through all registered loggers.
+func RegistryIterate(fn func(id, path string, verbosity Level, v uint) (next bool)) {
+ loggers.Range(func(_, val interface{}) bool {
+ l := val.(*logger)
+ return fn(l.id, l.namesStr, l.verbosity.get(), l.v)
+ })
+}
diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go
index cbe2ae20c58..c17b9549cf7 100644
--- a/pkg/metrics/metrics.go
+++ b/pkg/metrics/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/node/chain.go b/pkg/node/chain.go
index 909638dd26c..e0bffed60c9 100644
--- a/pkg/node/chain.go
+++ b/pkg/node/chain.go
@@ -351,7 +351,7 @@ func (m noOpChainBackend) Metrics() []prometheus.Collector {
}
func (m noOpChainBackend) CodeAt(context.Context, common.Address, *big.Int) ([]byte, error) {
- return common.FromHex(sw3abi.SimpleSwapFactoryDeployedBinv0_6_9), nil
+ return common.FromHex(sw3abi.SimpleSwapFactoryDeployedBinv0_6_5), nil
}
func (m noOpChainBackend) CallContract(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error) {
diff --git a/pkg/node/devnode.go b/pkg/node/devnode.go
index 9ff5363c455..6d7f0ad1d9f 100644
--- a/pkg/node/devnode.go
+++ b/pkg/node/devnode.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/node/node.go b/pkg/node/node.go
index 9893bfb361e..c36dfd3fdca 100644
--- a/pkg/node/node.go
+++ b/pkg/node/node.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -13,14 +16,12 @@ import (
"encoding/hex"
"errors"
"fmt"
- "io"
stdlog "log"
"math/big"
"net"
"net/http"
"path/filepath"
"runtime"
- "sync"
"sync/atomic"
"time"
@@ -67,7 +68,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/storageincentives/staking"
"github.com/ethersphere/bee/v2/pkg/storer"
"github.com/ethersphere/bee/v2/pkg/swarm"
- "github.com/ethersphere/bee/v2/pkg/topology"
"github.com/ethersphere/bee/v2/pkg/topology/kademlia"
"github.com/ethersphere/bee/v2/pkg/topology/lightnode"
"github.com/ethersphere/bee/v2/pkg/tracing"
@@ -76,122 +76,9 @@ import (
"github.com/ethersphere/bee/v2/pkg/util/ioutil"
"github.com/ethersphere/bee/v2/pkg/util/nbhdutil"
"github.com/ethersphere/bee/v2/pkg/util/syncutil"
- "github.com/hashicorp/go-multierror"
ma "github.com/multiformats/go-multiaddr"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/sha3"
- "golang.org/x/sync/errgroup"
-)
-
-// LoggerName is the tree path name of the logger for this package.
-const LoggerName = "node"
-
-type Bee struct {
- p2pService io.Closer
- p2pHalter p2p.Halter
- ctxCancel context.CancelFunc
- apiCloser io.Closer
- apiServer *http.Server
- resolverCloser io.Closer
- errorLogWriter io.Writer
- tracerCloser io.Closer
- stateStoreCloser io.Closer
- stamperStoreCloser io.Closer
- localstoreCloser io.Closer
- topologyCloser io.Closer
- topologyHalter topology.Halter
- pusherCloser io.Closer
- pullerCloser io.Closer
- accountingCloser io.Closer
- pullSyncCloser io.Closer
- pssCloser io.Closer
- gsocCloser io.Closer
- ethClientCloser func()
- transactionMonitorCloser io.Closer
- transactionCloser io.Closer
- listenerCloser io.Closer
- postageServiceCloser io.Closer
- priceOracleCloser io.Closer
- hiveCloser io.Closer
- saludCloser io.Closer
- storageIncetivesCloser io.Closer
- pushSyncCloser io.Closer
- retrievalCloser io.Closer
- shutdownInProgress bool
- shutdownMutex sync.Mutex
- syncingStopped *syncutil.Signaler
- accesscontrolCloser io.Closer
-}
-
-type Options struct {
- Addr string
- AllowPrivateCIDRs bool
- APIAddr string
- BlockchainRpcEndpoint string
- BlockProfile bool
- BlockTime time.Duration
- BootnodeMode bool
- Bootnodes []string
- CacheCapacity uint64
- ChainID int64
- ChequebookEnable bool
- CORSAllowedOrigins []string
- DataDir string
- DBBlockCacheCapacity uint64
- DBDisableSeeksCompaction bool
- DBOpenFilesLimit uint64
- DBWriteBufferSize uint64
- EnableStorageIncentives bool
- EnableWS bool
- FullNodeMode bool
- Logger log.Logger
- MinimumStorageRadius uint
- MutexProfile bool
- NATAddr string
- NeighborhoodSuggester string
- PaymentEarly int64
- PaymentThreshold string
- PaymentTolerance int64
- PostageContractAddress string
- PostageContractStartBlock uint64
- PriceOracleAddress string
- RedistributionContractAddress string
- ReserveCapacityDoubling int
- ResolverConnectionCfgs []multiresolver.ConnectionConfig
- Resync bool
- RetrievalCaching bool
- SkipPostageSnapshot bool
- StakingContractAddress string
- StatestoreCacheCapacity uint64
- StaticNodes []swarm.Address
- SwapEnable bool
- SwapFactoryAddress string
- SwapInitialDeposit string
- TargetNeighborhood string
- TracingEnabled bool
- TracingEndpoint string
- TracingServiceName string
- TrxDebugMode bool
- UsePostageSnapshot bool
- WarmupTime time.Duration
- WelcomeMessage string
- WhitelistedWithdrawalAddress []string
-}
-
-const (
- refreshRate = int64(4_500_000) // accounting units refreshed per second
- lightFactor = 10 // downscale payment thresholds and their change rate, and refresh rates by this for light nodes
- lightRefreshRate = refreshRate / lightFactor // refresh rate used by / for light nodes
- basePrice = 10_000 // minimal price for retrieval and pushsync requests of maximum proximity
- postageSyncingStallingTimeout = 10 * time.Minute //
- postageSyncingBackoffTimeout = 5 * time.Second //
- minPaymentThreshold = 2 * refreshRate // minimal accepted payment threshold of full nodes
- maxPaymentThreshold = 24 * refreshRate // maximal accepted payment threshold of full nodes
- mainnetNetworkID = uint64(1) //
- reserveWakeUpDuration = 15 * time.Minute // time to wait before waking up reserveWorker
- reserveMinEvictCount = 1_000
- cacheMinEvictCount = 10_000
- maxAllowedDoubling = 1
)
func NewBee(
@@ -725,6 +612,8 @@ func NewBee(
}
b.hiveCloser = hive
+ fmt.Println("Supported protocols: ", p2ps.Protocols())
+
var swapService *swap.Service
kad, err := kademlia.New(swarmAddress, addressbook, hive, p2ps, detector, logger,
@@ -803,30 +692,6 @@ func NewBee(
}
)
- if !o.SkipPostageSnapshot && !batchStoreExists && (networkID == mainnetNetworkID) {
- chainBackend := NewSnapshotLogFilterer(logger)
-
- snapshotEventListener := listener.New(b.syncingStopped, logger, chainBackend, postageStampContractAddress, postageStampContractABI, o.BlockTime, postageSyncingStallingTimeout, postageSyncingBackoffTimeout)
-
- snapshotBatchSvc, err := batchservice.New(stateStore, batchStore, logger, snapshotEventListener, overlayEthAddress.Bytes(), post, sha3.New256, o.Resync)
- if err != nil {
- logger.Error(err, "failed to initialize batch service from snapshot, continuing outside snapshot block...")
- } else {
- err = snapshotBatchSvc.Start(ctx, postageSyncStart, initBatchState)
- syncStatus.Store(true)
- if err != nil {
- syncErr.Store(err)
- logger.Error(err, "failed to start batch service from snapshot, continuing outside snapshot block...")
- } else {
- postageSyncStart = chainBackend.maxBlockHeight
- }
- }
- if errClose := snapshotEventListener.Close(); errClose != nil {
- logger.Error(errClose, "failed to close event listener (snapshot) failure")
- }
-
- }
-
if batchSvc != nil && chainEnabled {
logger.Info("waiting to sync postage contract data, this may take a while... more info available in Debug loglevel")
@@ -1277,149 +1142,3 @@ func NewBee(
return b, nil
}
-
-func (b *Bee) SyncingStopped() chan struct{} {
- return b.syncingStopped.C
-}
-
-func (b *Bee) Shutdown() error {
- var mErr error
-
- // if a shutdown is already in process, return here
- b.shutdownMutex.Lock()
- if b.shutdownInProgress {
- b.shutdownMutex.Unlock()
- return ErrShutdownInProgress
- }
- b.shutdownInProgress = true
- b.shutdownMutex.Unlock()
-
- // halt kademlia while shutting down other
- // components.
- if b.topologyHalter != nil {
- b.topologyHalter.Halt()
- }
-
- // halt p2p layer from accepting new connections
- // while shutting down other components
- if b.p2pHalter != nil {
- b.p2pHalter.Halt()
- }
- // tryClose is a convenient closure which decrease
- // repetitive io.Closer tryClose procedure.
- tryClose := func(c io.Closer, errMsg string) {
- if c == nil {
- return
- }
- if err := c.Close(); err != nil {
- mErr = multierror.Append(mErr, fmt.Errorf("%s: %w", errMsg, err))
- }
- }
-
- tryClose(b.apiCloser, "api")
-
- ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
- defer cancel()
-
- var eg errgroup.Group
- if b.apiServer != nil {
- eg.Go(func() error {
- if err := b.apiServer.Shutdown(ctx); err != nil {
- return fmt.Errorf("api server: %w", err)
- }
- return nil
- })
- }
- if err := eg.Wait(); err != nil {
- mErr = multierror.Append(mErr, err)
- }
-
- var wg sync.WaitGroup
- wg.Add(8)
- go func() {
- defer wg.Done()
- tryClose(b.pssCloser, "pss")
- }()
- go func() {
- defer wg.Done()
- tryClose(b.gsocCloser, "gsoc")
- }()
- go func() {
- defer wg.Done()
- tryClose(b.pusherCloser, "pusher")
- }()
- go func() {
- defer wg.Done()
- tryClose(b.pullerCloser, "puller")
- }()
- go func() {
- defer wg.Done()
- tryClose(b.accountingCloser, "accounting")
- }()
-
- b.ctxCancel()
- go func() {
- defer wg.Done()
- tryClose(b.pullSyncCloser, "pull sync")
- }()
- go func() {
- defer wg.Done()
- tryClose(b.hiveCloser, "hive")
- }()
- go func() {
- defer wg.Done()
- tryClose(b.saludCloser, "salud")
- }()
-
- wg.Wait()
-
- tryClose(b.p2pService, "p2p server")
- tryClose(b.priceOracleCloser, "price oracle service")
-
- wg.Add(3)
- go func() {
- defer wg.Done()
- tryClose(b.transactionMonitorCloser, "transaction monitor")
- tryClose(b.transactionCloser, "transaction")
- }()
- go func() {
- defer wg.Done()
- tryClose(b.listenerCloser, "listener")
- }()
- go func() {
- defer wg.Done()
- tryClose(b.postageServiceCloser, "postage service")
- }()
-
- wg.Wait()
-
- if c := b.ethClientCloser; c != nil {
- c()
- }
-
- tryClose(b.accesscontrolCloser, "accesscontrol")
- tryClose(b.tracerCloser, "tracer")
- tryClose(b.topologyCloser, "topology driver")
- tryClose(b.storageIncetivesCloser, "storage incentives agent")
- tryClose(b.stateStoreCloser, "statestore")
- tryClose(b.stamperStoreCloser, "stamperstore")
- tryClose(b.localstoreCloser, "localstore")
- tryClose(b.resolverCloser, "resolver service")
-
- return mErr
-}
-
-var ErrShutdownInProgress = errors.New("shutdown in progress")
-
-func isChainEnabled(o *Options, swapEndpoint string, logger log.Logger) bool {
- chainDisabled := swapEndpoint == ""
- lightMode := !o.FullNodeMode
-
- if lightMode && chainDisabled { // ultra light mode is LightNode mode with chain disabled
- logger.Info("starting with a disabled chain backend")
- return false
- }
-
- logger.Info("starting with an enabled chain backend")
- return true // all other modes operate require chain enabled
-}
diff --git a/pkg/node/node_js.go b/pkg/node/node_js.go
new file mode 100644
index 00000000000..470cddbc5f4
--- /dev/null
+++ b/pkg/node/node_js.go
@@ -0,0 +1,1068 @@
+//go:build js
+// +build js
+
+package node
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ stdlog "log"
+ "math/big"
+ "net/http"
+ "path/filepath"
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethersphere/bee/v2/pkg/accesscontrol"
+ "github.com/ethersphere/bee/v2/pkg/accounting"
+ "github.com/ethersphere/bee/v2/pkg/addressbook"
+ "github.com/ethersphere/bee/v2/pkg/api"
+ "github.com/ethersphere/bee/v2/pkg/config"
+ "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/feeds/factory"
+ "github.com/ethersphere/bee/v2/pkg/gsoc"
+ "github.com/ethersphere/bee/v2/pkg/hive"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p"
+ "github.com/ethersphere/bee/v2/pkg/pingpong"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/postage/batchservice"
+ "github.com/ethersphere/bee/v2/pkg/postage/batchstore"
+ "github.com/ethersphere/bee/v2/pkg/postage/listener"
+ "github.com/ethersphere/bee/v2/pkg/postage/postagecontract"
+ "github.com/ethersphere/bee/v2/pkg/pricer"
+ "github.com/ethersphere/bee/v2/pkg/pricing"
+ "github.com/ethersphere/bee/v2/pkg/pss"
+ "github.com/ethersphere/bee/v2/pkg/puller"
+ "github.com/ethersphere/bee/v2/pkg/pullsync"
+ "github.com/ethersphere/bee/v2/pkg/pusher"
+ "github.com/ethersphere/bee/v2/pkg/pushsync"
+ "github.com/ethersphere/bee/v2/pkg/resolver/multiresolver"
+ "github.com/ethersphere/bee/v2/pkg/retrieval"
+ "github.com/ethersphere/bee/v2/pkg/salud"
+ "github.com/ethersphere/bee/v2/pkg/settlement/pseudosettle"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/chequebook"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/erc20"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/priceoracle"
+ "github.com/ethersphere/bee/v2/pkg/stabilization"
+ "github.com/ethersphere/bee/v2/pkg/status"
+ "github.com/ethersphere/bee/v2/pkg/steward"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives/redistribution"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives/staking"
+ "github.com/ethersphere/bee/v2/pkg/storer"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology/kademlia"
+ "github.com/ethersphere/bee/v2/pkg/topology/lightnode"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/ethersphere/bee/v2/pkg/transaction"
+ "github.com/ethersphere/bee/v2/pkg/util/abiutil"
+ "github.com/ethersphere/bee/v2/pkg/util/ioutil"
+ "github.com/ethersphere/bee/v2/pkg/util/nbhdutil"
+ "github.com/ethersphere/bee/v2/pkg/util/syncutil"
+ ma "github.com/multiformats/go-multiaddr"
+ "golang.org/x/crypto/sha3"
+
+ wasmhttp "github.com/nlepage/go-wasm-http-server/v2"
+)
+
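+// NewBee is the js/wasm variant of the node constructor. It wires up the
+// same core services as the native build, but serves the HTTP API through
+// a service-worker based bridge (wasmhttp) instead of a TCP listener.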
+func NewBee(
+ ctx context.Context,
+ addr string,
+ publicKey *ecdsa.PublicKey,
+ signer crypto.Signer,
+ networkID uint64,
+ logger log.Logger,
+ libp2pPrivateKey,
+ pssPrivateKey *ecdsa.PrivateKey,
+ session accesscontrol.Session,
+ o *Options,
+) (b *Bee, err error) {
+ tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
+ Enabled: o.TracingEnabled,
+ Endpoint: o.TracingEndpoint,
+ ServiceName: o.TracingServiceName,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("tracer: %w", err)
+ }
+
+ ctx, ctxCancel := context.WithCancel(ctx)
+ defer func() {
+ // if there's been an error on this function
+ // we'd like to cancel the p2p context so that
+ // incoming connections will not be possible
+ if err != nil {
+ ctxCancel()
+ }
+ }()
+
+ // light nodes have zero warmup time for pull/pushsync protocols
+ warmupTime := o.WarmupTime
+ if !o.FullNodeMode {
+ warmupTime = 0
+ }
+
+ sink := ioutil.WriterFunc(func(p []byte) (int, error) {
+ logger.Error(nil, string(p))
+ return len(p), nil
+ })
+
+ b = &Bee{
+ ctxCancel: ctxCancel,
+ errorLogWriter: sink,
+ tracerCloser: tracerCloser,
+ syncingStopped: syncutil.NewSignaler(),
+ }
+
+ defer func(b *Bee) {
+ if err != nil {
+ logger.Error(err, "got error, shutting down...")
+ if err2 := b.Shutdown(); err2 != nil {
+ logger.Error(err2, "got error while shutting down")
+ }
+ }
+ }(b)
+
+ if !o.FullNodeMode && o.ReserveCapacityDoubling != 0 {
+ return nil, fmt.Errorf("reserve capacity doubling is only allowed for full nodes")
+ }
+
+ if o.ReserveCapacityDoubling < 0 || o.ReserveCapacityDoubling > maxAllowedDoubling {
+ return nil, fmt.Errorf("config reserve capacity doubling has to be between default: 0 and maximum: %d", maxAllowedDoubling)
+ }
+ shallowReceiptTolerance := maxAllowedDoubling - o.ReserveCapacityDoubling
+
+ reserveCapacity := (1 << o.ReserveCapacityDoubling) * storer.DefaultReserveCapacity
+
+ stateStore, _, err := InitStateStore(logger, o.DataDir, o.StatestoreCacheCapacity)
+ if err != nil {
+ return nil, fmt.Errorf("init state store: %w", err)
+ }
+
+ pubKey, err := signer.PublicKey()
+ if err != nil {
+ return nil, fmt.Errorf("signer public key: %w", err)
+ }
+
+ nonce, nonceExists, err := overlayNonceExists(stateStore)
+ if err != nil {
+ return nil, fmt.Errorf("check presence of nonce: %w", err)
+ }
+
+ swarmAddress, err := crypto.NewOverlayAddress(*pubKey, networkID, nonce)
+ if err != nil {
+ return nil, fmt.Errorf("compute overlay address: %w", err)
+ }
+
+ targetNeighborhood := o.TargetNeighborhood
+ if targetNeighborhood == "" && !nonceExists && o.NeighborhoodSuggester != "" {
+ logger.Info("fetching target neighborhood from suggester", "url", o.NeighborhoodSuggester)
+ targetNeighborhood, err = nbhdutil.FetchNeighborhood(&http.Client{}, o.NeighborhoodSuggester)
+ if err != nil {
+ return nil, fmt.Errorf("neighborhood suggestion: %w", err)
+ }
+ }
+
+ var changedOverlay, resetReserve bool
+ if targetNeighborhood != "" {
+ neighborhood, err := swarm.ParseBitStrAddress(targetNeighborhood)
+ if err != nil {
+ return nil, fmt.Errorf("invalid neighborhood. %s", targetNeighborhood)
+ }
+
+ if swarm.Proximity(swarmAddress.Bytes(), neighborhood.Bytes()) < uint8(len(targetNeighborhood)) {
+ // mine the overlay
+ logger.Info("mining a new overlay address to target the selected neighborhood", "target", targetNeighborhood)
+ newSwarmAddress, newNonce, err := nbhdutil.MineOverlay(ctx, *pubKey, networkID, targetNeighborhood)
+ if err != nil {
+ return nil, fmt.Errorf("mine overlay address: %w", err)
+ }
+
+ if nonceExists {
+ logger.Info("Override nonce and clean state for neighborhood", "old_none", hex.EncodeToString(nonce), "new_nonce", hex.EncodeToString(newNonce))
+ logger.Warning("you have another 10 seconds to change your mind and kill this process with CTRL-C...")
+ time.Sleep(10 * time.Second)
+
+ err := ioutil.RemoveContent(filepath.Join(o.DataDir, ioutil.DataPathKademlia))
+ if err != nil {
+ return nil, fmt.Errorf("delete %s: %w", ioutil.DataPathKademlia, err)
+ }
+
+ if err := stateStore.ClearForHopping(); err != nil {
+ return nil, fmt.Errorf("clearing stateStore %w", err)
+ }
+ resetReserve = true
+ }
+
+ swarmAddress = newSwarmAddress
+ nonce = newNonce
+ err = setOverlay(stateStore, swarmAddress, nonce)
+ if err != nil {
+ return nil, fmt.Errorf("statestore: save new overlay: %w", err)
+ }
+ changedOverlay = true
+ }
+ }
+
+ b.stateStoreCloser = stateStore
+ // Check if the batchstore exists. If not, we can assume it's missing
+ // due to a migration or it's a fresh install.
+ batchStoreExists, err := batchStoreExists(stateStore)
+ if err != nil {
+ return nil, fmt.Errorf("batchstore: exists: %w", err)
+ }
+
+ addressbook := addressbook.New(stateStore)
+
+ logger.Info("using overlay address", "address", swarmAddress)
+
+ // this will set overlay if it was not set before
+ if err = checkOverlay(stateStore, swarmAddress); err != nil {
+ return nil, fmt.Errorf("check overlay address: %w", err)
+ }
+
+ var (
+ chainBackend transaction.Backend
+ overlayEthAddress common.Address
+ chainID int64
+ transactionService transaction.Service
+ transactionMonitor transaction.Monitor
+ chequebookFactory chequebook.Factory
+ chequebookService chequebook.Service = new(noOpChequebookService)
+ chequeStore chequebook.ChequeStore
+ cashoutService chequebook.CashoutService
+ erc20Service erc20.Service
+ )
+
+ chainEnabled := isChainEnabled(o, o.BlockchainRpcEndpoint, logger)
+
+ var batchStore postage.Storer = new(postage.NoOpBatchStore)
+ var evictFn func([]byte) error
+
+ if chainEnabled {
+ batchStore, err = batchstore.New(
+ stateStore,
+ func(id []byte) error {
+ return evictFn(id)
+ },
+ reserveCapacity,
+ logger,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("batchstore: %w", err)
+ }
+ }
+
+ chainBackend, overlayEthAddress, chainID, transactionMonitor, transactionService, err = InitChain(
+ ctx,
+ logger,
+ stateStore,
+ o.BlockchainRpcEndpoint,
+ o.ChainID,
+ signer,
+ o.BlockTime,
+ chainEnabled)
+ if err != nil {
+ return nil, fmt.Errorf("init chain: %w", err)
+ }
+ b.ethClientCloser = chainBackend.Close
+
+ logger.Info("using chain with network network", "chain_id", chainID, "network_id", networkID)
+
+ if o.ChainID != -1 && o.ChainID != chainID {
+ return nil, fmt.Errorf("connected to wrong blockchain network; network chainID %d; configured chainID %d", chainID, o.ChainID)
+ }
+
+ b.transactionCloser = tracerCloser
+ b.transactionMonitorCloser = transactionMonitor
+
+ beeNodeMode := api.LightMode
+ if o.FullNodeMode {
+ beeNodeMode = api.FullMode
+ } else if !chainEnabled {
+ beeNodeMode = api.UltraLightMode
+ }
+
+ // Create api.Probe in healthy state and switch to ready state after all components have been constructed
+ probe := api.NewProbe()
+ probe.SetHealthy(api.ProbeStatusOK)
+ defer func(probe *api.Probe) {
+ if err != nil {
+ probe.SetHealthy(api.ProbeStatusNOK)
+ } else {
+ probe.SetReady(api.ProbeStatusOK)
+ }
+ }(probe)
+
+ stamperStore, err := InitStamperStore(logger, o.DataDir, stateStore)
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize stamper store: %w", err)
+ }
+ b.stamperStoreCloser = stamperStore
+
+ var apiService *api.Service
+
+ if o.APIAddr != "" {
+ if o.MutexProfile {
+ _ = runtime.SetMutexProfileFraction(1)
+ }
+ if o.BlockProfile {
+ runtime.SetBlockProfileRate(1)
+ }
+
+ apiService = api.New(
+ *publicKey,
+ pssPrivateKey.PublicKey,
+ overlayEthAddress,
+ o.WhitelistedWithdrawalAddress,
+ logger,
+ transactionService,
+ batchStore,
+ beeNodeMode,
+ o.ChequebookEnable,
+ o.SwapEnable,
+ chainBackend,
+ o.CORSAllowedOrigins,
+ stamperStore,
+ )
+
+ apiService.Mount()
+ apiService.SetProbe(probe)
+ apiService.SetIsWarmingUp(true)
+ apiService.SetSwarmAddress(&swarmAddress)
+
+ apiServer := &http.Server{
+ IdleTimeout: 30 * time.Second,
+ ReadHeaderTimeout: 3 * time.Second,
+ Handler: apiService,
+ ErrorLog: stdlog.New(b.errorLogWriter, "", 0),
+ }
+
+ go func() {
+ logger.Info("starting debug & api server", "address")
+
+ if _, err := wasmhttp.Serve(apiServer.Handler); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ logger.Debug("debug & api server failed to start", "error", err)
+ logger.Error(nil, "debug & api server failed to start")
+ }
+ }()
+
+ b.apiServer = apiServer
+ b.apiCloser = apiServer
+ }
+
+ // Sync with the given Ethereum backend:
+ isSynced, _, err := transaction.IsSynced(ctx, chainBackend, maxDelay)
+ if err != nil {
+ return nil, fmt.Errorf("is synced: %w", err)
+ }
+ if !isSynced {
+ logger.Info("waiting to sync with the blockchain backend")
+
+ err := transaction.WaitSynced(ctx, logger, chainBackend, maxDelay)
+ if err != nil {
+ return nil, fmt.Errorf("waiting backend sync: %w", err)
+ }
+ }
+
+ if o.SwapEnable {
+ chequebookFactory, err = InitChequebookFactory(logger, chainBackend, chainID, transactionService, o.SwapFactoryAddress)
+ if err != nil {
+ return nil, fmt.Errorf("init chequebook factory: %w", err)
+ }
+
+ erc20Address, err := chequebookFactory.ERC20Address(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("factory fail: %w", err)
+ }
+
+ erc20Service = erc20.New(transactionService, erc20Address)
+
+ if o.ChequebookEnable && chainEnabled {
+ chequebookService, err = InitChequebookService(
+ ctx,
+ logger,
+ stateStore,
+ signer,
+ chainID,
+ chainBackend,
+ overlayEthAddress,
+ transactionService,
+ chequebookFactory,
+ o.SwapInitialDeposit,
+ erc20Service,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("init chequebook service: %w", err)
+ }
+ }
+
+ chequeStore, cashoutService = initChequeStoreCashout(
+ stateStore,
+ chainBackend,
+ chequebookFactory,
+ chainID,
+ overlayEthAddress,
+ transactionService,
+ )
+ }
+
+ lightNodes := lightnode.NewContainer(swarmAddress)
+
+ bootnodes := make([]ma.Multiaddr, 0, len(o.Bootnodes))
+
+ for _, a := range o.Bootnodes {
+ addr, err := ma.NewMultiaddr(a)
+ if err != nil {
+ logger.Debug("create bootnode multiaddress from string failed", "string", a, "error", err)
+ logger.Warning("create bootnode multiaddress from string failed", "string", a)
+ continue
+ }
+
+ bootnodes = append(bootnodes, addr)
+ }
+
+ // Perform the payment threshold checks here so that they are not
+ // duplicated in the bootstrap process
+ paymentThreshold, ok := new(big.Int).SetString(o.PaymentThreshold, 10)
+ if !ok {
+ return nil, fmt.Errorf("invalid payment threshold: %s", paymentThreshold)
+ }
+
+ if paymentThreshold.Cmp(big.NewInt(minPaymentThreshold)) < 0 {
+ return nil, fmt.Errorf("payment threshold below minimum generally accepted value, need at least %d", minPaymentThreshold)
+ }
+
+ if paymentThreshold.Cmp(big.NewInt(maxPaymentThreshold)) > 0 {
+ return nil, fmt.Errorf("payment threshold above maximum generally accepted value, needs to be reduced to at most %d", maxPaymentThreshold)
+ }
+
+ if o.PaymentTolerance < 0 {
+ return nil, fmt.Errorf("invalid payment tolerance: %d", o.PaymentTolerance)
+ }
+
+ if o.PaymentEarly > 100 || o.PaymentEarly < 0 {
+ return nil, fmt.Errorf("invalid payment early: %d", o.PaymentEarly)
+ }
+
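+ // The stabilization detector drives node warmup: it samples activity over
+ // 2s periods and signals readiness once the event rate stabilizes (and the
+ // configured warmup time has elapsed).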
+ detector, err := stabilization.NewDetector(stabilization.Config{
+ PeriodDuration: 2 * time.Second,
+ NumPeriodsForStabilization: 5,
+ StabilizationFactor: 3,
+ MinimumPeriods: 2,
+ WarmupTime: warmupTime,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("rate stabilizer configuration failed: %w", err)
+ }
+ defer detector.Close()
+
+ detector.OnMonitoringStart = func(t time.Time) {
+ logger.Info("node warmup check initiated. monitoring activity rate to determine readiness.", "startTime", t)
+ }
+
+ detector.OnStabilized = func(t time.Time, totalCount int) {
+ logger.Info("node warmup complete. system is considered stable and ready.", "stabilizationTime", t, "totalMonitoredEvents", totalCount)
+ }
+
+ detector.OnPeriodComplete = func(t time.Time, periodCount int, stDev float64) {
+ logger.Debug("node warmup check: period complete.", "periodEndTime", t, "eventsInPeriod", periodCount, "rateStdDev", stDev)
+ }
+
+ var initBatchState *postage.ChainSnapshot
+ // Bootstrap the node with a postage snapshot only when running on mainnet
+ // and either the install is fresh or the user explicitly asked to resync
+ if networkID == mainnetNetworkID && o.UsePostageSnapshot && (!batchStoreExists || o.Resync) {
+ start := time.Now()
+ logger.Info("cold postage start detected. fetching postage stamp snapshot from swarm")
+ initBatchState, err = bootstrapNode(
+ ctx,
+ addr,
+ swarmAddress,
+ nonce,
+ addressbook,
+ bootnodes,
+ lightNodes,
+ stateStore,
+ signer,
+ networkID,
+ log.Noop,
+ libp2pPrivateKey,
+ detector,
+ o,
+ )
+ logger.Info("bootstrapper created", "elapsed", time.Since(start))
+ if err != nil {
+ logger.Error(err, "bootstrapper failed to fetch batch state")
+ }
+ }
+
+ p2ps, err := libp2p.New(ctx, signer, networkID, swarmAddress, addr, addressbook, stateStore, lightNodes, logger, tracer, libp2p.Options{
+ PrivateKey: libp2pPrivateKey,
+ NATAddr: o.NATAddr,
+ EnableWS: o.EnableWS,
+ WelcomeMessage: o.WelcomeMessage,
+ FullNode: o.FullNodeMode,
+ Nonce: nonce,
+ ValidateOverlay: chainEnabled,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("p2p service: %w", err)
+ }
+
+ if apiService != nil {
+ apiService.SetP2P(p2ps)
+ }
+
+ b.p2pService = p2ps
+ b.p2pHalter = p2ps
+
+ post, err := postage.NewService(logger, stamperStore, batchStore, chainID)
+ if err != nil {
+ return nil, fmt.Errorf("postage service: %w", err)
+ }
+ b.postageServiceCloser = post
+ batchStore.SetBatchExpiryHandler(post)
+
+ var (
+ postageStampContractService postagecontract.Interface
+ batchSvc postage.EventUpdater
+ eventListener postage.Listener
+ )
+
+ chainCfg, found := config.GetByChainID(chainID)
+ postageStampContractAddress, postageSyncStart := chainCfg.PostageStampAddress, chainCfg.PostageStampStartBlock
+ if o.PostageContractAddress != "" {
+ if !common.IsHexAddress(o.PostageContractAddress) {
+ return nil, errors.New("malformed postage stamp address")
+ }
+ postageStampContractAddress = common.HexToAddress(o.PostageContractAddress)
+ if o.PostageContractStartBlock == 0 {
+ return nil, errors.New("postage contract start block option not provided")
+ }
+ postageSyncStart = o.PostageContractStartBlock
+ } else if !found {
+ return nil, errors.New("no known postage stamp addresses for this network")
+ }
+
+ postageStampContractABI := abiutil.MustParseABI(chainCfg.PostageStampABI)
+
+ bzzTokenAddress, err := postagecontract.LookupERC20Address(ctx, transactionService, postageStampContractAddress, postageStampContractABI, chainEnabled)
+ if err != nil {
+ return nil, fmt.Errorf("lookup erc20 postage address: %w", err)
+ }
+
+ postageStampContractService = postagecontract.New(
+ overlayEthAddress,
+ postageStampContractAddress,
+ postageStampContractABI,
+ bzzTokenAddress,
+ transactionService,
+ post,
+ batchStore,
+ chainEnabled,
+ o.TrxDebugMode,
+ )
+
+ eventListener = listener.New(b.syncingStopped, logger, chainBackend, postageStampContractAddress, postageStampContractABI, o.BlockTime, postageSyncingStallingTimeout, postageSyncingBackoffTimeout)
+ b.listenerCloser = eventListener
+
+ batchSvc, err = batchservice.New(stateStore, batchStore, logger, eventListener, overlayEthAddress.Bytes(), post, sha3.New256, o.Resync)
+ if err != nil {
+ return nil, fmt.Errorf("init batch service: %w", err)
+ }
+
+ // Construct protocols.
+ pingPong := pingpong.New(p2ps, logger, tracer)
+
+ if err = p2ps.AddProtocol(pingPong.Protocol()); err != nil {
+ return nil, fmt.Errorf("pingpong service: %w", err)
+ }
+
+ hive := hive.New(p2ps, addressbook, networkID, o.BootnodeMode, o.AllowPrivateCIDRs, logger)
+
+ if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
+ return nil, fmt.Errorf("hive service: %w", err)
+ }
+ b.hiveCloser = hive
+
+ fmt.Println("Supported protocols: ", p2ps.Protocols())
+
+ var swapService *swap.Service
+
+ kad, err := kademlia.New(swarmAddress, addressbook, hive, p2ps, detector, logger,
+ kademlia.Options{Bootnodes: bootnodes, BootnodeMode: o.BootnodeMode, StaticNodes: o.StaticNodes, DataDir: o.DataDir})
+ if err != nil {
+ return nil, fmt.Errorf("unable to create kademlia: %w", err)
+ }
+ b.topologyCloser = kad
+ b.topologyHalter = kad
+ hive.SetAddPeersHandler(kad.AddPeers)
+ p2ps.SetPickyNotifier(kad)
+
+ var path string
+
+ if o.DataDir != "" {
+ logger.Info("using datadir", "path", o.DataDir)
+ path = filepath.Join(o.DataDir, ioutil.DataPathLocalstore)
+ }
+
+ lo := &storer.Options{
+ Address: swarmAddress,
+ CacheCapacity: o.CacheCapacity,
+ LdbOpenFilesLimit: o.DBOpenFilesLimit,
+ LdbBlockCacheCapacity: o.DBBlockCacheCapacity,
+ LdbWriteBufferSize: o.DBWriteBufferSize,
+ LdbDisableSeeksCompaction: o.DBDisableSeeksCompaction,
+ Batchstore: batchStore,
+ StateStore: stateStore,
+ RadiusSetter: kad,
+ StartupStabilizer: detector,
+ Logger: logger,
+ Tracer: tracer,
+ CacheMinEvictCount: cacheMinEvictCount,
+ MinimumStorageRadius: o.MinimumStorageRadius,
+ }
+
+ if o.FullNodeMode && !o.BootnodeMode {
+ // configure reserve only for full node
+ lo.ReserveCapacity = reserveCapacity
+ lo.ReserveWakeUpDuration = reserveWakeUpDuration
+ lo.ReserveMinEvictCount = reserveMinEvictCount
+ lo.RadiusSetter = kad
+ lo.ReserveCapacityDoubling = o.ReserveCapacityDoubling
+ }
+
+ localStore, err := storer.New(ctx, path, lo)
+ if err != nil {
+ return nil, fmt.Errorf("localstore: %w", err)
+ }
+ b.localstoreCloser = localStore
+ evictFn = func(id []byte) error { return localStore.EvictBatch(context.Background(), id) }
+
+ if resetReserve {
+ logger.Warning("resetting the reserve")
+ err := localStore.ResetReserve(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reset reserve: %w", err)
+ }
+ }
+
+ actLogic := accesscontrol.NewLogic(session)
+ accesscontrol := accesscontrol.NewController(actLogic)
+ b.accesscontrolCloser = accesscontrol
+
+ var (
+ syncErr atomic.Value
+ syncStatus atomic.Value
+
+ syncStatusFn = func() (isDone bool, err error) {
+ iErr := syncErr.Load()
+ if iErr != nil {
+ err = iErr.(error)
+ }
+ isDone = syncStatus.Load() != nil
+ return isDone, err
+ }
+ )
+
+ if batchSvc != nil && chainEnabled {
+ logger.Info("waiting to sync postage contract data, this may take a while... more info available in Debug loglevel")
+
+ paused, err := postageStampContractService.Paused(ctx)
+ if err != nil {
+ logger.Error(err, "Error checking postage contract is paused")
+ }
+
+ if paused {
+ return nil, errors.New("postage contract is paused")
+ }
+
+ if o.FullNodeMode {
+ err = batchSvc.Start(ctx, postageSyncStart, initBatchState)
+ syncStatus.Store(true)
+ if err != nil {
+ syncErr.Store(err)
+ return nil, fmt.Errorf("unable to start batch service: %w", err)
+ }
+ } else {
+ go func() {
+ logger.Info("started postage contract data sync in the background...")
+ err := batchSvc.Start(ctx, postageSyncStart, initBatchState)
+ syncStatus.Store(true)
+ if err != nil {
+ syncErr.Store(err)
+ logger.Error(err, "unable to sync batches")
+ b.syncingStopped.Signal() // trigger shutdown in start.go
+ }
+ }()
+ }
+
+ }
+
+ minThreshold := big.NewInt(2 * refreshRate)
+ maxThreshold := big.NewInt(24 * refreshRate)
+
+ if !o.FullNodeMode {
+ minThreshold = big.NewInt(2 * lightRefreshRate)
+ }
+
+ lightPaymentThreshold := new(big.Int).Div(paymentThreshold, big.NewInt(lightFactor))
+
+ pricer := pricer.NewFixedPricer(swarmAddress, basePrice)
+
+ if paymentThreshold.Cmp(minThreshold) < 0 {
+ return nil, fmt.Errorf("payment threshold below minimum generally accepted value, need at least %s", minThreshold)
+ }
+
+ if paymentThreshold.Cmp(maxThreshold) > 0 {
+ return nil, fmt.Errorf("payment threshold above maximum generally accepted value, needs to be reduced to at most %s", maxThreshold)
+ }
+
+ pricing := pricing.New(p2ps, logger, paymentThreshold, lightPaymentThreshold, minThreshold)
+
+ if err = p2ps.AddProtocol(pricing.Protocol()); err != nil {
+ return nil, fmt.Errorf("pricing service: %w", err)
+ }
+
+ addrs, err := p2ps.Addresses()
+ if err != nil {
+ return nil, fmt.Errorf("get server addresses: %w", err)
+ }
+
+ for _, addr := range addrs {
+ logger.Debug("p2p address", "address", addr)
+ }
+
+ var enforcedRefreshRate *big.Int
+
+ if o.FullNodeMode {
+ enforcedRefreshRate = big.NewInt(refreshRate)
+ } else {
+ enforcedRefreshRate = big.NewInt(lightRefreshRate)
+ }
+
+ acc, err := accounting.NewAccounting(
+ paymentThreshold,
+ o.PaymentTolerance,
+ o.PaymentEarly,
+ logger,
+ stateStore,
+ pricing,
+ new(big.Int).Set(enforcedRefreshRate),
+ lightFactor,
+ p2ps,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("accounting: %w", err)
+ }
+ b.accountingCloser = acc
+
+ pseudosettleService := pseudosettle.New(p2ps, logger, stateStore, acc, new(big.Int).Set(enforcedRefreshRate), big.NewInt(lightRefreshRate), p2ps)
+ if err = p2ps.AddProtocol(pseudosettleService.Protocol()); err != nil {
+ return nil, fmt.Errorf("pseudosettle service: %w", err)
+ }
+
+ acc.SetRefreshFunc(pseudosettleService.Pay)
+
+ if o.SwapEnable && chainEnabled {
+ var priceOracle priceoracle.Service
+ swapService, priceOracle, err = InitSwap(
+ p2ps,
+ logger,
+ stateStore,
+ networkID,
+ overlayEthAddress,
+ chequebookService,
+ chequeStore,
+ cashoutService,
+ acc,
+ o.PriceOracleAddress,
+ chainID,
+ transactionService,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("init swap service: %w", err)
+ }
+ b.priceOracleCloser = priceOracle
+
+ if o.ChequebookEnable {
+ acc.SetPayFunc(swapService.Pay)
+ }
+ }
+
+ pricing.SetPaymentThresholdObserver(acc)
+
+ pssService := pss.New(pssPrivateKey, logger)
+ gsocService := gsoc.New(logger)
+ b.pssCloser = pssService
+ b.gsocCloser = gsocService
+
+ validStamp := postage.ValidStamp(batchStore)
+
+ nodeStatus := status.NewService(logger, p2ps, kad, beeNodeMode.String(), batchStore, localStore, nil)
+ if err = p2ps.AddProtocol(nodeStatus.Protocol()); err != nil {
+ return nil, fmt.Errorf("status service: %w", err)
+ }
+
+ saludService := salud.New(nodeStatus, kad, localStore, logger, detector, api.FullMode.String(), salud.DefaultMinPeersPerBin, salud.DefaultDurPercentile, salud.DefaultConnsPercentile)
+ b.saludCloser = saludService
+
+ rC, unsub := saludService.SubscribeNetworkStorageRadius()
+ initialRadiusC := make(chan struct{})
+ var networkR atomic.Uint32
+ networkR.Store(uint32(swarm.MaxBins))
+
+ go func() {
+ for {
+ select {
+ case r := <-rC:
+ prev := networkR.Load()
+ networkR.Store(uint32(r))
+ if prev == uint32(swarm.MaxBins) {
+ close(initialRadiusC)
+ }
+ if !o.FullNodeMode { // light and ultra-light nodes do not have a reserve worker to set the radius.
+ kad.SetStorageRadius(r)
+ }
+ case <-ctx.Done():
+ unsub()
+ return
+ }
+ }
+ }()
+
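+ // waitNetworkRFunc blocks until the first network storage radius estimate
+ // arrives from salud, then prefers the local storage radius unless it is at
+ // or below the configured minimum, in which case the network estimate wins.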
+ waitNetworkRFunc := func() (uint8, error) {
+ if networkR.Load() == uint32(swarm.MaxBins) {
+ select {
+ case <-initialRadiusC:
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ }
+ }
+
+ local, network := localStore.StorageRadius(), uint8(networkR.Load())
+ if local <= uint8(o.MinimumStorageRadius) {
+ return max(network, uint8(o.MinimumStorageRadius)), nil
+ } else {
+ return local, nil
+ }
+ }
+
+ pushSyncProtocol := pushsync.New(swarmAddress, networkID, nonce, p2ps, localStore, waitNetworkRFunc, kad, o.FullNodeMode && !o.BootnodeMode, pssService.TryUnwrap, gsocService.Handle, validStamp, logger, acc, pricer, signer, tracer, detector, uint8(shallowReceiptTolerance))
+ b.pushSyncCloser = pushSyncProtocol
+
+ // set the pushSyncer in the PSS
+ pssService.SetPushSyncer(pushSyncProtocol)
+
+ retrieval := retrieval.New(swarmAddress, waitNetworkRFunc, localStore, p2ps, kad, logger, acc, pricer, tracer, o.RetrievalCaching)
+ localStore.SetRetrievalService(retrieval)
+
+ pusherService := pusher.New(networkID, localStore, pushSyncProtocol, batchStore, logger, detector, pusher.DefaultRetryCount)
+ b.pusherCloser = pusherService
+
+ pusherService.AddFeed(localStore.PusherFeed())
+
+ pullSyncProtocol := pullsync.New(p2ps, localStore, pssService.TryUnwrap, gsocService.Handle, validStamp, logger, pullsync.DefaultMaxPage)
+ b.pullSyncCloser = pullSyncProtocol
+
+ retrieveProtocolSpec := retrieval.Protocol()
+ pushSyncProtocolSpec := pushSyncProtocol.Protocol()
+ pullSyncProtocolSpec := pullSyncProtocol.Protocol()
+
+ if o.FullNodeMode && !o.BootnodeMode {
+ logger.Info("starting in full mode")
+ } else {
+ if chainEnabled {
+ logger.Info("starting in light mode")
+ } else {
+ logger.Info("starting in ultra-light mode")
+ }
+ p2p.WithBlocklistStreams(p2p.DefaultBlocklistTime, retrieveProtocolSpec)
+ p2p.WithBlocklistStreams(p2p.DefaultBlocklistTime, pushSyncProtocolSpec)
+ p2p.WithBlocklistStreams(p2p.DefaultBlocklistTime, pullSyncProtocolSpec)
+ }
+
+ if err = p2ps.AddProtocol(retrieveProtocolSpec); err != nil {
+ return nil, fmt.Errorf("retrieval service: %w", err)
+ }
+ if err = p2ps.AddProtocol(pushSyncProtocolSpec); err != nil {
+ return nil, fmt.Errorf("pushsync service: %w", err)
+ }
+ if err = p2ps.AddProtocol(pullSyncProtocolSpec); err != nil {
+ return nil, fmt.Errorf("pullsync protocol: %w", err)
+ }
+
+ go func() {
+ sub, unsubscribe := detector.Subscribe()
+ defer unsubscribe()
+ <-sub
+ logger.Info("node warmup stabilization complete, updating API status")
+ if apiService != nil {
+ apiService.SetIsWarmingUp(false)
+ }
+ }()
+
+ stakingContractAddress := chainCfg.StakingAddress
+ if o.StakingContractAddress != "" {
+ if !common.IsHexAddress(o.StakingContractAddress) {
+ return nil, errors.New("malformed staking contract address")
+ }
+ stakingContractAddress = common.HexToAddress(o.StakingContractAddress)
+ }
+
+ stakingContract := staking.New(overlayEthAddress, stakingContractAddress, abiutil.MustParseABI(chainCfg.StakingABI), bzzTokenAddress, transactionService, common.BytesToHash(nonce), o.TrxDebugMode, uint8(o.ReserveCapacityDoubling))
+
+ if chainEnabled {
+
+ stake, err := stakingContract.GetPotentialStake(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get potential stake: %w", err)
+ }
+
+ if stake.Cmp(big.NewInt(0)) > 0 {
+
+ if changedOverlay {
+ logger.Debug("changing overlay address in staking contract")
+ tx, err := stakingContract.ChangeStakeOverlay(ctx, common.BytesToHash(nonce))
+ if err != nil {
+ return nil, fmt.Errorf("cannot change staking overlay address: %v", err.Error())
+ }
+ logger.Info("overlay address changed in staking contract", "transaction", tx)
+ }
+
+ // make sure that the staking contract has the up-to-date height
+ tx, updated, err := stakingContract.UpdateHeight(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("update height in staking contract: %w", err)
+ }
+ if updated {
+ logger.Info("updated new reserve capacity doubling height in the staking contract", "transaction", tx, "new_height", o.ReserveCapacityDoubling)
+ }
+
+ // Check if the staked amount is sufficient to cover the additional neighborhoods.
+ // The staked amount must be at least 2^h * MinimumStake.
+ if o.ReserveCapacityDoubling > 0 && stake.Cmp(big.NewInt(0).Mul(big.NewInt(1<= reserveTreshold && pullerService.SyncRate() == 0 && detector.IsStabilized()
+ }
+
+ agent, err = storageincentives.New(
+ swarmAddress,
+ overlayEthAddress,
+ chainBackend,
+ redistributionContract,
+ postageStampContractService,
+ stakingContract,
+ localStore,
+ isFullySynced,
+ o.BlockTime,
+ storageincentives.DefaultBlocksPerRound,
+ storageincentives.DefaultBlocksPerPhase,
+ stateStore,
+ batchStore,
+ erc20Service,
+ transactionService,
+ saludService,
+ logger,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("storage incentives agent: %w", err)
+ }
+ b.storageIncetivesCloser = agent
+ }
+
+ }
+ multiResolver := multiresolver.NewMultiResolver(
+ multiresolver.WithConnectionConfigs(o.ResolverConnectionCfgs),
+ multiresolver.WithLogger(o.Logger),
+ multiresolver.WithDefaultCIDResolver(),
+ )
+ b.resolverCloser = multiResolver
+
+ feedFactory := factory.New(localStore.Download(true))
+ steward := steward.New(localStore, retrieval, localStore.Cache())
+
+ extraOpts := api.ExtraOptions{
+ Pingpong: pingPong,
+ TopologyDriver: kad,
+ LightNodes: lightNodes,
+ Accounting: acc,
+ Pseudosettle: pseudosettleService,
+ Swap: swapService,
+ Chequebook: chequebookService,
+ BlockTime: o.BlockTime,
+ Storer: localStore,
+ Resolver: multiResolver,
+ Pss: pssService,
+ Gsoc: gsocService,
+ FeedFactory: feedFactory,
+ Post: post,
+ AccessControl: accesscontrol,
+ PostageContract: postageStampContractService,
+ Staking: stakingContract,
+ Steward: steward,
+ SyncStatus: syncStatusFn,
+ NodeStatus: nodeStatus,
+ PinIntegrity: localStore.PinIntegrity(),
+ }
+
+ if o.APIAddr != "" {
+ apiService.Configure(signer, tracer, api.Options{
+ CORSAllowedOrigins: o.CORSAllowedOrigins,
+ WsPingPeriod: 60 * time.Second,
+ }, extraOpts, chainID, erc20Service)
+
+ apiService.EnableFullAPI()
+
+ apiService.SetRedistributionAgent(agent)
+
+ }
+
+ if err := kad.Start(ctx); err != nil {
+ return nil, fmt.Errorf("start kademlia: %w", err)
+ }
+
+ if err := p2ps.Ready(); err != nil {
+ return nil, fmt.Errorf("p2ps ready: %w", err)
+ }
+
+ return b, nil
+}
diff --git a/pkg/node/node_shared.go b/pkg/node/node_shared.go
new file mode 100644
index 00000000000..fb430009bed
--- /dev/null
+++ b/pkg/node/node_shared.go
@@ -0,0 +1,283 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package node defines the concept of a Bee node
+// by bootstrapping and injecting all necessary
+// dependencies.
+package node
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/resolver/multiresolver"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/util/syncutil"
+ "github.com/hashicorp/go-multierror"
+ "golang.org/x/sync/errgroup"
+)
+
+// LoggerName is the tree path name of the logger for this package.
+const LoggerName = "node"
+
+type Bee struct {
+ p2pService io.Closer
+ p2pHalter p2p.Halter
+ ctxCancel context.CancelFunc
+ apiCloser io.Closer
+ apiServer *http.Server
+ resolverCloser io.Closer
+ errorLogWriter io.Writer
+ tracerCloser io.Closer
+ stateStoreCloser io.Closer
+ stamperStoreCloser io.Closer
+ localstoreCloser io.Closer
+ topologyCloser io.Closer
+ topologyHalter topology.Halter
+ pusherCloser io.Closer
+ pullerCloser io.Closer
+ accountingCloser io.Closer
+ pullSyncCloser io.Closer
+ pssCloser io.Closer
+ gsocCloser io.Closer
+ ethClientCloser func()
+ transactionMonitorCloser io.Closer
+ transactionCloser io.Closer
+ listenerCloser io.Closer
+ postageServiceCloser io.Closer
+ priceOracleCloser io.Closer
+ hiveCloser io.Closer
+ saludCloser io.Closer
+ storageIncetivesCloser io.Closer
+ pushSyncCloser io.Closer
+ retrievalCloser io.Closer
+ shutdownInProgress bool
+ shutdownMutex sync.Mutex
+ syncingStopped *syncutil.Signaler
+ accesscontrolCloser io.Closer
+}
+
+type Options struct {
+ Addr string
+ AllowPrivateCIDRs bool
+ APIAddr string
+ BlockchainRpcEndpoint string
+ BlockProfile bool
+ BlockTime time.Duration
+ BootnodeMode bool
+ Bootnodes []string
+ CacheCapacity uint64
+ ChainID int64
+ ChequebookEnable bool
+ CORSAllowedOrigins []string
+ DataDir string
+ DBBlockCacheCapacity uint64
+ DBDisableSeeksCompaction bool
+ DBOpenFilesLimit uint64
+ DBWriteBufferSize uint64
+ EnableStorageIncentives bool
+ EnableWS bool
+ FullNodeMode bool
+ Logger log.Logger
+ MinimumStorageRadius uint
+ MutexProfile bool
+ NATAddr string
+ NeighborhoodSuggester string
+ PaymentEarly int64
+ PaymentThreshold string
+ PaymentTolerance int64
+ PostageContractAddress string
+ PostageContractStartBlock uint64
+ PriceOracleAddress string
+ RedistributionContractAddress string
+ ReserveCapacityDoubling int
+ ResolverConnectionCfgs []multiresolver.ConnectionConfig
+ Resync bool
+ RetrievalCaching bool
+ StakingContractAddress string
+ StatestoreCacheCapacity uint64
+ StaticNodes []swarm.Address
+ SwapEnable bool
+ SwapFactoryAddress string
+ SwapInitialDeposit string
+ TargetNeighborhood string
+ TracingEnabled bool
+ TracingEndpoint string
+ TracingServiceName string
+ TrxDebugMode bool
+ UsePostageSnapshot bool
+ WarmupTime time.Duration
+ WelcomeMessage string
+ WhitelistedWithdrawalAddress []string
+}
+
+const (
+ refreshRate = int64(4_500_000) // accounting units refreshed per second
+ lightFactor = 10 // factor by which payment thresholds, their change rates, and refresh rates are downscaled for light nodes
+ lightRefreshRate = refreshRate / lightFactor // refresh rate used by / for light nodes
+ basePrice = 10_000 // minimal price for retrieval and pushsync requests of maximum proximity
+ postageSyncingStallingTimeout = 10 * time.Minute //
+ postageSyncingBackoffTimeout = 5 * time.Second //
+ minPaymentThreshold = 2 * refreshRate // minimal accepted payment threshold of full nodes
+ maxPaymentThreshold = 24 * refreshRate // maximal accepted payment threshold of full nodes
+ mainnetNetworkID = uint64(1) //
+ reserveWakeUpDuration = 15 * time.Minute // time to wait before waking up reserveWorker
+ reserveMinEvictCount = 1_000
+ cacheMinEvictCount = 10_000
+ maxAllowedDoubling = 1
+)
+
+func (b *Bee) SyncingStopped() chan struct{} {
+ return b.syncingStopped.C
+}
+
+func (b *Bee) Shutdown() error {
+ var mErr error
+
+ // if a shutdown is already in progress, return early
+ b.shutdownMutex.Lock()
+ if b.shutdownInProgress {
+ b.shutdownMutex.Unlock()
+ return ErrShutdownInProgress
+ }
+ b.shutdownInProgress = true
+ b.shutdownMutex.Unlock()
+
+ // halt kademlia while shutting down other
+ // components.
+ if b.topologyHalter != nil {
+ b.topologyHalter.Halt()
+ }
+
+ // halt p2p layer from accepting new connections
+ // while shutting down other components
+ if b.p2pHalter != nil {
+ b.p2pHalter.Halt()
+ }
+ // tryClose is a convenience closure that cuts down on
+ // the repetitive io.Closer shutdown boilerplate.
+ tryClose := func(c io.Closer, errMsg string) {
+ if c == nil {
+ return
+ }
+ if err := c.Close(); err != nil {
+ mErr = multierror.Append(mErr, fmt.Errorf("%s: %w", errMsg, err))
+ }
+ }
+
+ tryClose(b.apiCloser, "api")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+
+ var eg errgroup.Group
+ if b.apiServer != nil {
+ eg.Go(func() error {
+ if err := b.apiServer.Shutdown(ctx); err != nil {
+ return fmt.Errorf("api server: %w", err)
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ mErr = multierror.Append(mErr, err)
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(8)
+ go func() {
+ defer wg.Done()
+ tryClose(b.pssCloser, "pss")
+ }()
+ go func() {
+ defer wg.Done()
+ tryClose(b.gsocCloser, "gsoc")
+ }()
+ go func() {
+ defer wg.Done()
+ tryClose(b.pusherCloser, "pusher")
+ }()
+ go func() {
+ defer wg.Done()
+ tryClose(b.pullerCloser, "puller")
+ }()
+ go func() {
+ defer wg.Done()
+ tryClose(b.accountingCloser, "accounting")
+ }()
+
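+ // cancel the node context so that background workers wind down
+ // before the remaining closers run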
+ b.ctxCancel()
+ go func() {
+ defer wg.Done()
+ tryClose(b.pullSyncCloser, "pull sync")
+ }()
+ go func() {
+ defer wg.Done()
+ tryClose(b.hiveCloser, "hive")
+ }()
+ go func() {
+ defer wg.Done()
+ tryClose(b.saludCloser, "salud")
+ }()
+
+ wg.Wait()
+
+ tryClose(b.p2pService, "p2p server")
+ tryClose(b.priceOracleCloser, "price oracle service")
+
+ wg.Add(3)
+ go func() {
+ defer wg.Done()
+ tryClose(b.transactionMonitorCloser, "transaction monitor")
+ tryClose(b.transactionCloser, "transaction")
+ }()
+ go func() {
+ defer wg.Done()
+ tryClose(b.listenerCloser, "listener")
+ }()
+ go func() {
+ defer wg.Done()
+ tryClose(b.postageServiceCloser, "postage service")
+ }()
+
+ wg.Wait()
+
+ if c := b.ethClientCloser; c != nil {
+ c()
+ }
+
+ tryClose(b.accesscontrolCloser, "accesscontrol")
+ tryClose(b.tracerCloser, "tracer")
+ tryClose(b.topologyCloser, "topology driver")
+ tryClose(b.storageIncetivesCloser, "storage incentives agent")
+ tryClose(b.stateStoreCloser, "statestore")
+ tryClose(b.stamperStoreCloser, "stamperstore")
+ tryClose(b.localstoreCloser, "localstore")
+ tryClose(b.resolverCloser, "resolver service")
+
+ return mErr
+}
+
+var ErrShutdownInProgress = errors.New("shutdown in progress")
+
+func isChainEnabled(o *Options, swapEndpoint string, logger log.Logger) bool {
+ chainDisabled := swapEndpoint == ""
+ lightMode := !o.FullNodeMode
+
+ if lightMode && chainDisabled { // ultra light mode is LightNode mode with chain disabled
+ logger.Info("starting with a disabled chain backend")
+ return false
+ }
+
+ logger.Info("starting with an enabled chain backend")
+ return true // all other modes require the chain to be enabled
+}
diff --git a/pkg/node/snapshot.go b/pkg/node/snapshot.go
deleted file mode 100644
index fe7963ff6c4..00000000000
--- a/pkg/node/snapshot.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2025 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package node
-
-import (
- "bytes"
- "compress/gzip"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "sort"
- "sync"
-
- "slices"
-
- "github.com/ethereum/go-ethereum"
- "github.com/ethereum/go-ethereum/core/types"
- archive "github.com/ethersphere/batch-archive"
- "github.com/ethersphere/bee/v2/pkg/log"
- "github.com/ethersphere/bee/v2/pkg/postage/listener"
-)
-
-var _ listener.BlockHeightContractFilterer = (*SnapshotLogFilterer)(nil)
-
-type SnapshotLogFilterer struct {
- logger log.Logger
- loadedLogs []types.Log
- maxBlockHeight uint64
- initOnce sync.Once
-}
-
-func NewSnapshotLogFilterer(logger log.Logger) *SnapshotLogFilterer {
- return &SnapshotLogFilterer{
- logger: logger,
- }
-}
-
-// loadSnapshot is responsible for loading and processing the snapshot data.
-// It is intended to be called exactly once by initOnce.Do.
-func (f *SnapshotLogFilterer) loadSnapshot() error {
- f.logger.Info("loading batch snapshot")
- data := archive.GetBatchSnapshot()
- dataReader := bytes.NewReader(data)
- gzipReader, err := gzip.NewReader(dataReader)
- if err != nil {
- f.logger.Error(err, "failed to create gzip reader for batch import")
- return fmt.Errorf("create gzip reader: %w", err)
- }
- defer gzipReader.Close()
-
- if err := f.parseLogs(gzipReader); err != nil {
- f.logger.Error(err, "failed to parse logs from snapshot")
- return err
- }
-
- f.logger.Info("batch snapshot loaded successfully", "log_count", len(f.loadedLogs), "max_block_height", f.maxBlockHeight)
- return nil
-}
-
-func (f *SnapshotLogFilterer) parseLogs(reader io.Reader) error {
- var parsedLogs []types.Log
- var currentMaxBlockHeight uint64
-
- decoder := json.NewDecoder(reader)
- for {
- var logEntry types.Log
- if err := decoder.Decode(&logEntry); err != nil {
- if err == io.EOF {
- break
- }
- f.logger.Warning("failed to decode log event, skipping", "error", err)
- continue
- }
-
- if logEntry.BlockNumber > currentMaxBlockHeight {
- currentMaxBlockHeight = logEntry.BlockNumber
- }
- parsedLogs = append(parsedLogs, logEntry)
- }
-
- f.loadedLogs = parsedLogs
- f.maxBlockHeight = currentMaxBlockHeight
- return nil
-}
-
-// ensureLoaded calls loadSnapshot via sync.Once to ensure thread-safe, one-time initialization.
-func (f *SnapshotLogFilterer) ensureLoaded() error {
- var err error
- f.initOnce.Do(func() {
- err = f.loadSnapshot()
- })
- return err
-}
-
-func (f *SnapshotLogFilterer) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
- if err := f.ensureLoaded(); err != nil {
- return nil, fmt.Errorf("failed to ensure snapshot was loaded for FilterLogs: %w", err)
- }
-
- f.logger.Debug("filtering pre-loaded logs", "total_logs", len(f.loadedLogs), "query_from_block", query.FromBlock, "query_to_block", query.ToBlock, "query_addresses_count", len(query.Addresses), "query_topics_count", len(query.Topics))
-
- filtered := make([]types.Log, 0)
-
- startIndex := 0
- if query.FromBlock != nil {
- fromBlockNum := query.FromBlock.Uint64()
- startIndex = sort.Search(len(f.loadedLogs), func(i int) bool {
- return f.loadedLogs[i].BlockNumber >= fromBlockNum
- })
- }
-
- scannedCount := 0
- for i := startIndex; i < len(f.loadedLogs); i++ {
- logEntry := f.loadedLogs[i]
- scannedCount++
-
- if query.ToBlock != nil && logEntry.BlockNumber > query.ToBlock.Uint64() {
- break
- }
-
- if len(query.Addresses) > 0 && !slices.Contains(query.Addresses, logEntry.Address) {
- continue
- }
-
- if len(query.Topics) > 0 {
- match := true
- for topicIndex, topicCriteria := range query.Topics {
- if len(topicCriteria) == 0 {
- continue
- }
- if topicIndex >= len(logEntry.Topics) {
- match = false
- break
- }
-
- if !slices.Contains(topicCriteria, logEntry.Topics[topicIndex]) {
- match = false
- break
- }
- }
- if !match {
- continue
- }
- }
-
- filtered = append(filtered, logEntry)
- }
-
- f.logger.Debug("filtered logs complete", "input_log_count", len(f.loadedLogs), "potential_logs_in_block_range", scannedCount, "output_count", len(filtered))
- return filtered, nil
-}
-
-func (f *SnapshotLogFilterer) BlockNumber(_ context.Context) (uint64, error) {
- if err := f.ensureLoaded(); err != nil {
- return 0, fmt.Errorf("failed to ensure snapshot was loaded for BlockNumber: %w", err)
- }
- return f.maxBlockHeight, nil
-}
diff --git a/pkg/node/statestore.go b/pkg/node/statestore.go
index 626848cf88e..74d04832641 100644
--- a/pkg/node/statestore.go
+++ b/pkg/node/statestore.go
@@ -1,12 +1,10 @@
-// Copyright 2021 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
+
package node
import (
- "errors"
- "fmt"
"path/filepath"
"github.com/ethersphere/bee/v2/pkg/log"
@@ -15,7 +13,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/storage/cache"
"github.com/ethersphere/bee/v2/pkg/storage/leveldbstore"
- "github.com/ethersphere/bee/v2/pkg/swarm"
)
// InitStateStore will initialize the stateStore with the given path to the
@@ -41,62 +38,3 @@ func InitStateStore(logger log.Logger, dataDir string, cacheCapacity uint64) (st
return stateStore, caching, err
}
-
-// InitStamperStore will create new stamper store with the given path to the
-// data directory. When given an empty directory path, the function will instead
-// initialize an in-memory state store that will not be persisted.
-func InitStamperStore(logger log.Logger, dataDir string, stateStore storage.StateStorer) (storage.Store, error) {
- if dataDir == "" {
- logger.Warning("using in-mem stamper store, no node state will be persisted")
- } else {
- dataDir = filepath.Join(dataDir, "stamperstore")
- }
- stamperStore, err := leveldbstore.New(dataDir, nil)
- if err != nil {
- return nil, err
- }
-
- return stamperStore, nil
-}
-
-const (
- overlayNonce = "overlayV2_nonce"
- noncedOverlayKey = "nonce-overlay"
-)
-
-// checkOverlay checks the overlay is the same as stored in the statestore
-func checkOverlay(storer storage.StateStorer, overlay swarm.Address) error {
-
- var storedOverlay swarm.Address
- err := storer.Get(noncedOverlayKey, &storedOverlay)
- if err != nil {
- if !errors.Is(err, storage.ErrNotFound) {
- return err
- }
- return storer.Put(noncedOverlayKey, overlay)
- }
-
- if !storedOverlay.Equal(overlay) {
- return fmt.Errorf("overlay address changed. was %s before but now is %s", storedOverlay, overlay)
- }
-
- return nil
-}
-
-func overlayNonceExists(s storage.StateStorer) ([]byte, bool, error) {
- nonce := make([]byte, 32)
- if err := s.Get(overlayNonce, &nonce); err != nil {
- if errors.Is(err, storage.ErrNotFound) {
- return nonce, false, nil
- }
- return nil, false, err
- }
- return nonce, true, nil
-}
-
-func setOverlay(s storage.StateStorer, overlay swarm.Address, nonce []byte) error {
- return errors.Join(
- s.Put(overlayNonce, nonce),
- s.Put(noncedOverlayKey, overlay),
- )
-}
diff --git a/pkg/node/statestore_js.go b/pkg/node/statestore_js.go
new file mode 100644
index 00000000000..9ca5df92a3e
--- /dev/null
+++ b/pkg/node/statestore_js.go
@@ -0,0 +1,39 @@
+//go:build js
+// +build js
+
+package node
+
+import (
+ "path/filepath"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/statestore/storeadapter"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storage/cache"
+ "github.com/ethersphere/bee/v2/pkg/storage/leveldbstore"
+)
+
+// InitStateStore will initialize the stateStore with the given path to the
+// data directory. When given an empty directory path, the function will instead
+// initialize an in-memory state store that will not be persisted.
+func InitStateStore(logger log.Logger, dataDir string, cacheCapacity uint64) (storage.StateStorerManager, *cache.Cache, error) {
+ if dataDir == "" {
+ logger.Warning("using in-mem state store, no node state will be persisted")
+ } else {
+ dataDir = filepath.Join(dataDir, "statestore")
+ }
+ ldb, err := leveldbstore.New(dataDir, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ caching, err := cache.Wrap(ldb, int(cacheCapacity))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ stateStore, err := storeadapter.NewStateStorerAdapter(caching)
+
+ return stateStore, caching, err
+}
diff --git a/pkg/node/statestore_shared.go b/pkg/node/statestore_shared.go
new file mode 100644
index 00000000000..d3da07746e4
--- /dev/null
+++ b/pkg/node/statestore_shared.go
@@ -0,0 +1,75 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package node
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storage/leveldbstore"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// InitStamperStore will create a new stamper store with the given path to the
+// data directory. When given an empty directory path, the function will instead
+// initialize an in-memory store that will not be persisted.
+func InitStamperStore(logger log.Logger, dataDir string, stateStore storage.StateStorer) (storage.Store, error) {
+ if dataDir == "" {
+ logger.Warning("using in-mem stamper store, no node state will be persisted")
+ } else {
+ dataDir = filepath.Join(dataDir, "stamperstore")
+ }
+ stamperStore, err := leveldbstore.New(dataDir, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return stamperStore, nil
+}
+
+const (
+ overlayNonce = "overlayV2_nonce"
+ noncedOverlayKey = "nonce-overlay"
+)
+
+// checkOverlay checks that the overlay is the same as the one stored in the statestore
+func checkOverlay(storer storage.StateStorer, overlay swarm.Address) error {
+
+ var storedOverlay swarm.Address
+ err := storer.Get(noncedOverlayKey, &storedOverlay)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return err
+ }
+ return storer.Put(noncedOverlayKey, overlay)
+ }
+
+ if !storedOverlay.Equal(overlay) {
+ return fmt.Errorf("overlay address changed. was %s before but now is %s", storedOverlay, overlay)
+ }
+
+ return nil
+}
+
+func overlayNonceExists(s storage.StateStorer) ([]byte, bool, error) {
+ nonce := make([]byte, 32)
+ if err := s.Get(overlayNonce, &nonce); err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ return nonce, false, nil
+ }
+ return nil, false, err
+ }
+ return nonce, true, nil
+}
+
+func setOverlay(s storage.StateStorer, overlay swarm.Address, nonce []byte) error {
+ return errors.Join(
+ s.Put(overlayNonce, nonce),
+ s.Put(noncedOverlayKey, overlay),
+ )
+}
diff --git a/pkg/p2p/discover.go b/pkg/p2p/discover.go
index 98687273d48..417eeedda0a 100644
--- a/pkg/p2p/discover.go
+++ b/pkg/p2p/discover.go
@@ -6,9 +6,15 @@ package p2p
import (
"context"
+ "encoding/json"
"errors"
"fmt"
+ "io"
"math/rand"
+ "net"
+ "net/http"
+ "strings"
+ "time"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
@@ -21,12 +27,108 @@ func isDNSProtocol(protoCode int) bool {
return false
}
+// CustomDNSResolver implements the madns.BasicResolver interface using
+// DNS-over-HTTPS, so lookups also work where native DNS is unavailable
+// (such as js/wasm builds).
+type CustomDNSResolver struct {
+ client *http.Client
+}
+
+func NewCustomDNSResolver() *CustomDNSResolver {
+ return &CustomDNSResolver{
+ client: &http.Client{
+ Timeout: 5 * time.Second,
+ },
+ }
+}
+
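+// dnsResponse mirrors the answer section of the dns.google JSON API; a reply
+// looks roughly like (abridged): {"Answer":[{"type":1,"data":"93.184.216.34"}]}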
+type dnsResponse struct {
+ Answer []struct {
+ Type int `json:"type"`
+ Data string `json:"data"`
+ } `json:"Answer"`
+}
+
+func (r *CustomDNSResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) {
+ // Use Google's DNS-over-HTTPS API
+ url := fmt.Sprintf("https://dns.google/resolve?name=%s&type=A", host)
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("create request: %w", err)
+ }
+
+ resp, err := r.client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("dns query: %w", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("read response: %w", err)
+ }
+
+ var dnsResp dnsResponse
+ if err := json.Unmarshal(body, &dnsResp); err != nil {
+ return nil, fmt.Errorf("parse response: %w", err)
+ }
+
+ var addrs []net.IPAddr
+ for _, answer := range dnsResp.Answer {
+ if answer.Type == 1 { // A record
+ ip := net.ParseIP(answer.Data)
+ if ip != nil {
+ addrs = append(addrs, net.IPAddr{IP: ip})
+ }
+ }
+ }
+ return addrs, nil
+}
+
+func (r *CustomDNSResolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
+ // Use Google's DNS-over-HTTPS API
+ url := fmt.Sprintf("https://dns.google/resolve?name=%s&type=TXT", name)
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("create request: %w", err)
+ }
+
+ resp, err := r.client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("dns query: %w", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("read response: %w", err)
+ }
+
+ var dnsResp dnsResponse
+ if err := json.Unmarshal(body, &dnsResp); err != nil {
+ return nil, fmt.Errorf("parse response: %w", err)
+ }
+
+ var txts []string
+ for _, answer := range dnsResp.Answer {
+ if answer.Type == 16 { // TXT record
+ // Remove quotes from TXT record data
+ txt := strings.Trim(answer.Data, "\"")
+ txts = append(txts, txt)
+ }
+ }
+ return txts, nil
+}
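+
+// The resolver is wired into go-multiaddr-dns below; minimal usage sketch:
+//
+// dnsResolver, err := madns.NewResolver(madns.WithDefaultResolver(NewCustomDNSResolver()))
+// addrs, err := dnsResolver.Resolve(ctx, addr)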
+
func Discover(ctx context.Context, addr ma.Multiaddr, f func(ma.Multiaddr) (bool, error)) (bool, error) {
if comp, _ := ma.SplitFirst(addr); !isDNSProtocol(comp.Protocol().Code) {
return f(addr)
}
- dnsResolver := madns.DefaultResolver
+ // Create a custom DNS resolver using Google's DNS-over-HTTPS
+ customResolver := NewCustomDNSResolver()
+ dnsResolver, err := madns.NewResolver(madns.WithDefaultResolver(customResolver))
+ if err != nil {
+ return false, fmt.Errorf("create dns resolver: %w", err)
+ }
addrs, err := dnsResolver.Resolve(ctx, addr)
if err != nil {
return false, fmt.Errorf("dns resolve address %s: %w", addr, err)
diff --git a/pkg/p2p/libp2p/connections_test.go b/pkg/p2p/libp2p/connections_test.go
index c85b016d801..654f7bde063 100644
--- a/pkg/p2p/libp2p/connections_test.go
+++ b/pkg/p2p/libp2p/connections_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/p2p/libp2p/headers_test.go b/pkg/p2p/libp2p/headers_test.go
index 5805dafa0d4..9528e3bd878 100644
--- a/pkg/p2p/libp2p/headers_test.go
+++ b/pkg/p2p/libp2p/headers_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/p2p/libp2p/internal/handshake/handshake.go b/pkg/p2p/libp2p/internal/handshake/handshake.go
index a1d7cb2d587..2cb71299108 100644
--- a/pkg/p2p/libp2p/internal/handshake/handshake.go
+++ b/pkg/p2p/libp2p/internal/handshake/handshake.go
@@ -1,15 +1,12 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package handshake
import (
"context"
- "errors"
"fmt"
"sync/atomic"
- "time"
"github.com/ethersphere/bee/v2/pkg/bzz"
"github.com/ethersphere/bee/v2/pkg/crypto"
@@ -18,48 +15,10 @@ import (
"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake/pb"
"github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
"github.com/ethersphere/bee/v2/pkg/swarm"
-
libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "handshake"
-
-const (
- // ProtocolName is the text of the name of the handshake protocol.
- ProtocolName = "handshake"
- // ProtocolVersion is the current handshake protocol version.
- ProtocolVersion = "13.0.0"
- // StreamName is the name of the stream used for handshake purposes.
- StreamName = "handshake"
- // MaxWelcomeMessageLength is maximum number of characters allowed in the welcome message.
- MaxWelcomeMessageLength = 140
- handshakeTimeout = 15 * time.Second
-)
-
-var (
- // ErrNetworkIDIncompatible is returned if response from the other peer does not have valid networkID.
- ErrNetworkIDIncompatible = errors.New("incompatible network ID")
-
- // ErrInvalidAck is returned if data in received in ack is not valid (invalid signature for example).
- ErrInvalidAck = errors.New("invalid ack")
-
- // ErrInvalidSyn is returned if observable address in ack is not a valid..
- ErrInvalidSyn = errors.New("invalid syn")
-
- // ErrWelcomeMessageLength is returned if the welcome message is longer than the maximum length
- ErrWelcomeMessageLength = fmt.Errorf("handshake welcome message longer than maximum of %d characters", MaxWelcomeMessageLength)
-
- // ErrPicker is returned if the picker (kademlia) rejects the peer
- ErrPicker = errors.New("picker rejection")
-)
-
-// AdvertisableAddressResolver can Resolve a Multiaddress.
-type AdvertisableAddressResolver interface {
- Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error)
-}
-
// Service can perform initiate or handle a handshake between peers.
type Service struct {
signer crypto.Signer
@@ -76,20 +35,6 @@ type Service struct {
picker p2p.Picker
}
-// Info contains the information received from the handshake.
-type Info struct {
- BzzAddress *bzz.Address
- FullNode bool
-}
-
-func (i *Info) LightString() string {
- if !i.FullNode {
- return " (light)"
- }
-
- return ""
-}
-
// New creates a new handshake Service.
func New(signer crypto.Signer, advertisableAddresser AdvertisableAddressResolver, overlay swarm.Address, networkID uint64, fullNode bool, nonce []byte, welcomeMessage string, validateOverlay bool, ownPeerID libp2ppeer.ID, logger log.Logger) (*Service, error) {
if len(welcomeMessage) > MaxWelcomeMessageLength {
@@ -113,107 +58,6 @@ func New(signer crypto.Signer, advertisableAddresser AdvertisableAddressResolver
return svc, nil
}
-func (s *Service) SetPicker(n p2p.Picker) {
- s.picker = n
-}
-
-// Handshake initiates a handshake with a peer.
-func (s *Service) Handshake(ctx context.Context, stream p2p.Stream, peerMultiaddr ma.Multiaddr, peerID libp2ppeer.ID) (i *Info, err error) {
- loggerV1 := s.logger.V(1).Register()
-
- ctx, cancel := context.WithTimeout(ctx, handshakeTimeout)
- defer cancel()
-
- w, r := protobuf.NewWriterAndReader(stream)
- fullRemoteMA, err := buildFullMA(peerMultiaddr, peerID)
- if err != nil {
- return nil, err
- }
-
- fullRemoteMABytes, err := fullRemoteMA.MarshalBinary()
- if err != nil {
- return nil, err
- }
-
- if err := w.WriteMsgWithContext(ctx, &pb.Syn{
- ObservedUnderlay: fullRemoteMABytes,
- }); err != nil {
- return nil, fmt.Errorf("write syn message: %w", err)
- }
-
- var resp pb.SynAck
- if err := r.ReadMsgWithContext(ctx, &resp); err != nil {
- return nil, fmt.Errorf("read synack message: %w", err)
- }
-
- observedUnderlay, err := ma.NewMultiaddrBytes(resp.Syn.ObservedUnderlay)
- if err != nil {
- return nil, ErrInvalidSyn
- }
-
- observedUnderlayAddrInfo, err := libp2ppeer.AddrInfoFromP2pAddr(observedUnderlay)
- if err != nil {
- return nil, fmt.Errorf("extract addr from P2P: %w", err)
- }
-
- if s.libp2pID != observedUnderlayAddrInfo.ID {
- // NOTE eventually we will return error here, but for now we want to gather some statistics
- s.logger.Warning("received peer ID does not match ours", "their", observedUnderlayAddrInfo.ID, "ours", s.libp2pID)
- }
-
- advertisableUnderlay, err := s.advertisableAddresser.Resolve(observedUnderlay)
- if err != nil {
- return nil, err
- }
-
- bzzAddress, err := bzz.NewAddress(s.signer, advertisableUnderlay, s.overlay, s.networkID, s.nonce)
- if err != nil {
- return nil, err
- }
-
- advertisableUnderlayBytes, err := bzzAddress.Underlay.MarshalBinary()
- if err != nil {
- return nil, err
- }
-
- if resp.Ack.NetworkID != s.networkID {
- return nil, ErrNetworkIDIncompatible
- }
-
- remoteBzzAddress, err := s.parseCheckAck(resp.Ack)
- if err != nil {
- return nil, err
- }
-
- // Synced read:
- welcomeMessage := s.GetWelcomeMessage()
- msg := &pb.Ack{
- Address: &pb.BzzAddress{
- Underlay: advertisableUnderlayBytes,
- Overlay: bzzAddress.Overlay.Bytes(),
- Signature: bzzAddress.Signature,
- },
- NetworkID: s.networkID,
- FullNode: s.fullNode,
- Nonce: s.nonce,
- WelcomeMessage: welcomeMessage,
- }
-
- if err := w.WriteMsgWithContext(ctx, msg); err != nil {
- return nil, fmt.Errorf("write ack message: %w", err)
- }
-
- loggerV1.Debug("handshake finished for peer (outbound)", "peer_address", remoteBzzAddress.Overlay)
- if len(resp.Ack.WelcomeMessage) > 0 {
- s.logger.Debug("greeting message from peer", "peer_address", remoteBzzAddress.Overlay, "message", resp.Ack.WelcomeMessage)
- }
-
- return &Info{
- BzzAddress: remoteBzzAddress,
- FullNode: resp.Ack.FullNode,
- }, nil
-}
-
// Handle handles an incoming handshake from a peer.
func (s *Service) Handle(ctx context.Context, stream p2p.Stream, remoteMultiaddr ma.Multiaddr, remotePeerID libp2ppeer.ID) (i *Info, err error) {
loggerV1 := s.logger.V(1).Register()
@@ -316,30 +160,3 @@ func (s *Service) Handle(ctx context.Context, stream p2p.Stream, remoteMultiaddr
FullNode: ack.FullNode,
}, nil
}
-
-// SetWelcomeMessage sets the new handshake welcome message.
-func (s *Service) SetWelcomeMessage(msg string) (err error) {
- if len(msg) > MaxWelcomeMessageLength {
- return ErrWelcomeMessageLength
- }
- s.welcomeMessage.Store(msg)
- return nil
-}
-
-// GetWelcomeMessage returns the current handshake welcome message.
-func (s *Service) GetWelcomeMessage() string {
- return s.welcomeMessage.Load().(string)
-}
-
-func buildFullMA(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
- return ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", addr.String(), peerID.String()))
-}
-
-func (s *Service) parseCheckAck(ack *pb.Ack) (*bzz.Address, error) {
- bzzAddress, err := bzz.ParseAddress(ack.Address.Underlay, ack.Address.Overlay, ack.Address.Signature, ack.Nonce, s.validateOverlay, s.networkID)
- if err != nil {
- return nil, ErrInvalidAck
- }
-
- return bzzAddress, nil
-}
diff --git a/pkg/p2p/libp2p/internal/handshake/handshake_js.go b/pkg/p2p/libp2p/internal/handshake/handshake_js.go
new file mode 100644
index 00000000000..c799b192127
--- /dev/null
+++ b/pkg/p2p/libp2p/internal/handshake/handshake_js.go
@@ -0,0 +1,153 @@
+//go:build js
+// +build js
+
+package handshake
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+
+ "github.com/ethersphere/bee/v2/pkg/bzz"
+ "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake/pb"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// Service can initiate or handle a handshake between peers.
+type Service struct {
+ signer crypto.Signer
+ advertisableAddresser AdvertisableAddressResolver
+ overlay swarm.Address
+ fullNode bool
+ nonce []byte
+ networkID uint64
+ validateOverlay bool
+ welcomeMessage atomic.Value
+ logger log.Logger
+ libp2pID libp2ppeer.ID
+ picker p2p.Picker
+}
+
+// New creates a new handshake Service.
+func New(signer crypto.Signer, advertisableAddresser AdvertisableAddressResolver, overlay swarm.Address, networkID uint64, fullNode bool, nonce []byte, welcomeMessage string, validateOverlay bool, ownPeerID libp2ppeer.ID, logger log.Logger) (*Service, error) {
+ if len(welcomeMessage) > MaxWelcomeMessageLength {
+ return nil, ErrWelcomeMessageLength
+ }
+
+ svc := &Service{
+ signer: signer,
+ advertisableAddresser: advertisableAddresser,
+ overlay: overlay,
+ networkID: networkID,
+ fullNode: fullNode,
+ validateOverlay: validateOverlay,
+ nonce: nonce,
+ libp2pID: ownPeerID,
+ logger: logger.WithName(loggerName).Register(),
+ }
+ svc.welcomeMessage.Store(welcomeMessage)
+
+ return svc, nil
+}
+
+// Handle handles an incoming handshake from a peer.
+func (s *Service) Handle(ctx context.Context, stream p2p.Stream, remoteMultiaddr ma.Multiaddr, remotePeerID libp2ppeer.ID) (i *Info, err error) {
+ loggerV1 := s.logger.V(1).Register()
+
+ ctx, cancel := context.WithTimeout(ctx, handshakeTimeout)
+ defer cancel()
+
+ w, r := protobuf.NewWriterAndReader(stream)
+ fullRemoteMA, err := buildFullMA(remoteMultiaddr, remotePeerID)
+ if err != nil {
+ return nil, err
+ }
+
+ fullRemoteMABytes, err := fullRemoteMA.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+
+ var syn pb.Syn
+ if err := r.ReadMsgWithContext(ctx, &syn); err != nil {
+ return nil, fmt.Errorf("read syn message: %w", err)
+ }
+ observedUnderlay, err := ma.NewMultiaddrBytes(syn.ObservedUnderlay)
+ if err != nil {
+ return nil, ErrInvalidSyn
+ }
+
+ advertisableUnderlay, err := s.advertisableAddresser.Resolve(observedUnderlay)
+ if err != nil {
+ return nil, err
+ }
+
+ bzzAddress, err := bzz.NewAddress(s.signer, advertisableUnderlay, s.overlay, s.networkID, s.nonce)
+ if err != nil {
+ return nil, err
+ }
+
+ advertisableUnderlayBytes, err := bzzAddress.Underlay.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+
+ welcomeMessage := s.GetWelcomeMessage()
+
+ if err := w.WriteMsgWithContext(ctx, &pb.SynAck{
+ Syn: &pb.Syn{
+ ObservedUnderlay: fullRemoteMABytes,
+ },
+ Ack: &pb.Ack{
+ Address: &pb.BzzAddress{
+ Underlay: advertisableUnderlayBytes,
+ Overlay: bzzAddress.Overlay.Bytes(),
+ Signature: bzzAddress.Signature,
+ },
+ NetworkID: s.networkID,
+ FullNode: s.fullNode,
+ Nonce: s.nonce,
+ WelcomeMessage: welcomeMessage,
+ },
+ }); err != nil {
+ return nil, fmt.Errorf("write synack message: %w", err)
+ }
+
+ var ack pb.Ack
+ if err := r.ReadMsgWithContext(ctx, &ack); err != nil {
+ return nil, fmt.Errorf("read ack message: %w", err)
+ }
+
+ if ack.NetworkID != s.networkID {
+ return nil, ErrNetworkIDIncompatible
+ }
+
+ overlay := swarm.NewAddress(ack.Address.Overlay)
+
+ if s.picker != nil {
+ if !s.picker.Pick(p2p.Peer{Address: overlay, FullNode: ack.FullNode}) {
+ return nil, ErrPicker
+ }
+ }
+
+ remoteBzzAddress, err := s.parseCheckAck(&ack)
+ if err != nil {
+ return nil, err
+ }
+
+ loggerV1.Debug("handshake finished for peer (inbound)", "peer_address", remoteBzzAddress.Overlay)
+ if len(ack.WelcomeMessage) > 0 {
+ loggerV1.Debug("greeting message from peer", "peer_address", remoteBzzAddress.Overlay, "message", ack.WelcomeMessage)
+ }
+
+ return &Info{
+ BzzAddress: remoteBzzAddress,
+ FullNode: ack.FullNode,
+ }, nil
+}
diff --git a/pkg/p2p/libp2p/internal/handshake/handshake_shared.go b/pkg/p2p/libp2p/internal/handshake/handshake_shared.go
new file mode 100644
index 00000000000..ffd2f12aa45
--- /dev/null
+++ b/pkg/p2p/libp2p/internal/handshake/handshake_shared.go
@@ -0,0 +1,199 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handshake
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/bzz"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake/pb"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+
+ libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "handshake"
+
+const (
+	// ProtocolName is the name of the handshake protocol.
+ ProtocolName = "handshake"
+ // ProtocolVersion is the current handshake protocol version.
+ ProtocolVersion = "14.0.0"
+ // StreamName is the name of the stream used for handshake purposes.
+ StreamName = "handshake"
+	// MaxWelcomeMessageLength is the maximum number of characters allowed in the welcome message.
+ MaxWelcomeMessageLength = 140
+ handshakeTimeout = 15 * time.Second
+)
+
+var (
+	// ErrNetworkIDIncompatible is returned if the response from the other peer does not have a valid network ID.
+ ErrNetworkIDIncompatible = errors.New("incompatible network ID")
+
+	// ErrInvalidAck is returned if the data received in the ack is not valid (for example, an invalid signature).
+ ErrInvalidAck = errors.New("invalid ack")
+
+	// ErrInvalidSyn is returned if the observed address in the syn is not valid.
+ ErrInvalidSyn = errors.New("invalid syn")
+
+	// ErrWelcomeMessageLength is returned if the welcome message is longer than the maximum length.
+ ErrWelcomeMessageLength = fmt.Errorf("handshake welcome message longer than maximum of %d characters", MaxWelcomeMessageLength)
+
+	// ErrPicker is returned if the picker (kademlia) rejects the peer.
+ ErrPicker = errors.New("picker rejection")
+)
+
+// AdvertisableAddressResolver can Resolve a Multiaddress.
+type AdvertisableAddressResolver interface {
+ Resolve(observedAddress ma.Multiaddr) (ma.Multiaddr, error)
+}
+
+// Info contains the information received from the handshake.
+type Info struct {
+ BzzAddress *bzz.Address
+ FullNode bool
+}
+
+func (i *Info) LightString() string {
+ if !i.FullNode {
+ return " (light)"
+ }
+
+ return ""
+}
+
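+// SetPicker sets the picker that can veto a peer during an incoming handshake.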
+func (s *Service) SetPicker(n p2p.Picker) {
+ s.picker = n
+}
+
+// Handshake initiates a handshake with a peer.
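+// As the initiator we send a Syn carrying the underlay we observe for the
+// peer, read back the SynAck (which echoes the address the peer observed for
+// us and carries its signed bzz address), verify the network ID and the
+// address, and complete the exchange by sending our own Ack.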
+func (s *Service) Handshake(ctx context.Context, stream p2p.Stream, peerMultiaddr ma.Multiaddr, peerID libp2ppeer.ID) (i *Info, err error) {
+ loggerV1 := s.logger.V(1).Register()
+
+ ctx, cancel := context.WithTimeout(ctx, handshakeTimeout)
+ defer cancel()
+
+ w, r := protobuf.NewWriterAndReader(stream)
+ fullRemoteMA, err := buildFullMA(peerMultiaddr, peerID)
+ if err != nil {
+ return nil, err
+ }
+
+ fullRemoteMABytes, err := fullRemoteMA.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := w.WriteMsgWithContext(ctx, &pb.Syn{
+ ObservedUnderlay: fullRemoteMABytes,
+ }); err != nil {
+ return nil, fmt.Errorf("write syn message: %w", err)
+ }
+
+ var resp pb.SynAck
+ if err := r.ReadMsgWithContext(ctx, &resp); err != nil {
+ return nil, fmt.Errorf("read synack message: %w", err)
+ }
+
+ observedUnderlay, err := ma.NewMultiaddrBytes(resp.Syn.ObservedUnderlay)
+ if err != nil {
+ return nil, ErrInvalidSyn
+ }
+
+ observedUnderlayAddrInfo, err := libp2ppeer.AddrInfoFromP2pAddr(observedUnderlay)
+ if err != nil {
+ return nil, fmt.Errorf("extract addr from P2P: %w", err)
+ }
+
+ if s.libp2pID != observedUnderlayAddrInfo.ID {
+		// NOTE: eventually we will return an error here, but for now we want to gather some statistics
+ s.logger.Warning("received peer ID does not match ours", "their", observedUnderlayAddrInfo.ID, "ours", s.libp2pID)
+ }
+
+ advertisableUnderlay, err := s.advertisableAddresser.Resolve(observedUnderlay)
+ if err != nil {
+ return nil, err
+ }
+
+ bzzAddress, err := bzz.NewAddress(s.signer, advertisableUnderlay, s.overlay, s.networkID, s.nonce)
+ if err != nil {
+ return nil, err
+ }
+
+ advertisableUnderlayBytes, err := bzzAddress.Underlay.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.Ack.NetworkID != s.networkID {
+ return nil, ErrNetworkIDIncompatible
+ }
+
+ remoteBzzAddress, err := s.parseCheckAck(resp.Ack)
+ if err != nil {
+ return nil, err
+ }
+
+	// Synchronized read of the welcome message:
+ welcomeMessage := s.GetWelcomeMessage()
+ msg := &pb.Ack{
+ Address: &pb.BzzAddress{
+ Underlay: advertisableUnderlayBytes,
+ Overlay: bzzAddress.Overlay.Bytes(),
+ Signature: bzzAddress.Signature,
+ },
+ NetworkID: s.networkID,
+ FullNode: s.fullNode,
+ Nonce: s.nonce,
+ WelcomeMessage: welcomeMessage,
+ }
+
+ if err := w.WriteMsgWithContext(ctx, msg); err != nil {
+ return nil, fmt.Errorf("write ack message: %w", err)
+ }
+
+ loggerV1.Debug("handshake finished for peer (outbound)", "peer_address", remoteBzzAddress.Overlay)
+ if len(resp.Ack.WelcomeMessage) > 0 {
+ s.logger.Debug("greeting message from peer", "peer_address", remoteBzzAddress.Overlay, "message", resp.Ack.WelcomeMessage)
+ }
+
+ return &Info{
+ BzzAddress: remoteBzzAddress,
+ FullNode: resp.Ack.FullNode,
+ }, nil
+}
+
+// SetWelcomeMessage sets the new handshake welcome message.
+func (s *Service) SetWelcomeMessage(msg string) (err error) {
+ if len(msg) > MaxWelcomeMessageLength {
+ return ErrWelcomeMessageLength
+ }
+ s.welcomeMessage.Store(msg)
+ return nil
+}
+
+// GetWelcomeMessage returns the current handshake welcome message.
+func (s *Service) GetWelcomeMessage() string {
+ return s.welcomeMessage.Load().(string)
+}
+
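+// buildFullMA encapsulates the peer ID into the given multiaddress, e.g.
+// /ip4/1.2.3.4/tcp/1634 becomes /ip4/1.2.3.4/tcp/1634/p2p/<peer-id>.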
+func buildFullMA(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
+ return ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", addr.String(), peerID.String()))
+}
+
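+// parseCheckAck parses the bzz address carried in the ack and verifies its
+// signature (and, when enabled, the overlay) against our network ID,
+// returning ErrInvalidAck on failure.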
+func (s *Service) parseCheckAck(ack *pb.Ack) (*bzz.Address, error) {
+ bzzAddress, err := bzz.ParseAddress(ack.Address.Underlay, ack.Address.Overlay, ack.Address.Signature, ack.Nonce, s.validateOverlay, s.networkID)
+ if err != nil {
+ return nil, ErrInvalidAck
+ }
+
+ return bzzAddress, nil
+}
diff --git a/pkg/p2p/libp2p/internal/handshake/metrics.go b/pkg/p2p/libp2p/internal/handshake/metrics.go
index c8177d3f93e..fe9691f21a1 100644
--- a/pkg/p2p/libp2p/internal/handshake/metrics.go
+++ b/pkg/p2p/libp2p/internal/handshake/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/p2p/libp2p/internal/reacher/metrics.go b/pkg/p2p/libp2p/internal/reacher/metrics.go
index a6019ba4a4e..3f621b0eb91 100644
--- a/pkg/p2p/libp2p/internal/reacher/metrics.go
+++ b/pkg/p2p/libp2p/internal/reacher/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/p2p/libp2p/internal/reacher/reacher.go b/pkg/p2p/libp2p/internal/reacher/reacher.go
index 18b1431229f..a11fa8324a5 100644
--- a/pkg/p2p/libp2p/internal/reacher/reacher.go
+++ b/pkg/p2p/libp2p/internal/reacher/reacher.go
@@ -1,9 +1,6 @@
-// Copyright 2021 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package reacher runs a background worker that will ping peers
-// from an internal queue and report back the reachability to the notifier.
package reacher
import (
@@ -12,22 +9,8 @@ import (
"time"
"github.com/ethersphere/bee/v2/pkg/p2p"
- "github.com/ethersphere/bee/v2/pkg/swarm"
- ma "github.com/multiformats/go-multiaddr"
)
-const (
- pingTimeout = time.Second * 15
- workers = 16
- retryAfterDuration = time.Minute * 5
-)
-
-type peer struct {
- overlay swarm.Address
- addr ma.Multiaddr
- retryAfter time.Time
-}
-
type reacher struct {
mu sync.Mutex
peers map[string]*peer
@@ -44,12 +27,6 @@ type reacher struct {
options *Options
}
-type Options struct {
- PingTimeout time.Duration
- Workers int
- RetryAfterDuration time.Duration
-}
-
func New(streamer p2p.Pinger, notifier p2p.ReachableNotifier, o *Options) *reacher {
r := &reacher{
@@ -76,59 +53,6 @@ func New(streamer p2p.Pinger, notifier p2p.ReachableNotifier, o *Options) *reach
return r
}
-func (r *reacher) manage() {
-
- defer r.wg.Done()
-
- c := make(chan *peer)
- defer close(c)
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- r.wg.Add(r.options.Workers)
- for i := 0; i < r.options.Workers; i++ {
- go r.ping(c, ctx)
- }
-
- for {
-
- p, tryAfter := r.tryAcquirePeer()
-
- // if no peer is returned,
- // wait until either more work or the closest retry-after time.
-
- // wait for work and tryAfter
- if tryAfter > 0 {
- select {
- case <-r.quit:
- return
- case <-r.newPeer:
- continue
- case <-time.After(tryAfter):
- continue
- }
- }
-
- // wait for work
- if p == nil {
- select {
- case <-r.quit:
- return
- case <-r.newPeer:
- continue
- }
- }
-
- // ping peer
- select {
- case <-r.quit:
- return
- case c <- p:
- }
- }
-}
-
func (r *reacher) ping(c chan *peer, ctx context.Context) {
defer r.wg.Done()
@@ -153,70 +77,3 @@ func (r *reacher) ping(c chan *peer, ctx context.Context) {
}
}
}
-
-func (r *reacher) tryAcquirePeer() (*peer, time.Duration) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- var (
- now = time.Now()
- nextClosest time.Time
- )
-
- for _, p := range r.peers {
-
- // retry after has expired, retry
- if now.After(p.retryAfter) {
- p.retryAfter = time.Now().Add(r.options.RetryAfterDuration)
- return p, 0
- }
-
- // here, we find the peer with the earliest retry after
- if nextClosest.IsZero() || p.retryAfter.Before(nextClosest) {
- nextClosest = p.retryAfter
- }
- }
-
- if nextClosest.IsZero() {
- return nil, 0
- }
-
- // return the time to wait until the closest retry after
- return nil, time.Until(nextClosest)
-}
-
-// Connected adds a new peer to the queue for testing reachability.
-func (r *reacher) Connected(overlay swarm.Address, addr ma.Multiaddr) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if _, ok := r.peers[overlay.ByteString()]; !ok {
- r.peers[overlay.ByteString()] = &peer{overlay: overlay, addr: addr}
- }
-
- select {
- case r.newPeer <- struct{}{}:
- default:
- }
-}
-
-// Disconnected removes a peer from the queue.
-func (r *reacher) Disconnected(overlay swarm.Address) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- delete(r.peers, overlay.ByteString())
-}
-
-// Close stops the worker. Must be called once.
-func (r *reacher) Close() error {
- select {
- case <-r.quit:
- return nil
- default:
- }
-
- close(r.quit)
- r.wg.Wait()
- return nil
-}
diff --git a/pkg/p2p/libp2p/internal/reacher/reacher_js.go b/pkg/p2p/libp2p/internal/reacher/reacher_js.go
new file mode 100644
index 00000000000..07c8b61a935
--- /dev/null
+++ b/pkg/p2p/libp2p/internal/reacher/reacher_js.go
@@ -0,0 +1,72 @@
+//go:build js
+// +build js
+
+package reacher
+
+import (
+ "context"
+ "sync"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+)
+
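+// The js build shares the manage/tryAcquirePeer scheduling logic from
+// reacher_shared.go; this file only carries the pieces that differ from the
+// native build, notably a ping worker that records no metrics
+// (metrics.go is built with !js).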
+type reacher struct {
+ mu sync.Mutex
+ peers map[string]*peer
+
+ newPeer chan struct{}
+ quit chan struct{}
+
+ pinger p2p.Pinger
+ notifier p2p.ReachableNotifier
+
+ wg sync.WaitGroup
+
+ options *Options
+}
+
+func New(streamer p2p.Pinger, notifier p2p.ReachableNotifier, o *Options) *reacher {
+
+ r := &reacher{
+ newPeer: make(chan struct{}, 1),
+ quit: make(chan struct{}),
+ pinger: streamer,
+ peers: make(map[string]*peer),
+ notifier: notifier,
+ }
+
+ if o == nil {
+ o = &Options{
+ PingTimeout: pingTimeout,
+ Workers: workers,
+ RetryAfterDuration: retryAfterDuration,
+ }
+ }
+ r.options = o
+
+ r.wg.Add(1)
+ go r.manage()
+
+ return r
+}
+
+func (r *reacher) ping(c chan *peer, ctx context.Context) {
+
+ defer r.wg.Done()
+
+ for p := range c {
+
+ ctxt, cancel := context.WithTimeout(ctx, r.options.PingTimeout)
+ _, err := r.pinger.Ping(ctxt, p.addr)
+ cancel()
+
+		// ping was successful
+		if err == nil {
+			r.notifier.Reachable(p.overlay, p2p.ReachabilityStatusPublic)
+		} else {
+			r.notifier.Reachable(p.overlay, p2p.ReachabilityStatusPrivate)
+		}
+ }
+}
diff --git a/pkg/p2p/libp2p/internal/reacher/reacher_shared.go b/pkg/p2p/libp2p/internal/reacher/reacher_shared.go
new file mode 100644
index 00000000000..09cf3ac8cfb
--- /dev/null
+++ b/pkg/p2p/libp2p/internal/reacher/reacher_shared.go
@@ -0,0 +1,153 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reacher runs a background worker that will ping peers
+// from an internal queue and report back the reachability to the notifier.
+package reacher
+
+import (
+ "context"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+const (
+ pingTimeout = time.Second * 15
+ workers = 16
+ retryAfterDuration = time.Minute * 5
+)
+
+type peer struct {
+ overlay swarm.Address
+ addr ma.Multiaddr
+ retryAfter time.Time
+}
+
+type Options struct {
+ PingTimeout time.Duration
+ Workers int
+ RetryAfterDuration time.Duration
+}
+
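+// manage runs the scheduling loop: it spawns the worker pool, then feeds it
+// peers whose retry-after time has expired, sleeping until new work arrives
+// or the earliest retry-after elapses.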
+func (r *reacher) manage() {
+
+ defer r.wg.Done()
+
+ c := make(chan *peer)
+ defer close(c)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ r.wg.Add(r.options.Workers)
+ for i := 0; i < r.options.Workers; i++ {
+ go r.ping(c, ctx)
+ }
+
+ for {
+
+ p, tryAfter := r.tryAcquirePeer()
+
+ // if no peer is returned,
+ // wait until either more work or the closest retry-after time.
+
+ // wait for work and tryAfter
+ if tryAfter > 0 {
+ select {
+ case <-r.quit:
+ return
+ case <-r.newPeer:
+ continue
+ case <-time.After(tryAfter):
+ continue
+ }
+ }
+
+ // wait for work
+ if p == nil {
+ select {
+ case <-r.quit:
+ return
+ case <-r.newPeer:
+ continue
+ }
+ }
+
+ // ping peer
+ select {
+ case <-r.quit:
+ return
+ case c <- p:
+ }
+ }
+}
+
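+// tryAcquirePeer returns the next peer whose retry-after time has expired
+// (resetting its timer), or nil together with the duration to wait until the
+// earliest retry; it returns nil and zero when no peers are queued.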
+func (r *reacher) tryAcquirePeer() (*peer, time.Duration) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ var (
+ now = time.Now()
+ nextClosest time.Time
+ )
+
+ for _, p := range r.peers {
+
+ // retry after has expired, retry
+ if now.After(p.retryAfter) {
+ p.retryAfter = time.Now().Add(r.options.RetryAfterDuration)
+ return p, 0
+ }
+
+ // here, we find the peer with the earliest retry after
+ if nextClosest.IsZero() || p.retryAfter.Before(nextClosest) {
+ nextClosest = p.retryAfter
+ }
+ }
+
+ if nextClosest.IsZero() {
+ return nil, 0
+ }
+
+ // return the time to wait until the closest retry after
+ return nil, time.Until(nextClosest)
+}
+
+// Connected adds a new peer to the queue for testing reachability.
+func (r *reacher) Connected(overlay swarm.Address, addr ma.Multiaddr) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if _, ok := r.peers[overlay.ByteString()]; !ok {
+ r.peers[overlay.ByteString()] = &peer{overlay: overlay, addr: addr}
+ }
+
+ select {
+ case r.newPeer <- struct{}{}:
+ default:
+ }
+}
+
+// Disconnected removes a peer from the queue.
+func (r *reacher) Disconnected(overlay swarm.Address) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ delete(r.peers, overlay.ByteString())
+}
+
+// Close stops the worker. Must be called once.
+func (r *reacher) Close() error {
+ select {
+ case <-r.quit:
+ return nil
+ default:
+ }
+
+ close(r.quit)
+ r.wg.Wait()
+ return nil
+}
diff --git a/pkg/p2p/libp2p/libp2p.go b/pkg/p2p/libp2p/libp2p.go
index 4d4c3773f92..e5521e3d57f 100644
--- a/pkg/p2p/libp2p/libp2p.go
+++ b/pkg/p2p/libp2p/libp2p.go
@@ -1,6 +1,5 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package libp2p
@@ -10,31 +9,26 @@ import (
"errors"
"fmt"
"net"
- "os"
- "runtime"
"strconv"
- "strings"
"sync"
"time"
- "github.com/ethersphere/bee/v2"
+ ocprom "contrib.go.opencensus.io/exporter/prometheus"
"github.com/ethersphere/bee/v2/pkg/addressbook"
"github.com/ethersphere/bee/v2/pkg/bzz"
beecrypto "github.com/ethersphere/bee/v2/pkg/crypto"
"github.com/ethersphere/bee/v2/pkg/log"
+ m2 "github.com/ethersphere/bee/v2/pkg/metrics"
"github.com/ethersphere/bee/v2/pkg/p2p"
"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/blocklist"
"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/breaker"
"github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake"
- "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/reacher"
"github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/swarm"
- "github.com/ethersphere/bee/v2/pkg/topology"
"github.com/ethersphere/bee/v2/pkg/topology/lightnode"
"github.com/ethersphere/bee/v2/pkg/tracing"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
- "github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
@@ -44,41 +38,14 @@ import (
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
- lp2pswarm "github.com/libp2p/go-libp2p/p2p/net/swarm"
- libp2pping "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
-
ma "github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multistream"
- "go.uber.org/atomic"
-
- ocprom "contrib.go.opencensus.io/exporter/prometheus"
- m2 "github.com/ethersphere/bee/v2/pkg/metrics"
- rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
"github.com/prometheus/client_golang/prometheus"
-)
-
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "libp2p"
-
-var (
- _ p2p.Service = (*Service)(nil)
- _ p2p.DebugService = (*Service)(nil)
-
- // reachabilityOverridePublic overrides autonat to simply report
- // public reachability status, it is set in the makefile.
- reachabilityOverridePublic = "false"
-)
-
-const (
- defaultLightNodeLimit = 100
- peerUserAgentTimeout = time.Second
-
- defaultHeadersRWTimeout = 10 * time.Second
-
- IncomingStreamCountLimit = 5_000
- OutgoingStreamCountLimit = 10_000
+ "go.uber.org/atomic"
)
type Service struct {
@@ -111,14 +78,6 @@ type Service struct {
autoNAT autonat.AutoNAT
}
-type lightnodes interface {
- Connected(context.Context, p2p.Peer)
- Disconnected(p2p.Peer)
- Count() int
- RandomPeer(swarm.Address) (swarm.Address, error)
- EachPeer(pf topology.EachPeerFunc) error
-}
-
type Options struct {
PrivateKey *ecdsa.PrivateKey
NATAddr string
@@ -154,13 +113,13 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
}
var listenAddrs []string
+
if ip4Addr != "" {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s", ip4Addr, port))
if o.EnableWS {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/ws", ip4Addr, port))
}
}
-
if ip6Addr != "" {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s", ip6Addr, port))
if o.EnableWS {
@@ -168,7 +127,8 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
}
}
- security := libp2p.DefaultSecurity
+ var security = libp2p.DefaultSecurity
+
libp2pPeerstore, err := pstoremem.NewPeerstore()
if err != nil {
return nil, err
@@ -214,12 +174,14 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
var natManager basichost.NATManager
opts := []libp2p.Option{
+ libp2p.ShareTCPListener(),
libp2p.ListenAddrStrings(listenAddrs...),
security,
// Use dedicated peerstore instead the global DefaultPeerstore
libp2p.Peerstore(libp2pPeerstore),
libp2p.UserAgent(userAgent()),
libp2p.ResourceManager(rm),
+ libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport),
}
if o.NATAddr == "" {
@@ -241,9 +203,9 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
)
}
- transports := []libp2p.Option{
- libp2p.Transport(tcp.NewTCPTransport, tcp.DisableReuseport()),
- }
+ transports := []libp2p.Option{}
+
+ transports = append(transports, libp2p.Transport(tcp.NewTCPTransport))
if o.EnableWS {
transports = append(transports, libp2p.Transport(ws.New))
@@ -257,6 +219,7 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
}
h, err := o.hostFactory(opts...)
+
if err != nil {
return nil, err
}
@@ -367,31 +330,109 @@ func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay
return s, nil
}
-func (s *Service) reachabilityWorker() error {
- sub, err := s.host.EventBus().Subscribe([]interface{}{new(event.EvtLocalReachabilityChanged)})
- if err != nil {
- return fmt.Errorf("failed subscribing to reachability event %w", err)
+func newConnMetricNotify(m metrics) *connectionNotifier {
+ return &connectionNotifier{
+ metrics: m,
+ Notifiee: new(network.NoopNotifiee),
}
+}
+
+type connectionNotifier struct {
+ metrics metrics
+ network.Notifiee
+}
+
+func (c *connectionNotifier) Connected(_ network.Network, _ network.Conn) {
+ c.metrics.HandledConnectionCount.Inc()
+}
+
+func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
+ for _, ss := range p.StreamSpecs {
+ id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name))
+ matcher, err := s.protocolSemverMatcher(id)
+ if err != nil {
+ return fmt.Errorf("protocol version match %s: %w", id, err)
+ }
- go func() {
- defer sub.Close()
- for {
- select {
- case <-s.ctx.Done():
+ s.host.SetStreamHandlerMatch(id, matcher, func(streamlibp2p network.Stream) {
+ peerID := streamlibp2p.Conn().RemotePeer()
+ overlay, found := s.peers.overlay(peerID)
+ if !found {
+ _ = streamlibp2p.Reset()
+ s.logger.Debug("overlay address for peer not found", "peer_id", peerID)
return
- case e := <-sub.Out():
- if r, ok := e.(event.EvtLocalReachabilityChanged); ok {
- select {
- case <-s.ready:
- case <-s.halt:
- return
+ }
+ full, found := s.peers.fullnode(peerID)
+ if !found {
+ _ = streamlibp2p.Reset()
+ s.logger.Debug("fullnode info for peer not found", "peer_id", peerID)
+ return
+ }
+
+ stream := newStream(streamlibp2p, s.metrics)
+
+ // exchange headers
+ headersStartTime := time.Now()
+ ctx, cancel := context.WithTimeout(s.ctx, s.HeadersRWTimeout)
+ defer cancel()
+ if err := handleHeaders(ctx, ss.Headler, stream, overlay); err != nil {
+ s.logger.Debug("handle protocol: handle headers failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
+ _ = stream.Reset()
+ return
+ }
+ s.metrics.HeadersExchangeDuration.Observe(time.Since(headersStartTime).Seconds())
+
+ ctx, cancel = context.WithCancel(s.ctx)
+
+ s.peers.addStream(peerID, streamlibp2p, cancel)
+ defer s.peers.removeStream(peerID, streamlibp2p)
+
+ // tracing: get span tracing context and add it to the context
+ // silently ignore if the peer is not providing tracing
+ ctx, err := s.tracer.WithContextFromHeaders(ctx, stream.Headers())
+ if err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
+ s.logger.Debug("handle protocol: get tracing context failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
+ _ = stream.Reset()
+ return
+ }
+
+ logger := tracing.NewLoggerWithTraceID(ctx, s.logger)
+ loggerV1 := logger.V(1).Build()
+
+ s.metrics.HandledStreamCount.Inc()
+ if err := ss.Handler(ctx, p2p.Peer{Address: overlay, FullNode: full}, stream); err != nil {
+ var de *p2p.DisconnectError
+ if errors.As(err, &de) {
+ loggerV1.Debug("libp2p handler: disconnecting due to disconnect error", "protocol", p.Name, "address", overlay)
+ _ = stream.Reset()
+ _ = s.Disconnect(overlay, de.Error())
+ }
+
+ var bpe *p2p.BlockPeerError
+ if errors.As(err, &bpe) {
+ _ = stream.Reset()
+ if err := s.Blocklist(overlay, bpe.Duration(), bpe.Error()); err != nil {
+ logger.Debug("blocklist: could not blocklist peer", "peer_id", peerID, "error", err)
+ logger.Error(nil, "unable to blocklist peer", "peer_id", peerID)
}
- s.logger.Debug("reachability changed", "new_reachability", r.Reachability.String())
- s.notifier.UpdateReachability(p2p.ReachabilityStatus(r.Reachability))
+ loggerV1.Debug("handler: peer blocklisted", "protocol", p.Name, "peer_address", overlay)
+ }
+ // count unexpected requests
+ if errors.Is(err, p2p.ErrUnexpected) {
+ s.metrics.UnexpectedProtocolReqCount.Inc()
}
+ if errors.Is(err, network.ErrReset) {
+ s.metrics.StreamHandlerErrResetCount.Inc()
+ }
+ logger.Debug("handle protocol failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
+ return
}
- }
- }()
+ })
+ }
+
+ s.protocolsmu.Lock()
+ s.protocols = append(s.protocols, p)
+ s.protocolsmu.Unlock()
return nil
}
@@ -548,126 +589,6 @@ func (s *Service) handleIncoming(stream network.Stream) {
s.logger.Debug("stream handler: successfully connected to peer (inbound)", "address", i.BzzAddress.Overlay, "light", i.LightString(), "user_agent", peerUserAgent)
}
-func (s *Service) SetPickyNotifier(n p2p.PickyNotifier) {
- s.handshakeService.SetPicker(n)
- s.notifier = n
- s.reacher = reacher.New(s, n, nil)
-}
-
-func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
- for _, ss := range p.StreamSpecs {
- id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name))
- matcher, err := s.protocolSemverMatcher(id)
- if err != nil {
- return fmt.Errorf("protocol version match %s: %w", id, err)
- }
-
- s.host.SetStreamHandlerMatch(id, matcher, func(streamlibp2p network.Stream) {
- peerID := streamlibp2p.Conn().RemotePeer()
- overlay, found := s.peers.overlay(peerID)
- if !found {
- _ = streamlibp2p.Reset()
- s.logger.Debug("overlay address for peer not found", "peer_id", peerID)
- return
- }
- full, found := s.peers.fullnode(peerID)
- if !found {
- _ = streamlibp2p.Reset()
- s.logger.Debug("fullnode info for peer not found", "peer_id", peerID)
- return
- }
-
- stream := newStream(streamlibp2p, s.metrics)
-
- // exchange headers
- headersStartTime := time.Now()
- ctx, cancel := context.WithTimeout(s.ctx, s.HeadersRWTimeout)
- defer cancel()
- if err := handleHeaders(ctx, ss.Headler, stream, overlay); err != nil {
- s.logger.Debug("handle protocol: handle headers failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
- _ = stream.Reset()
- return
- }
- s.metrics.HeadersExchangeDuration.Observe(time.Since(headersStartTime).Seconds())
-
- ctx, cancel = context.WithCancel(s.ctx)
-
- s.peers.addStream(peerID, streamlibp2p, cancel)
- defer s.peers.removeStream(peerID, streamlibp2p)
-
- // tracing: get span tracing context and add it to the context
- // silently ignore if the peer is not providing tracing
- ctx, err := s.tracer.WithContextFromHeaders(ctx, stream.Headers())
- if err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
- s.logger.Debug("handle protocol: get tracing context failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
- _ = stream.Reset()
- return
- }
-
- logger := tracing.NewLoggerWithTraceID(ctx, s.logger)
- loggerV1 := logger.V(1).Build()
-
- s.metrics.HandledStreamCount.Inc()
- if err := ss.Handler(ctx, p2p.Peer{Address: overlay, FullNode: full}, stream); err != nil {
- var de *p2p.DisconnectError
- if errors.As(err, &de) {
- loggerV1.Debug("libp2p handler: disconnecting due to disconnect error", "protocol", p.Name, "address", overlay)
- _ = stream.Reset()
- _ = s.Disconnect(overlay, de.Error())
- }
-
- var bpe *p2p.BlockPeerError
- if errors.As(err, &bpe) {
- _ = stream.Reset()
- if err := s.Blocklist(overlay, bpe.Duration(), bpe.Error()); err != nil {
- logger.Debug("blocklist: could not blocklist peer", "peer_id", peerID, "error", err)
- logger.Error(nil, "unable to blocklist peer", "peer_id", peerID)
- }
- loggerV1.Debug("handler: peer blocklisted", "protocol", p.Name, "peer_address", overlay)
- }
- // count unexpected requests
- if errors.Is(err, p2p.ErrUnexpected) {
- s.metrics.UnexpectedProtocolReqCount.Inc()
- }
- if errors.Is(err, network.ErrReset) {
- s.metrics.StreamHandlerErrResetCount.Inc()
- }
- logger.Debug("handle protocol failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
- return
- }
- })
- }
-
- s.protocolsmu.Lock()
- s.protocols = append(s.protocols, p)
- s.protocolsmu.Unlock()
- return nil
-}
-
-func (s *Service) Addresses() (addresses []ma.Multiaddr, err error) {
- for _, addr := range s.host.Addrs() {
- a, err := buildUnderlayAddress(addr, s.host.ID())
- if err != nil {
- return nil, err
- }
-
- addresses = append(addresses, a)
- }
- if s.natAddrResolver != nil && len(addresses) > 0 {
- a, err := s.natAddrResolver.Resolve(addresses[0])
- if err != nil {
- return nil, err
- }
- addresses = append(addresses, a)
- }
-
- return addresses, nil
-}
-
-func (s *Service) NATManager() basichost.NATManager {
- return s.natManager
-}
-
func (s *Service) Blocklist(overlay swarm.Address, duration time.Duration, reason string) error {
loggerV1 := s.logger.V(1).Register()
@@ -694,20 +615,6 @@ func (s *Service) Blocklist(overlay swarm.Address, duration time.Duration, reaso
return nil
}
-func buildHostAddress(peerID libp2ppeer.ID) (ma.Multiaddr, error) {
- return ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", peerID.String()))
-}
-
-func buildUnderlayAddress(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
- // Build host multiaddress
- hostAddr, err := buildHostAddress(peerID)
- if err != nil {
- return nil, err
- }
-
- return addr.Encapsulate(hostAddr), nil
-}
-
func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error) {
loggerV1 := s.logger.V(1).Register()
@@ -875,51 +782,6 @@ func (s *Service) Disconnect(overlay swarm.Address, reason string) (err error) {
return nil
}
-// disconnected is a registered peer registry event
-func (s *Service) disconnected(address swarm.Address) {
- peer := p2p.Peer{Address: address}
- peerID, found := s.peers.peerID(address)
- if found {
- // peerID might not always be found on shutdown
- full, found := s.peers.fullnode(peerID)
- if found {
- peer.FullNode = full
- }
- }
- s.protocolsmu.RLock()
- for _, tn := range s.protocols {
- if tn.DisconnectIn != nil {
- if err := tn.DisconnectIn(peer); err != nil {
- s.logger.Debug("disconnectIn failed", tn.Name, "version", tn.Version, "peer", address, "error", err)
- }
- }
- }
-
- s.protocolsmu.RUnlock()
-
- if s.notifier != nil {
- s.notifier.Disconnected(peer)
- }
- if s.lightNodes != nil {
- s.lightNodes.Disconnected(peer)
- }
- if s.reacher != nil {
- s.reacher.Disconnected(address)
- }
-}
-
-func (s *Service) Peers() []p2p.Peer {
- return s.peers.peers()
-}
-
-func (s *Service) Blocklisted(overlay swarm.Address) (bool, error) {
- return s.blocklist.Exists(overlay)
-}
-
-func (s *Service) BlocklistedPeers() ([]p2p.BlockListedPeer, error) {
- return s.blocklist.Peers()
-}
-
func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, error) {
select {
case <-ctx.Done():
@@ -944,6 +806,7 @@ func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers
headers = make(p2p.Headers)
}
if err := s.tracer.AddContextHeader(ctx, headers); err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
+
_ = stream.Reset()
return nil, fmt.Errorf("new stream add context header fail: %w", err)
}
@@ -955,7 +818,6 @@ func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers
_ = stream.Reset()
return nil, fmt.Errorf("send headers: %w", err)
}
-
return stream, nil
}
@@ -967,6 +829,7 @@ func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID,
s.logger.Debug("stream experienced unexpected early close")
_ = st.Close()
}
+
var errNotSupported multistream.ErrNotSupported[protocol.ID]
if errors.As(err, &errNotSupported) {
return nil, p2p.NewIncompatibleStreamError(err)
@@ -1008,173 +871,3 @@ func (s *Service) Close() error {
return s.host.Close()
}
-
-// SetWelcomeMessage sets the welcome message for the handshake protocol.
-func (s *Service) SetWelcomeMessage(val string) error {
- return s.handshakeService.SetWelcomeMessage(val)
-}
-
-// GetWelcomeMessage returns the value of the welcome message.
-func (s *Service) GetWelcomeMessage() string {
- return s.handshakeService.GetWelcomeMessage()
-}
-
-func (s *Service) Ready() error {
- if err := s.reachabilityWorker(); err != nil {
- return fmt.Errorf("reachability worker: %w", err)
- }
-
- close(s.ready)
- return nil
-}
-
-func (s *Service) Halt() {
- close(s.halt)
-}
-
-func (s *Service) Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error) {
- info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
- if err != nil {
- return rtt, fmt.Errorf("unable to parse underlay address: %w", err)
- }
-
- // Add the address to libp2p peerstore for it to be dialable
- s.pingDialer.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL)
-
- // Cleanup connection after ping is done
- defer func() {
- _ = s.pingDialer.Network().ClosePeer(info.ID)
- }()
-
- select {
- case <-ctx.Done():
- return rtt, ctx.Err()
- case res := <-libp2pping.Ping(ctx, s.pingDialer, info.ID):
- return res.RTT, res.Error
- }
-}
-
-// peerUserAgent returns User Agent string of the connected peer if the peer
-// provides it. It ignores the default libp2p user agent string
-// "github.com/libp2p/go-libp2p" and returns empty string in that case.
-func (s *Service) peerUserAgent(ctx context.Context, peerID libp2ppeer.ID) string {
- ctx, cancel := context.WithTimeout(ctx, peerUserAgentTimeout)
- defer cancel()
- var (
- v interface{}
- err error
- )
- // Peerstore may not contain all keys and values right after the connections is created.
- // This retry mechanism ensures more reliable user agent propagation.
- for iterate := true; iterate; {
- v, err = s.host.Peerstore().Get(peerID, "AgentVersion")
- if err == nil {
- break
- }
- select {
- case <-ctx.Done():
- iterate = false
- case <-time.After(50 * time.Millisecond):
- }
- }
- if err != nil {
- // error is ignored as user agent is informative only
- return ""
- }
- ua, ok := v.(string)
- if !ok {
- return ""
- }
- // Ignore the default user agent.
- if ua == "github.com/libp2p/go-libp2p" {
- return ""
- }
- return ua
-}
-
-// NetworkStatus implements the p2p.NetworkStatuser interface.
-func (s *Service) NetworkStatus() p2p.NetworkStatus {
- return p2p.NetworkStatus(s.networkStatus.Load())
-}
-
-// determineCurrentNetworkStatus determines if the network
-// is available/unavailable based on the given error, and
-// returns ErrNetworkUnavailable if unavailable.
-// The result of this operation is stored and can be reflected
-// in the results of future NetworkStatus method calls.
-func (s *Service) determineCurrentNetworkStatus(err error) error {
- switch {
- case err == nil:
- s.networkStatus.Store(int32(p2p.NetworkStatusAvailable))
- case errors.Is(err, lp2pswarm.ErrDialBackoff):
- if s.NetworkStatus() == p2p.NetworkStatusUnavailable {
- err = errors.Join(err, p2p.ErrNetworkUnavailable)
- }
- case isNetworkOrHostUnreachableError(err):
- s.networkStatus.Store(int32(p2p.NetworkStatusUnavailable))
- err = errors.Join(err, p2p.ErrNetworkUnavailable)
- default:
- err = fmt.Errorf("network status unknown: %w", err)
- }
- return err
-}
-
-// appendSpace adds a leading space character if the string is not empty.
-// It is useful for constructing log messages with conditional substrings.
-func appendSpace(s string) string {
- if s == "" {
- return ""
- }
- return " " + s
-}
-
-// userAgent returns a User Agent string passed to the libp2p host to identify peer node.
-func userAgent() string {
- return fmt.Sprintf("bee/%s %s %s/%s", bee.Version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
-}
-
-func newConnMetricNotify(m metrics) *connectionNotifier {
- return &connectionNotifier{
- metrics: m,
- Notifiee: new(network.NoopNotifiee),
- }
-}
-
-type connectionNotifier struct {
- metrics metrics
- network.Notifiee
-}
-
-func (c *connectionNotifier) Connected(_ network.Network, _ network.Conn) {
- c.metrics.HandledConnectionCount.Inc()
-}
-
-// isNetworkOrHostUnreachableError determines based on the
-// given error whether the host or network is reachable.
-func isNetworkOrHostUnreachableError(err error) bool {
- var de *lp2pswarm.DialError
- if !errors.As(err, &de) {
- return false
- }
-
- // Since TransportError doesn't implement the Unwrap
- // method we need to inspect the errors manually.
- for i := range de.DialErrors {
- var te *lp2pswarm.TransportError
- if !errors.As(&de.DialErrors[i], &te) {
- continue
- }
-
- var ne *net.OpError
- if !errors.As(te.Cause, &ne) || ne.Op != "dial" {
- continue
- }
-
- var se *os.SyscallError
- if errors.As(ne, &se) && strings.HasPrefix(se.Syscall, "connect") &&
- (errors.Is(se.Err, errHostUnreachable) || errors.Is(se.Err, errNetworkUnreachable)) {
- return true
- }
- }
- return false
-}
diff --git a/pkg/p2p/libp2p/libp2p_js.go b/pkg/p2p/libp2p/libp2p_js.go
new file mode 100644
index 00000000000..fe961fa2cc7
--- /dev/null
+++ b/pkg/p2p/libp2p/libp2p_js.go
@@ -0,0 +1,738 @@
+//go:build js
+// +build js
+
+package libp2p
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/addressbook"
+ "github.com/ethersphere/bee/v2/pkg/bzz"
+ beecrypto "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/blocklist"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/breaker"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/handshake"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology/lightnode"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multistream"
+ wasmws "github.com/talentlessguy/go-libp2p-wasmws"
+ "go.uber.org/atomic"
+)
+
+type Service struct {
+ ctx context.Context
+ host host.Host
+ natManager basichost.NATManager
+ natAddrResolver *staticAddressResolver
+ pingDialer host.Host
+ libp2pPeerstore peerstore.Peerstore
+ networkID uint64
+ handshakeService *handshake.Service
+ addressbook addressbook.Putter
+ peers *peerRegistry
+ connectionBreaker breaker.Interface
+ blocklist *blocklist.Blocklist
+ protocols []p2p.ProtocolSpec
+ notifier p2p.PickyNotifier
+ logger log.Logger
+ tracer *tracing.Tracer
+ ready chan struct{}
+ halt chan struct{}
+ lightNodes lightnodes
+ lightNodeLimit int
+ protocolsmu sync.RWMutex
+ reacher p2p.Reacher
+ networkStatus atomic.Int32
+ HeadersRWTimeout time.Duration
+}
+
+type Options struct {
+ PrivateKey *ecdsa.PrivateKey
+ NATAddr string
+ EnableWS bool
+ FullNode bool
+ LightNodeLimit int
+ WelcomeMessage string
+ Nonce []byte
+ ValidateOverlay bool
+ hostFactory func(...libp2p.Option) (host.Host, error)
+ HeadersRWTimeout time.Duration
+}
+
+func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay swarm.Address, addr string, ab addressbook.Putter, storer storage.StateStorer, lightNodes *lightnode.Container, logger log.Logger, tracer *tracing.Tracer, o Options) (*Service, error) {
+
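+	// A browser/wasm host cannot accept inbound connections, so no listen
+	// addresses are configured; the node only dials out.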
+ var listenAddrs []string
+
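+	// Pin the security transport to noise; the native build uses
+	// libp2p.DefaultSecurity instead.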
+ var security libp2p.Option = libp2p.Security(noise.ID, noise.New)
+
+ libp2pPeerstore, err := pstoremem.NewPeerstore()
+ if err != nil {
+ return nil, err
+ }
+
+	// Tweak certain settings: cap the system-wide inbound/outbound stream counts
+ cfg := rcmgr.PartialLimitConfig{
+ System: rcmgr.ResourceLimits{
+ Streams: IncomingStreamCountLimit + OutgoingStreamCountLimit,
+ StreamsOutbound: OutgoingStreamCountLimit,
+ StreamsInbound: IncomingStreamCountLimit,
+ },
+ }
+
+	// Create our limits by using our cfg and filling any unset values from rcmgr.InfiniteLimits
+ limits := cfg.Build(rcmgr.InfiniteLimits)
+
+	// The resource manager expects a limiter, so we create one from our limits.
+ limiter := rcmgr.NewFixedLimiter(limits)
+
+ str, err := rcmgrObs.NewStatsTraceReporter()
+ if err != nil {
+ return nil, err
+ }
+
+ rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithTraceReporter(str))
+ if err != nil {
+ return nil, err
+ }
+
+ opts := []libp2p.Option{
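+		// NOTE: ShareTCPListener is carried over from the native option set and
+		// should be inert here, since no TCP transport is registered under js.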
+ libp2p.ShareTCPListener(),
+ libp2p.ListenAddrStrings(listenAddrs...),
+ security,
+		// Use dedicated peerstore instead of the global DefaultPeerstore
+ libp2p.Peerstore(libp2pPeerstore),
+ libp2p.UserAgent(userAgent()),
+ libp2p.ResourceManager(rm),
+ libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport),
+ }
+
+ if o.PrivateKey != nil {
+ myKey, _, err := crypto.ECDSAKeyPairFromKey(o.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ opts = append(opts,
+ libp2p.Identity(myKey),
+ )
+ }
+
+ transports := []libp2p.Option{}
+
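+	// WebSocket via go-libp2p-wasmws is the only transport wired up here; the
+	// TCP transport used by the native build is unavailable in the browser.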
+ if o.EnableWS {
+ transports = append(transports, libp2p.Transport(wasmws.New))
+ }
+
+ opts = append(opts, transports...)
+
+ if o.hostFactory == nil {
+ // Use the default libp2p host creation
+ o.hostFactory = libp2p.New
+ }
+
+ h, err := o.hostFactory(opts...)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if o.HeadersRWTimeout == 0 {
+ o.HeadersRWTimeout = defaultHeadersRWTimeout
+ }
+
+ var advertisableAddresser handshake.AdvertisableAddressResolver
+ var natAddrResolver *staticAddressResolver
+ if o.NATAddr == "" {
+ advertisableAddresser = &UpnpAddressResolver{
+ host: h,
+ }
+ } else {
+ natAddrResolver, err = newStaticAddressResolver(o.NATAddr, net.LookupIP)
+ if err != nil {
+ return nil, fmt.Errorf("static nat: %w", err)
+ }
+ advertisableAddresser = natAddrResolver
+ }
+
+ handshakeService, err := handshake.New(signer, advertisableAddresser, overlay, networkID, o.FullNode, o.Nonce, o.WelcomeMessage, o.ValidateOverlay, h.ID(), logger)
+ if err != nil {
+ return nil, fmt.Errorf("handshake service: %w", err)
+ }
+
+	// Create a new dialer for the libp2p ping protocol. This ensures that the
+	// protocol uses a different set of keys for pinging, preventing
+	// inconsistencies in the peerstore, as the addresses used are not dialable
+	// and hence should be cleaned up. We create this host with the same
+	// transports and security options so that it can dial other peers.
+ pingDialer, err := o.hostFactory(append(transports, security, libp2p.NoListenAddrs)...)
+ if err != nil {
+ return nil, err
+ }
+
+ peerRegistry := newPeerRegistry()
+ s := &Service{
+ ctx: ctx,
+ host: h,
+ natManager: nil,
+ natAddrResolver: natAddrResolver,
+ pingDialer: pingDialer,
+ handshakeService: handshakeService,
+ libp2pPeerstore: libp2pPeerstore,
+ networkID: networkID,
+ peers: peerRegistry,
+ addressbook: ab,
+ blocklist: blocklist.NewBlocklist(storer),
+ logger: logger.WithName(loggerName).Register(),
+ tracer: tracer,
+ connectionBreaker: breaker.NewBreaker(breaker.Options{}), // use default options
+ ready: make(chan struct{}),
+ halt: make(chan struct{}),
+ lightNodes: lightNodes,
+ HeadersRWTimeout: o.HeadersRWTimeout,
+ }
+
+ peerRegistry.setDisconnecter(s)
+
+ s.lightNodeLimit = defaultLightNodeLimit
+ if o.LightNodeLimit > 0 {
+ s.lightNodeLimit = o.LightNodeLimit
+ }
+
+ // Construct protocols.
+ id := protocol.ID(p2p.NewSwarmStreamName(handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName))
+ matcher, err := s.protocolSemverMatcher(id)
+ if err != nil {
+ return nil, fmt.Errorf("protocol version match %s: %w", id, err)
+ }
+
+ s.host.SetStreamHandlerMatch(id, matcher, s.handleIncoming)
+
+ h.Network().Notify(peerRegistry) // update peer registry on network events
+
+ return s, nil
+}
+
+func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
+ for _, ss := range p.StreamSpecs {
+ id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name))
+ matcher, err := s.protocolSemverMatcher(id)
+ if err != nil {
+ return fmt.Errorf("protocol version match %s: %w", id, err)
+ }
+
+ s.host.SetStreamHandlerMatch(id, matcher, func(streamlibp2p network.Stream) {
+ peerID := streamlibp2p.Conn().RemotePeer()
+ overlay, found := s.peers.overlay(peerID)
+ if !found {
+ _ = streamlibp2p.Reset()
+ s.logger.Debug("overlay address for peer not found", "peer_id", peerID)
+ return
+ }
+ full, found := s.peers.fullnode(peerID)
+ if !found {
+ _ = streamlibp2p.Reset()
+ s.logger.Debug("fullnode info for peer not found", "peer_id", peerID)
+ return
+ }
+
+ stream := newStream(streamlibp2p)
+
+ ctx, cancel := context.WithTimeout(s.ctx, s.HeadersRWTimeout)
+ defer cancel()
+ if err := handleHeaders(ctx, ss.Headler, stream, overlay); err != nil {
+ s.logger.Debug("handle protocol: handle headers failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
+ _ = stream.Reset()
+ return
+ }
+
+ ctx, cancel = context.WithCancel(s.ctx)
+
+ s.peers.addStream(peerID, streamlibp2p, cancel)
+ defer s.peers.removeStream(peerID, streamlibp2p)
+
+ // tracing: get span tracing context and add it to the context
+ // silently ignore if the peer is not providing tracing
+ ctx, err := s.tracer.WithContextFromHeaders(ctx, stream.Headers())
+ if err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
+ s.logger.Debug("handle protocol: get tracing context failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
+ _ = stream.Reset()
+ return
+ }
+
+ logger := tracing.NewLoggerWithTraceID(ctx, s.logger)
+ loggerV1 := logger.V(1).Build()
+
+ if err := ss.Handler(ctx, p2p.Peer{Address: overlay, FullNode: full}, stream); err != nil {
+ var de *p2p.DisconnectError
+ if errors.As(err, &de) {
+ loggerV1.Debug("libp2p handler: disconnecting due to disconnect error", "protocol", p.Name, "address", overlay)
+ _ = stream.Reset()
+ _ = s.Disconnect(overlay, de.Error())
+ }
+
+ var bpe *p2p.BlockPeerError
+ if errors.As(err, &bpe) {
+ _ = stream.Reset()
+ if err := s.Blocklist(overlay, bpe.Duration(), bpe.Error()); err != nil {
+ logger.Debug("blocklist: could not blocklist peer", "peer_id", peerID, "error", err)
+ logger.Error(nil, "unable to blocklist peer", "peer_id", peerID)
+ }
+ loggerV1.Debug("handler: peer blocklisted", "protocol", p.Name, "peer_address", overlay)
+ }
+
+ logger.Debug("handle protocol failed", "protocol", p.Name, "version", p.Version, "stream", ss.Name, "peer", overlay, "error", err)
+ return
+ }
+ })
+ }
+
+ s.protocolsmu.Lock()
+ s.protocols = append(s.protocols, p)
+ s.protocolsmu.Unlock()
+ return nil
+}
+
+func (s *Service) handleIncoming(stream network.Stream) {
+ loggerV1 := s.logger.V(1).Register()
+
+ select {
+ case <-s.ready:
+ case <-s.halt:
+ _ = stream.Reset()
+ return
+ case <-s.ctx.Done():
+ _ = stream.Reset()
+ return
+ }
+
+ peerID := stream.Conn().RemotePeer()
+ handshakeStream := newStream(stream)
+ i, err := s.handshakeService.Handle(s.ctx, handshakeStream, stream.Conn().RemoteMultiaddr(), peerID)
+ if err != nil {
+ s.logger.Debug("stream handler: handshake: handle failed", "peer_id", peerID, "error", err)
+ s.logger.Error(nil, "stream handler: handshake: handle failed", "peer_id", peerID)
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(peerID)
+ return
+ }
+
+ overlay := i.BzzAddress.Overlay
+
+ blocked, err := s.blocklist.Exists(overlay)
+ if err != nil {
+ s.logger.Debug("stream handler: blocklisting: exists failed", "peer_address", overlay, "error", err)
+ s.logger.Error(nil, "stream handler: internal error while connecting with peer", "peer_address", overlay)
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(peerID)
+ return
+ }
+
+ if blocked {
+ s.logger.Error(nil, "stream handler: blocked connection from blocklisted peer", "peer_address", overlay)
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(peerID)
+ return
+ }
+
+ if exists := s.peers.addIfNotExists(stream.Conn(), overlay, i.FullNode); exists {
+ s.logger.Debug("stream handler: peer already exists", "peer_address", overlay)
+ if err = handshakeStream.FullClose(); err != nil {
+ s.logger.Debug("stream handler: could not close stream", "peer_address", overlay, "error", err)
+ s.logger.Error(nil, "stream handler: unable to handshake with peer", "peer_address", overlay)
+ _ = s.Disconnect(overlay, "unable to close handshake stream")
+ }
+ return
+ }
+
+ if err = handshakeStream.FullClose(); err != nil {
+ s.logger.Debug("stream handler: could not close stream", "peer_address", overlay, "error", err)
+ s.logger.Error(nil, "stream handler: unable to handshake with peer", "peer_address", overlay)
+ _ = s.Disconnect(overlay, "could not fully close stream on handshake")
+ return
+ }
+
+ if i.FullNode {
+ err = s.addressbook.Put(i.BzzAddress.Overlay, *i.BzzAddress)
+ if err != nil {
+ s.logger.Debug("stream handler: addressbook put error", "peer_id", peerID, "error", err)
+ s.logger.Error(nil, "stream handler: unable to persist peer", "peer_id", peerID)
+ _ = s.Disconnect(i.BzzAddress.Overlay, "unable to persist peer in addressbook")
+ return
+ }
+ }
+
+ peer := p2p.Peer{Address: overlay, FullNode: i.FullNode, EthereumAddress: i.BzzAddress.EthereumAddress}
+
+ s.protocolsmu.RLock()
+ for _, tn := range s.protocols {
+ if tn.ConnectIn != nil {
+ if err := tn.ConnectIn(s.ctx, peer); err != nil {
+ s.logger.Debug("stream handler: connectIn failed", "protocol", tn.Name, "version", tn.Version, "peer", overlay, "error", err)
+ _ = s.Disconnect(overlay, "failed to process inbound connection notifier")
+ s.protocolsmu.RUnlock()
+ return
+ }
+ }
+ }
+ s.protocolsmu.RUnlock()
+
+ if s.notifier != nil {
+ if !i.FullNode {
+ s.lightNodes.Connected(s.ctx, peer)
+ // light node announces explicitly
+ if err := s.notifier.Announce(s.ctx, peer.Address, i.FullNode); err != nil {
+ s.logger.Debug("stream handler: notifier.Announce failed", "peer", peer.Address, "error", err)
+ }
+
+ if s.lightNodes.Count() > s.lightNodeLimit {
+ // kick another node to fit this one in
+ p, err := s.lightNodes.RandomPeer(peer.Address)
+ if err != nil {
+ s.logger.Debug("stream handler: can't find a peer slot for light node", "error", err)
+ _ = s.Disconnect(peer.Address, "unable to find peer slot for light node")
+ return
+ } else {
+ loggerV1.Debug("stream handler: kicking away light node to make room for new node", "old_peer", p.String(), "new_peer", peer.Address)
+
+ _ = s.Disconnect(p, "kicking away light node to make room for peer")
+ return
+ }
+ }
+ } else {
+ if err := s.notifier.Connected(s.ctx, peer, false); err != nil {
+ s.logger.Debug("stream handler: notifier.Connected: peer disconnected", "peer", i.BzzAddress.Overlay, "error", err)
+ // note: this cannot be unit tested since the node
+ // waiting on handshakeStream.FullClose() on the other side
+ // might actually get a stream reset when we disconnect here
+ // resulting in a flaky response from the Connect method on
+ // the other side.
+ // that is why the Pick method has been added to the notifier
+ // interface, in addition to the possibility of deciding whether
+ // a peer connection is wanted prior to adding the peer to the
+ // peer registry and starting the protocols.
+ _ = s.Disconnect(overlay, "unable to signal connection notifier")
+ return
+ }
+ // when a full node connects, we gossip about it to the
+ // light nodes so that they can also have a chance at building
+ // a solid topology.
+ _ = s.lightNodes.EachPeer(func(addr swarm.Address, _ uint8) (bool, bool, error) {
+ go func(addressee, peer swarm.Address, fullnode bool) {
+ if err := s.notifier.AnnounceTo(s.ctx, addressee, peer, fullnode); err != nil {
+ s.logger.Debug("stream handler: notifier.AnnounceTo failed", "addressee", addressee, "peer", peer, "error", err)
+ }
+ }(addr, peer.Address, i.FullNode)
+ return false, false, nil
+ })
+ }
+ }
+
+ if !s.peers.Exists(overlay) {
+ s.logger.Warning("stream handler: inbound peer does not exist, disconnecting", "peer", overlay)
+ _ = s.Disconnect(overlay, "unknown inbound peer")
+ return
+ }
+
+ if s.reacher != nil {
+ s.reacher.Connected(overlay, i.BzzAddress.Underlay)
+ }
+
+ peerUserAgent := appendSpace(s.peerUserAgent(s.ctx, peerID))
+ s.networkStatus.Store(int32(p2p.NetworkStatusAvailable))
+
+ loggerV1.Debug("stream handler: successfully connected to peer (inbound)", "addresses", i.BzzAddress.ShortString(), "light", i.LightString(), "user_agent", peerUserAgent)
+ s.logger.Debug("stream handler: successfully connected to peer (inbound)", "address", i.BzzAddress.Overlay, "light", i.LightString(), "user_agent", peerUserAgent)
+}
+
+func (s *Service) Blocklist(overlay swarm.Address, duration time.Duration, reason string) error {
+ loggerV1 := s.logger.V(1).Register()
+
+ if s.NetworkStatus() != p2p.NetworkStatusAvailable {
+ return errors.New("cannot blocklist peer when network not available")
+ }
+
+ id, ok := s.peers.peerID(overlay)
+ if !ok {
+ return p2p.ErrPeerNotFound
+ }
+
+ full, _ := s.peers.fullnode(id)
+
+ loggerV1.Debug("libp2p blocklisting peer", "peer_address", overlay.String(), "duration", duration, "reason", reason)
+ if err := s.blocklist.Add(overlay, duration, reason, full); err != nil {
+
+ _ = s.Disconnect(overlay, "failed blocklisting peer")
+ return fmt.Errorf("blocklist peer %s: %w", overlay, err)
+ }
+
+ _ = s.Disconnect(overlay, reason)
+ return nil
+}
+
+func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error) {
+ loggerV1 := s.logger.V(1).Register()
+
+ defer func() {
+ err = s.determineCurrentNetworkStatus(err)
+ }()
+
+ // Extract the peer ID from the multiaddr.
+ info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
+ if err != nil {
+ return nil, fmt.Errorf("addr from p2p: %w", err)
+ }
+
+ hostAddr, err := buildHostAddress(info.ID)
+ if err != nil {
+ return nil, fmt.Errorf("build host address: %w", err)
+ }
+
+ remoteAddr := addr.Decapsulate(hostAddr)
+
+ if overlay, found := s.peers.isConnected(info.ID, remoteAddr); found {
+ address = &bzz.Address{
+ Overlay: overlay,
+ Underlay: addr,
+ }
+ return address, p2p.ErrAlreadyConnected
+ }
+
+ if err := s.connectionBreaker.Execute(func() error { return s.host.Connect(ctx, *info) }); err != nil {
+ if errors.Is(err, breaker.ErrClosed) {
+ return nil, p2p.NewConnectionBackoffError(err, s.connectionBreaker.ClosedUntil())
+ }
+ return nil, err
+ }
+
+ stream, err := s.newStreamForPeerID(ctx, info.ID, handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName)
+ if err != nil {
+ _ = s.host.Network().ClosePeer(info.ID)
+ return nil, fmt.Errorf("connect new stream: %w", err)
+ }
+
+ handshakeStream := newStream(stream)
+ i, err := s.handshakeService.Handshake(ctx, handshakeStream, stream.Conn().RemoteMultiaddr(), stream.Conn().RemotePeer())
+ if err != nil {
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(info.ID)
+ return nil, fmt.Errorf("handshake: %w", err)
+ }
+
+ if !i.FullNode {
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(info.ID)
+ return nil, p2p.ErrDialLightNode
+ }
+
+ overlay := i.BzzAddress.Overlay
+
+ blocked, err := s.blocklist.Exists(overlay)
+ if err != nil {
+ s.logger.Debug("blocklisting: exists failed", "peer_id", info.ID, "error", err)
+ s.logger.Error(nil, "internal error while connecting with peer", "peer_id", info.ID)
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(info.ID)
+ return nil, err
+ }
+
+ if blocked {
+ s.logger.Error(nil, "blocked connection to blocklisted peer", "peer_id", info.ID)
+ _ = handshakeStream.Reset()
+ _ = s.host.Network().ClosePeer(info.ID)
+ return nil, p2p.ErrPeerBlocklisted
+ }
+
+ if exists := s.peers.addIfNotExists(stream.Conn(), overlay, i.FullNode); exists {
+ if err := handshakeStream.FullClose(); err != nil {
+ _ = s.Disconnect(overlay, "failed closing handshake stream after connect")
+ return nil, fmt.Errorf("peer exists, full close: %w", err)
+ }
+
+ return i.BzzAddress, nil
+ }
+
+ if err := handshakeStream.FullClose(); err != nil {
+ _ = s.Disconnect(overlay, "could not fully close handshake stream after connect")
+ return nil, fmt.Errorf("connect full close %w", err)
+ }
+
+ if i.FullNode {
+ err = s.addressbook.Put(overlay, *i.BzzAddress)
+ if err != nil {
+ _ = s.Disconnect(overlay, "failed storing peer in addressbook")
+ return nil, fmt.Errorf("storing bzz address: %w", err)
+ }
+ }
+
+ s.protocolsmu.RLock()
+ for _, tn := range s.protocols {
+ if tn.ConnectOut != nil {
+ if err := tn.ConnectOut(ctx, p2p.Peer{Address: overlay, FullNode: i.FullNode, EthereumAddress: i.BzzAddress.EthereumAddress}); err != nil {
+ s.logger.Debug("connectOut: failed to connect", "protocol", tn.Name, "version", tn.Version, "peer", overlay, "error", err)
+ _ = s.Disconnect(overlay, "failed to process outbound connection notifier")
+ s.protocolsmu.RUnlock()
+ return nil, fmt.Errorf("connectOut: protocol: %s, version:%s: %w", tn.Name, tn.Version, err)
+ }
+ }
+ }
+ s.protocolsmu.RUnlock()
+
+ if !s.peers.Exists(overlay) {
+ _ = s.Disconnect(overlay, "outbound peer does not exist")
+ return nil, fmt.Errorf("libp2p connect: peer %s does not exist %w", overlay, p2p.ErrPeerNotFound)
+ }
+
+ if s.reacher != nil {
+ s.reacher.Connected(overlay, i.BzzAddress.Underlay)
+ }
+
+ peerUserAgent := appendSpace(s.peerUserAgent(ctx, info.ID))
+
+ loggerV1.Debug("successfully connected to peer (outbound)", "addresses", i.BzzAddress.ShortString(), "light", i.LightString(), "user_agent", peerUserAgent)
+ s.logger.Debug("successfully connected to peer (outbound)", "address", i.BzzAddress.Overlay, "light", i.LightString(), "user_agent", peerUserAgent)
+ return i.BzzAddress, nil
+}
+
+func (s *Service) Disconnect(overlay swarm.Address, reason string) (err error) {
+
+ s.logger.Debug("libp2p disconnect: disconnecting peer", "peer_address", overlay, "reason", reason)
+
+ // found is checked at the bottom of the function
+ found, full, peerID := s.peers.remove(overlay)
+
+ _ = s.host.Network().ClosePeer(peerID)
+
+ peer := p2p.Peer{Address: overlay, FullNode: full}
+
+ s.protocolsmu.RLock()
+ for _, tn := range s.protocols {
+ if tn.DisconnectOut != nil {
+ if err := tn.DisconnectOut(peer); err != nil {
+ s.logger.Debug("disconnectOut failed", "protocol", tn.Name, "version", tn.Version, "peer", overlay, "error", err)
+ }
+ }
+ }
+ s.protocolsmu.RUnlock()
+
+ if s.notifier != nil {
+ s.notifier.Disconnected(peer)
+ }
+ if s.lightNodes != nil {
+ s.lightNodes.Disconnected(peer)
+ }
+ if s.reacher != nil {
+ s.reacher.Disconnected(overlay)
+ }
+
+ if !found {
+ s.logger.Debug("libp2p disconnect: peer not found", "peer_address", overlay)
+ return p2p.ErrPeerNotFound
+ }
+
+ return nil
+}
+
+func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ peerID, found := s.peers.peerID(overlay)
+ if !found {
+ return nil, p2p.ErrPeerNotFound
+ }
+
+ streamlibp2p, err := s.newStreamForPeerID(ctx, peerID, protocolName, protocolVersion, streamName)
+ if err != nil {
+ return nil, fmt.Errorf("new stream for peerid: %w", err)
+ }
+
+ stream := newStream(streamlibp2p)
+
+ // tracing: add span context header
+ if headers == nil {
+ headers = make(p2p.Headers)
+ }
+ if err := s.tracer.AddContextHeader(ctx, headers); err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
+
+ _ = stream.Reset()
+ return nil, fmt.Errorf("new stream add context header fail: %w", err)
+ }
+
+ // exchange headers
+ ctx, cancel := context.WithTimeout(ctx, s.HeadersRWTimeout)
+ defer cancel()
+ if err := sendHeaders(ctx, headers, stream); err != nil {
+ _ = stream.Reset()
+ return nil, fmt.Errorf("send headers: %w", err)
+ }
+ return stream, nil
+}
+
+func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID, protocolName, protocolVersion, streamName string) (network.Stream, error) {
+ swarmStreamName := p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
+ st, err := s.host.NewStream(ctx, peerID, protocol.ID(swarmStreamName))
+ if err != nil {
+ if st != nil {
+ s.logger.Debug("stream experienced unexpected early close")
+ _ = st.Close()
+ }
+
+ var errNotSupported multistream.ErrNotSupported[protocol.ID]
+ if errors.As(err, &errNotSupported) {
+ return nil, p2p.NewIncompatibleStreamError(err)
+ }
+ if errors.Is(err, multistream.ErrIncorrectVersion) {
+ return nil, p2p.NewIncompatibleStreamError(err)
+ }
+ return nil, fmt.Errorf("create stream %s to %s: %w", swarmStreamName, peerID, err)
+ }
+ return st, nil
+}
+
+func (s *Service) Close() error {
+ if err := s.libp2pPeerstore.Close(); err != nil {
+ return err
+ }
+ if s.natManager != nil {
+ if err := s.natManager.Close(); err != nil {
+ return err
+ }
+ }
+
+ if err := s.pingDialer.Close(); err != nil {
+ return err
+ }
+ if s.reacher != nil {
+ if err := s.reacher.Close(); err != nil {
+ return err
+ }
+ }
+
+ return s.host.Close()
+}
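
For orientation, the outbound path above composes as follows. A minimal sketch, assuming a fully wired *libp2p.Service; the helper name is hypothetical and the pingpong protocol triple is taken from the protocol constants that appear later in this diff.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/ethersphere/bee/v2/pkg/p2p"
	"github.com/ethersphere/bee/v2/pkg/p2p/libp2p"
	ma "github.com/multiformats/go-multiaddr"
)

// dialAndOpen sketches the outbound flow: Connect runs the handshake
// (rejecting light nodes and blocklisted peers), then NewStream opens a
// protocol stream and exchanges headers within HeadersRWTimeout.
func dialAndOpen(ctx context.Context, svc *libp2p.Service, underlay ma.Multiaddr) (p2p.Stream, error) {
	bzzAddr, err := svc.Connect(ctx, underlay)
	if err != nil && !errors.Is(err, p2p.ErrAlreadyConnected) {
		return nil, fmt.Errorf("connect: %w", err)
	}
	// ErrAlreadyConnected still yields a usable address.
	return svc.NewStream(ctx, bzzAddr.Overlay, nil, "pingpong", "1.0.0", "pingpong")
}
```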
diff --git a/pkg/p2p/libp2p/libp2p_shared.go b/pkg/p2p/libp2p/libp2p_shared.go
new file mode 100644
index 00000000000..60dacf04e08
--- /dev/null
+++ b/pkg/p2p/libp2p/libp2p_shared.go
@@ -0,0 +1,336 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package libp2p
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/ethersphere/bee/v2"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/libp2p/internal/reacher"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/libp2p/go-libp2p/core/event"
+ libp2ppeer "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
+
+ lp2pswarm "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ libp2pping "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "libp2p"
+
+var (
+ _ p2p.Service = (*Service)(nil)
+ _ p2p.DebugService = (*Service)(nil)
+
+ // reachabilityOverridePublic overrides autonat to simply report
+ // public reachability status, it is set in the makefile.
+ reachabilityOverridePublic = "false"
+)
+
+const (
+ defaultLightNodeLimit = 100
+ peerUserAgentTimeout = time.Second
+
+ defaultHeadersRWTimeout = 10 * time.Second
+
+ IncomingStreamCountLimit = 5_000
+ OutgoingStreamCountLimit = 10_000
+)
+
+type lightnodes interface {
+ Connected(context.Context, p2p.Peer)
+ Disconnected(p2p.Peer)
+ Count() int
+ RandomPeer(swarm.Address) (swarm.Address, error)
+ EachPeer(pf topology.EachPeerFunc) error
+}
+
+func (s *Service) reachabilityWorker() error {
+ sub, err := s.host.EventBus().Subscribe([]interface{}{new(event.EvtLocalReachabilityChanged)})
+ if err != nil {
+ return fmt.Errorf("failed subscribing to reachability event %w", err)
+ }
+
+ go func() {
+ defer sub.Close()
+ for {
+ select {
+ case <-s.ctx.Done():
+ return
+ case e := <-sub.Out():
+ if r, ok := e.(event.EvtLocalReachabilityChanged); ok {
+ select {
+ case <-s.ready:
+ case <-s.halt:
+ return
+ }
+ s.logger.Debug("reachability changed", "new_reachability", r.Reachability.String())
+ s.notifier.UpdateReachability(p2p.ReachabilityStatus(r.Reachability))
+ }
+ }
+ }
+ }()
+ return nil
+}
+
+func (s *Service) SetPickyNotifier(n p2p.PickyNotifier) {
+ s.handshakeService.SetPicker(n)
+ s.notifier = n
+ s.reacher = reacher.New(s, n, nil)
+}
+
+func (s *Service) Protocols() []protocol.ID {
+ return s.host.Mux().Protocols()
+}
+
+func (s *Service) Addresses() (addresses []ma.Multiaddr, err error) {
+ for _, addr := range s.host.Addrs() {
+ a, err := buildUnderlayAddress(addr, s.host.ID())
+ if err != nil {
+ return nil, err
+ }
+
+ addresses = append(addresses, a)
+ }
+ if s.natAddrResolver != nil && len(addresses) > 0 {
+ a, err := s.natAddrResolver.Resolve(addresses[0])
+ if err != nil {
+ return nil, err
+ }
+ addresses = append(addresses, a)
+ }
+
+ return addresses, nil
+}
+
+func (s *Service) NATManager() basichost.NATManager {
+ return s.natManager
+}
+
+func buildHostAddress(peerID libp2ppeer.ID) (ma.Multiaddr, error) {
+ return ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", peerID.String()))
+}
+
+func buildUnderlayAddress(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
+ // Build host multiaddress
+ hostAddr, err := buildHostAddress(peerID)
+ if err != nil {
+ return nil, err
+ }
+
+ return addr.Encapsulate(hostAddr), nil
+}
+
+// disconnected is a callback registered with the peer registry; it runs when a peer disconnects.
+func (s *Service) disconnected(address swarm.Address) {
+ peer := p2p.Peer{Address: address}
+ peerID, found := s.peers.peerID(address)
+ if found {
+ // peerID might not always be found on shutdown
+ full, found := s.peers.fullnode(peerID)
+ if found {
+ peer.FullNode = full
+ }
+ }
+ s.protocolsmu.RLock()
+ for _, tn := range s.protocols {
+ if tn.DisconnectIn != nil {
+ if err := tn.DisconnectIn(peer); err != nil {
+ s.logger.Debug("disconnectIn failed", tn.Name, "version", tn.Version, "peer", address, "error", err)
+ }
+ }
+ }
+
+ s.protocolsmu.RUnlock()
+
+ if s.notifier != nil {
+ s.notifier.Disconnected(peer)
+ }
+ if s.lightNodes != nil {
+ s.lightNodes.Disconnected(peer)
+ }
+ if s.reacher != nil {
+ s.reacher.Disconnected(address)
+ }
+}
+
+func (s *Service) Peers() []p2p.Peer {
+ return s.peers.peers()
+}
+
+func (s *Service) Blocklisted(overlay swarm.Address) (bool, error) {
+ return s.blocklist.Exists(overlay)
+}
+
+func (s *Service) BlocklistedPeers() ([]p2p.BlockListedPeer, error) {
+ return s.blocklist.Peers()
+}
+
+// SetWelcomeMessage sets the welcome message for the handshake protocol.
+func (s *Service) SetWelcomeMessage(val string) error {
+ return s.handshakeService.SetWelcomeMessage(val)
+}
+
+// GetWelcomeMessage returns the value of the welcome message.
+func (s *Service) GetWelcomeMessage() string {
+ return s.handshakeService.GetWelcomeMessage()
+}
+
+func (s *Service) Ready() error {
+ if err := s.reachabilityWorker(); err != nil {
+ return fmt.Errorf("reachability worker: %w", err)
+ }
+
+ close(s.ready)
+ return nil
+}
+
+func (s *Service) Halt() {
+ close(s.halt)
+}
+
+func (s *Service) Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error) {
+ info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
+ if err != nil {
+ return rtt, fmt.Errorf("unable to parse underlay address: %w", err)
+ }
+
+ // Add the address to libp2p peerstore for it to be dialable
+ s.pingDialer.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL)
+
+ // Cleanup connection after ping is done
+ defer func() {
+ _ = s.pingDialer.Network().ClosePeer(info.ID)
+ }()
+
+ select {
+ case <-ctx.Done():
+ return rtt, ctx.Err()
+ case res := <-libp2pping.Ping(ctx, s.pingDialer, info.ID):
+ return res.RTT, res.Error
+ }
+}
+
+// peerUserAgent returns the User Agent string of the connected peer if
+// the peer provides it. It ignores the default libp2p user agent string
+// "github.com/libp2p/go-libp2p" and returns an empty string in that case.
+func (s *Service) peerUserAgent(ctx context.Context, peerID libp2ppeer.ID) string {
+ ctx, cancel := context.WithTimeout(ctx, peerUserAgentTimeout)
+ defer cancel()
+ var (
+ v interface{}
+ err error
+ )
+ // The peerstore may not contain all keys and values right after the connection is created.
+ // This retry mechanism makes user agent propagation more reliable.
+ for iterate := true; iterate; {
+ v, err = s.host.Peerstore().Get(peerID, "AgentVersion")
+ if err == nil {
+ break
+ }
+ select {
+ case <-ctx.Done():
+ iterate = false
+ case <-time.After(50 * time.Millisecond):
+ }
+ }
+ if err != nil {
+ // error is ignored as user agent is informative only
+ return ""
+ }
+ ua, ok := v.(string)
+ if !ok {
+ return ""
+ }
+ // Ignore the default user agent.
+ if ua == "github.com/libp2p/go-libp2p" {
+ return ""
+ }
+ return ua
+}
+
+// NetworkStatus implements the p2p.NetworkStatuser interface.
+func (s *Service) NetworkStatus() p2p.NetworkStatus {
+ return p2p.NetworkStatus(s.networkStatus.Load())
+}
+
+// determineCurrentNetworkStatus determines whether the network is
+// available or unavailable based on the given error, and joins
+// p2p.ErrNetworkUnavailable into the returned error when unavailable.
+// The result of this operation is stored and is reflected in the
+// results of future NetworkStatus method calls.
+func (s *Service) determineCurrentNetworkStatus(err error) error {
+ switch {
+ case err == nil:
+ s.networkStatus.Store(int32(p2p.NetworkStatusAvailable))
+ case errors.Is(err, lp2pswarm.ErrDialBackoff):
+ if s.NetworkStatus() == p2p.NetworkStatusUnavailable {
+ err = errors.Join(err, p2p.ErrNetworkUnavailable)
+ }
+ case isNetworkOrHostUnreachableError(err):
+ s.networkStatus.Store(int32(p2p.NetworkStatusUnavailable))
+ err = errors.Join(err, p2p.ErrNetworkUnavailable)
+ default:
+ err = fmt.Errorf("network status unknown: %w", err)
+ }
+ return err
+}
+
+// appendSpace adds a leading space character if the string is not empty.
+// It is useful for constructing log messages with conditional substrings.
+func appendSpace(s string) string {
+ if s == "" {
+ return ""
+ }
+ return " " + s
+}
+
+// userAgent returns a User Agent string passed to the libp2p host to identify peer node.
+func userAgent() string {
+ return fmt.Sprintf("bee/%s %s %s/%s", bee.Version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+}
+
+// isNetworkOrHostUnreachableError reports whether the given error
+// indicates that the host or network is unreachable.
+func isNetworkOrHostUnreachableError(err error) bool {
+ var de *lp2pswarm.DialError
+ if !errors.As(err, &de) {
+ return false
+ }
+
+ // Since TransportError doesn't implement the Unwrap
+ // method we need to inspect the errors manually.
+ for i := range de.DialErrors {
+ var te *lp2pswarm.TransportError
+ if !errors.As(&de.DialErrors[i], &te) {
+ continue
+ }
+
+ var ne *net.OpError
+ if !errors.As(te.Cause, &ne) || ne.Op != "dial" {
+ continue
+ }
+
+ var se *os.SyscallError
+ if errors.As(ne, &se) && strings.HasPrefix(se.Syscall, "connect") &&
+ (errors.Is(se.Err, errHostUnreachable) || errors.Is(se.Err, errNetworkUnreachable)) {
+ return true
+ }
+ }
+ return false
+}
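
The pattern applied to this package, and repeated for pingpong, batchstore, and listener below, is the standard Go build-constraint split: a native file under `!js`, a wasm file under `js`, and an unconstrained `*_shared.go` file compiled for both. A minimal sketch of the layout; the file, package, and function names are illustrative, not part of this diff.

```go
// foo_js.go — chosen when building with GOOS=js GOARCH=wasm
//go:build js
// +build js

package foo

func newTransport() string { return "webtransport" } // wasm-only implementation

// foo.go would carry the mirror image:
//
//   //go:build !js
//   // +build !js
//
//   package foo
//
//   func newTransport() string { return "tcp" }
//
// while foo_shared.go, with no constraint, holds the declarations both
// variants need (constants, interfaces, helpers), exactly like
// libp2p_shared.go above.
```

Building with `GOOS=js GOARCH=wasm go build ./...` then selects foo_js.go plus foo_shared.go; a native build picks foo.go plus foo_shared.go.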
diff --git a/pkg/p2p/libp2p/main_test.go b/pkg/p2p/libp2p/main_test.go
index fd6219974c7..0bda966a466 100644
--- a/pkg/p2p/libp2p/main_test.go
+++ b/pkg/p2p/libp2p/main_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2022 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/p2p/libp2p/metrics.go b/pkg/p2p/libp2p/metrics.go
index 378ec26f6b0..f9a222de878 100644
--- a/pkg/p2p/libp2p/metrics.go
+++ b/pkg/p2p/libp2p/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/p2p/libp2p/protocols_test.go b/pkg/p2p/libp2p/protocols_test.go
index 4563083726a..7e34cf87483 100644
--- a/pkg/p2p/libp2p/protocols_test.go
+++ b/pkg/p2p/libp2p/protocols_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/p2p/libp2p/stream.go b/pkg/p2p/libp2p/stream.go
index 93e7ce6181a..7c892c64373 100644
--- a/pkg/p2p/libp2p/stream.go
+++ b/pkg/p2p/libp2p/stream.go
@@ -1,6 +1,5 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package libp2p
@@ -9,16 +8,9 @@ import (
"io"
"time"
- "github.com/ethersphere/bee/v2/pkg/p2p"
"github.com/libp2p/go-libp2p/core/network"
)
-var (
- closeDeadline = 30 * time.Second
- errExpectedEof = errors.New("read: expected eof")
-)
-var _ p2p.Stream = (*stream)(nil)
-
type stream struct {
network.Stream
headers map[string][]byte
@@ -29,13 +21,6 @@ type stream struct {
func newStream(s network.Stream, metrics metrics) *stream {
return &stream{Stream: s, metrics: metrics}
}
-func (s *stream) Headers() p2p.Headers {
- return s.headers
-}
-
-func (s *stream) ResponseHeaders() p2p.Headers {
- return s.responseHeaders
-}
func (s *stream) Reset() error {
defer s.metrics.StreamResetCount.Inc()
diff --git a/pkg/p2p/libp2p/stream_js.go b/pkg/p2p/libp2p/stream_js.go
new file mode 100644
index 00000000000..b4d2f63dcef
--- /dev/null
+++ b/pkg/p2p/libp2p/stream_js.go
@@ -0,0 +1,56 @@
+//go:build js
+// +build js
+
+package libp2p
+
+import (
+ "errors"
+ "io"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+)
+
+type stream struct {
+ network.Stream
+ headers map[string][]byte
+ responseHeaders map[string][]byte
+}
+
+func newStream(s network.Stream) *stream {
+ return &stream{Stream: s}
+}
+
+func (s *stream) Reset() error {
+ return s.Stream.Reset()
+}
+
+func (s *stream) FullClose() error {
+ // close the stream to make sure it is gc'd
+ defer s.Close()
+
+ if err := s.CloseWrite(); err != nil {
+ _ = s.Stream.Reset()
+ return err
+ }
+
+ // So we don't wait forever
+ _ = s.SetDeadline(time.Now().Add(closeDeadline))
+
+ // We *have* to observe the EOF. Otherwise, we leak the stream.
+ // Now, technically, we should do this *before* returning from
+ // SendMessage, as the message hasn't really been sent until we
+ // see the EOF, but we don't actually *know* what protocol the
+ // other side is speaking.
+ n, err := s.Read([]byte{0})
+ if n > 0 || err == nil {
+ _ = s.Stream.Reset()
+ return errExpectedEof
+ }
+ if !errors.Is(err, io.EOF) {
+ _ = s.Stream.Reset()
+ return err
+ }
+ return nil
+}
diff --git a/pkg/p2p/libp2p/stream_shared.go b/pkg/p2p/libp2p/stream_shared.go
new file mode 100644
index 00000000000..725887665e3
--- /dev/null
+++ b/pkg/p2p/libp2p/stream_shared.go
@@ -0,0 +1,26 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package libp2p
+
+import (
+ "errors"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+)
+
+var (
+ closeDeadline = 30 * time.Second
+ errExpectedEof = errors.New("read: expected eof")
+)
+var _ p2p.Stream = (*stream)(nil)
+
+func (s *stream) Headers() p2p.Headers {
+ return s.headers
+}
+
+func (s *stream) ResponseHeaders() p2p.Headers {
+ return s.responseHeaders
+}
diff --git a/pkg/p2p/libp2p/tracing_test.go b/pkg/p2p/libp2p/tracing_test.go
index 7e02f8e2d7c..ebcff6472eb 100644
--- a/pkg/p2p/libp2p/tracing_test.go
+++ b/pkg/p2p/libp2p/tracing_test.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/p2p/libp2p/unreachable_errors_wasm.go b/pkg/p2p/libp2p/unreachable_errors_wasm.go
new file mode 100644
index 00000000000..f4e3426a18f
--- /dev/null
+++ b/pkg/p2p/libp2p/unreachable_errors_wasm.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js || wasip1
+
+package libp2p
+
+// Collection of errors returned by the underlying
+// operating system that signal network unavailability.
+var (
+ errHostUnreachable error = nil
+ errNetworkUnreachable error = nil
+)
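
Note the effect of the nil sentinels: the shared isNetworkOrHostUnreachableError above ends in errors.Is checks against these variables, and errors.Is with a nil target matches only a nil error. On js/wasip1 the unreachable-host detection is therefore a deliberate no-op, and the network status is never flipped to unavailable from syscall errors. A quick check of that semantic:

```go
package main

import (
	"errors"
	"fmt"
)

// As in unreachable_errors_wasm.go: the sentinel is nil on js/wasip1.
var errHostUnreachable error = nil

func main() {
	err := errors.New("connect: host unreachable")
	// errors.Is(err, nil) reports err == nil, so a real error never matches.
	fmt.Println(errors.Is(err, errHostUnreachable)) // false
}
```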
diff --git a/pkg/pingpong/metrics.go b/pkg/pingpong/metrics.go
index 155b3abdb8e..be3e153a5fe 100644
--- a/pkg/pingpong/metrics.go
+++ b/pkg/pingpong/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/pingpong/pingpong.go b/pkg/pingpong/pingpong.go
index ba09d7eaab1..e5fe438fb14 100644
--- a/pkg/pingpong/pingpong.go
+++ b/pkg/pingpong/pingpong.go
@@ -1,9 +1,6 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package pingpong exposes the simple ping-pong protocol
-// which measures round-trip-time with other peers.
package pingpong
import (
@@ -21,19 +18,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/tracing"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "pinpong"
-
-const (
- protocolName = "pingpong"
- protocolVersion = "1.0.0"
- streamName = "pingpong"
-)
-
-type Interface interface {
- Ping(ctx context.Context, address swarm.Address, msgs ...string) (rtt time.Duration, err error)
-}
-
type Service struct {
streamer p2p.Streamer
logger log.Logger
@@ -50,19 +34,6 @@ func New(streamer p2p.Streamer, logger log.Logger, tracer *tracing.Tracer) *Serv
}
}
-func (s *Service) Protocol() p2p.ProtocolSpec {
- return p2p.ProtocolSpec{
- Name: protocolName,
- Version: protocolVersion,
- StreamSpecs: []p2p.StreamSpec{
- {
- Name: streamName,
- Handler: s.handler,
- },
- },
- }
-}
-
func (s *Service) Ping(ctx context.Context, address swarm.Address, msgs ...string) (rtt time.Duration, err error) {
span, _, ctx := s.tracer.StartSpanFromContext(ctx, "pingpong-p2p-ping", s.logger)
defer span.Finish()
diff --git a/pkg/pingpong/pingpong_js.go b/pkg/pingpong/pingpong_js.go
new file mode 100644
index 00000000000..bde6c1782ad
--- /dev/null
+++ b/pkg/pingpong/pingpong_js.go
@@ -0,0 +1,92 @@
+//go:build js
+// +build js
+
+package pingpong
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/pingpong/pb"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+)
+
+type Service struct {
+ streamer p2p.Streamer
+ logger log.Logger
+ tracer *tracing.Tracer
+}
+
+func New(streamer p2p.Streamer, logger log.Logger, tracer *tracing.Tracer) *Service {
+ return &Service{
+ streamer: streamer,
+ logger: logger.WithName(loggerName).Register(),
+ tracer: tracer,
+ }
+}
+
+func (s *Service) Ping(ctx context.Context, address swarm.Address, msgs ...string) (rtt time.Duration, err error) {
+ span, _, ctx := s.tracer.StartSpanFromContext(ctx, "pingpong-p2p-ping", s.logger)
+ defer span.Finish()
+
+ start := time.Now()
+ stream, err := s.streamer.NewStream(ctx, address, nil, protocolName, protocolVersion, streamName)
+ if err != nil {
+ return 0, fmt.Errorf("new stream: %w", err)
+ }
+ defer func() {
+ go stream.FullClose()
+ }()
+
+ w, r := protobuf.NewWriterAndReader(stream)
+
+ var pong pb.Pong
+ for _, msg := range msgs {
+ if err := w.WriteMsgWithContext(ctx, &pb.Ping{
+ Greeting: msg,
+ }); err != nil {
+ return 0, fmt.Errorf("write message: %w", err)
+ }
+
+ if err := r.ReadMsgWithContext(ctx, &pong); err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return 0, fmt.Errorf("read message: %w", err)
+ }
+
+ }
+ return time.Since(start), nil
+}
+
+func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) error {
+ w, r := protobuf.NewWriterAndReader(stream)
+ defer stream.FullClose()
+
+ span, _, ctx := s.tracer.StartSpanFromContext(ctx, "pingpong-p2p-handler", s.logger)
+ defer span.Finish()
+
+ var ping pb.Ping
+ for {
+ if err := r.ReadMsgWithContext(ctx, &ping); err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return fmt.Errorf("read message: %w", err)
+ }
+
+ if err := w.WriteMsgWithContext(ctx, &pb.Pong{
+ Response: "{" + ping.Greeting + "}",
+ }); err != nil {
+ return fmt.Errorf("write message: %w", err)
+ }
+ }
+ return nil
+}
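
A usage sketch for the wasm Service above; the caller name, peer address, and greetings are illustrative. Note that the returned duration covers all greetings sent on the stream, not a single round trip.

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/ethersphere/bee/v2/pkg/pingpong"
	"github.com/ethersphere/bee/v2/pkg/swarm"
)

// measureRTT assumes svc was built with pingpong.New and that the peer is
// already connected on the underlying p2p service.
func measureRTT(ctx context.Context, svc *pingpong.Service, peer swarm.Address) (time.Duration, error) {
	rtt, err := svc.Ping(ctx, peer, "hey", "there")
	if err != nil {
		return 0, fmt.Errorf("ping %s: %w", peer, err)
	}
	return rtt, nil
}
```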
diff --git a/pkg/pingpong/pingpong_shared.go b/pkg/pingpong/pingpong_shared.go
new file mode 100644
index 00000000000..49c0d31c6e8
--- /dev/null
+++ b/pkg/pingpong/pingpong_shared.go
@@ -0,0 +1,41 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pingpong exposes the simple ping-pong protocol
+// which measures round-trip-time with other peers.
+package pingpong
+
+import (
+ "context"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "pingpong"
+
+const (
+ protocolName = "pingpong"
+ protocolVersion = "1.0.0"
+ streamName = "pingpong"
+)
+
+type Interface interface {
+ Ping(ctx context.Context, address swarm.Address, msgs ...string) (rtt time.Duration, err error)
+}
+
+func (s *Service) Protocol() p2p.ProtocolSpec {
+ return p2p.ProtocolSpec{
+ Name: protocolName,
+ Version: protocolVersion,
+ StreamSpecs: []p2p.StreamSpec{
+ {
+ Name: streamName,
+ Handler: s.handler,
+ },
+ },
+ }
+}
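
Keeping Protocol() in the shared file means both build variants register the same spec. A wiring sketch, modelled on how node start-up is assumed to mount protocols; the p2ps value is the *libp2p.Service constructed elsewhere, and AddProtocol is assumed to be part of the p2p service interface.

```go
package example

import (
	"fmt"

	"github.com/ethersphere/bee/v2/pkg/log"
	"github.com/ethersphere/bee/v2/pkg/p2p/libp2p"
	"github.com/ethersphere/bee/v2/pkg/pingpong"
	"github.com/ethersphere/bee/v2/pkg/tracing"
)

// register mounts the pingpong spec so inbound "pingpong/1.0.0/pingpong"
// streams are routed to the service handler.
func register(p2ps *libp2p.Service, logger log.Logger, tracer *tracing.Tracer) (*pingpong.Service, error) {
	svc := pingpong.New(p2ps, logger, tracer)
	if err := p2ps.AddProtocol(svc.Protocol()); err != nil {
		return nil, fmt.Errorf("add pingpong protocol: %w", err)
	}
	return svc, nil
}
```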
diff --git a/pkg/postage/batchstore/metrics.go b/pkg/postage/batchstore/metrics.go
index de78aa5ef54..bb5cba0330e 100644
--- a/pkg/postage/batchstore/metrics.go
+++ b/pkg/postage/batchstore/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/postage/batchstore/store.go b/pkg/postage/batchstore/store.go
index 3c665feaa64..28ecd4fa416 100644
--- a/pkg/postage/batchstore/store.go
+++ b/pkg/postage/batchstore/store.go
@@ -1,14 +1,10 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package batchstore
import (
- "context"
- "encoding/hex"
"errors"
- "fmt"
"math"
"math/big"
"sync"
@@ -19,22 +15,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/storage"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "batchstore"
-
-const (
- batchKeyPrefix = "batchstore_batch_"
- valueKeyPrefix = "batchstore_value_"
- chainStateKey = "batchstore_chainstate"
- reserveRadiusKey = "batchstore_radius"
-)
-
-// ErrNotFound signals that the element was not found.
-var ErrNotFound = errors.New("batchstore: not found")
-var ErrStorageRadiusExceeds = errors.New("batchstore: storage radius must not exceed reserve radius")
-
-type evictFn func(batchID []byte) error
-
// store implements postage.Storer
type store struct {
capacity int
@@ -89,262 +69,6 @@ func New(st storage.StateStorer, ev evictFn, capacity int, logger log.Logger) (p
return s, nil
}
-func (s *store) Radius() uint8 {
- return uint8(s.radius.Load())
-}
-
-func (s *store) GetChainState() *postage.ChainState {
- return s.cs.Load()
-}
-
-// Get returns a batch from the batchstore with the given ID.
-func (s *store) Get(id []byte) (*postage.Batch, error) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
- return s.get(id)
-}
-
-func (s *store) get(id []byte) (*postage.Batch, error) {
-
- b := &postage.Batch{}
- err := s.store.Get(batchKey(id), b)
- if err != nil {
- return nil, fmt.Errorf("get batch %s: %w", hex.EncodeToString(id), err)
- }
- return b, nil
-}
-
-// Exists is implementation of postage.Storer interface Exists method.
-func (s *store) Exists(id []byte) (bool, error) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
-
- switch err := s.store.Get(batchKey(id), new(postage.Batch)); {
- case err == nil:
- return true, nil
- case errors.Is(err, storage.ErrNotFound):
- return false, nil
- default:
- return false, err
- }
-}
-
-// Iterate is implementation of postage.Storer interface Iterate method.
-func (s *store) Iterate(cb func(*postage.Batch) (bool, error)) error {
- return s.store.Iterate(batchKeyPrefix, func(key, value []byte) (bool, error) {
- b := &postage.Batch{}
- if err := b.UnmarshalBinary(value); err != nil {
- return false, err
- }
- return cb(b)
- })
-}
-
-// Save is implementation of postage.Storer interface Save method.
-// This method has side effects; it also updates the radius of the node if successful.
-func (s *store) Save(batch *postage.Batch) error {
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
- switch err := s.store.Get(batchKey(batch.ID), new(postage.Batch)); {
- case errors.Is(err, storage.ErrNotFound):
- if err := s.store.Put(batchKey(batch.ID), batch); err != nil {
- return err
- }
-
- if err := s.saveBatch(batch); err != nil {
- return err
- }
-
- s.logger.Debug("batch saved", "batch_id", hex.EncodeToString(batch.ID), "batch_depth", batch.Depth, "batch_value", batch.Value.Int64(), "radius", s.radius.Load())
-
- return nil
- case err != nil:
- return fmt.Errorf("batchstore: save batch %s depth %d value %d failed: get batch: %w", hex.EncodeToString(batch.ID), batch.Depth, batch.Value.Int64(), err)
- }
-
- return fmt.Errorf("batchstore: save batch %s depth %d value %d failed: already exists", hex.EncodeToString(batch.ID), batch.Depth, batch.Value.Int64())
-}
-
-// Update is implementation of postage.Storer interface Update method.
-// This method has side effects; it also updates the radius of the node if successful.
-func (s *store) Update(batch *postage.Batch, value *big.Int, depth uint8) error {
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
- oldBatch := &postage.Batch{}
-
- s.logger.Debug("update batch", "batch_id", hex.EncodeToString(batch.ID), "new_batch_depth", depth, "new_batch_value", value.Int64())
-
- switch err := s.store.Get(batchKey(batch.ID), oldBatch); {
- case errors.Is(err, storage.ErrNotFound):
- return ErrNotFound
- case err != nil:
- return fmt.Errorf("get batch %s: %w", hex.EncodeToString(batch.ID), err)
- }
-
- if err := s.store.Delete(valueKey(batch.Value, batch.ID)); err != nil {
- return err
- }
-
- batch.Value.Set(value)
- batch.Depth = depth
-
- err := s.store.Put(batchKey(batch.ID), batch)
- if err != nil {
- return err
- }
-
- err = s.saveBatch(batch)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// PutChainState is implementation of postage.Storer interface PutChainState method.
-// This method has side effects; it purges expired batches and unreserves underfunded
-// ones before it stores the chain state in the store.
-func (s *store) PutChainState(cs *postage.ChainState) error {
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
- s.cs.Store(cs)
-
- s.logger.Debug("put chain state", "block", cs.Block, "amount", cs.TotalAmount.Int64(), "price", cs.CurrentPrice.Int64())
-
- err := s.cleanup()
- if err != nil {
- return fmt.Errorf("batchstore: put chain state clean up: %w", err)
- }
-
- err = s.computeRadius()
- if err != nil {
- return fmt.Errorf("batchstore: put chain state adjust radius: %w", err)
- }
-
- return s.store.Put(chainStateKey, cs)
-}
-
-func (s *store) Commitment() (uint64, error) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
-
- var totalCommitment int
- err := s.store.Iterate(batchKeyPrefix, func(key, value []byte) (bool, error) {
-
- b := &postage.Batch{}
- if err := b.UnmarshalBinary(value); err != nil {
- return false, err
- }
-
- totalCommitment += exp2(uint(b.Depth))
-
- return false, nil
- })
- if err != nil {
- return 0, err
- }
- return uint64(totalCommitment), err
-}
-
-// Reset is implementation of postage.Storer interface Reset method.
-func (s *store) Reset() error {
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
- const prefix = "batchstore_"
- if err := s.store.Iterate(prefix, func(k, _ []byte) (bool, error) {
- return false, s.store.Delete(string(k))
- }); err != nil {
- return err
- }
-
- s.cs.Store(&postage.ChainState{
- Block: 0,
- TotalAmount: big.NewInt(0),
- CurrentPrice: big.NewInt(0),
- })
-
- s.radius = atomic.Uint32{}
-
- return nil
-}
-
-// saveBatch adds a new batch to the batchstore by creating a new value item, cleaning up
-// expired batches, and computing a new radius.
-// Must be called under lock.
-func (s *store) saveBatch(b *postage.Batch) error {
-
- if err := s.store.Put(valueKey(b.Value, b.ID), nil); err != nil {
- return fmt.Errorf("batchstore: allocate batch %x: %w", b.ID, err)
- }
-
- err := s.cleanup()
- if err != nil {
- return fmt.Errorf("batchstore: allocate batch cleanup %x: %w", b.ID, err)
- }
-
- err = s.computeRadius()
- if err != nil {
- return fmt.Errorf("batchstore: allocate batch adjust radius %x: %w", b.ID, err)
- }
-
- return nil
-}
-
-// cleanup evicts and removes expired batch.
-// Must be called under lock.
-func (s *store) cleanup() error {
-
- var evictions []*postage.Batch
-
- err := s.store.Iterate(valueKeyPrefix, func(key, value []byte) (stop bool, err error) {
-
- b, err := s.get(valueKeyToID(key))
- if err != nil {
- return false, err
- }
-
- // batches whose balance is below the total cumulative payout
- if b.Value.Cmp(s.cs.Load().TotalAmount) <= 0 {
- evictions = append(evictions, b)
- } else {
- return true, nil // stop early as an optimization at first value above the total cumulative payout
- }
-
- return false, nil
- })
- if err != nil {
- return err
- }
-
- for _, b := range evictions {
- s.logger.Debug("batch expired", "batch_id", hex.EncodeToString(b.ID))
- if s.batchExpiry != nil {
- err = s.batchExpiry.HandleStampExpiry(context.Background(), b.ID)
- if err != nil {
- return fmt.Errorf("handle stamp expiry, batch %x: %w", b.ID, err)
- }
- }
- err = s.evictFn(b.ID)
- if err != nil {
- return fmt.Errorf("evict batch %x: %w", b.ID, err)
- }
- err := s.store.Delete(valueKey(b.Value, b.ID))
- if err != nil {
- return fmt.Errorf("delete value key for batch %x: %w", b.ID, err)
- }
- err = s.store.Delete(batchKey(b.ID))
- if err != nil {
- return fmt.Errorf("delete batch %x: %w", b.ID, err)
- }
- }
-
- return nil
-}
-
// computeRadius calculates the radius by using the sum of all batch depths
// and the node capacity using the formula totalCommitment/node_capacity = 2^R.
// Must be called under lock.
@@ -381,30 +105,3 @@ func (s *store) computeRadius() error {
return s.store.Put(reserveRadiusKey, &radius)
}
-
-// exp2 returns the e-th power of 2
-func exp2(e uint) int {
- return 1 << e
-}
-
-// batchKey returns the index key for the batch ID used in the by-ID batch index.
-func batchKey(batchID []byte) string {
- return batchKeyPrefix + string(batchID)
-}
-
-// valueKey returns the index key for the batch ID used in the by-ID batch index.
-func valueKey(val *big.Int, batchID []byte) string {
- value := make([]byte, 32)
- val.FillBytes(value) // zero-extended big-endian byte slice
- return valueKeyPrefix + string(value) + string(batchID)
-}
-
-// valueKeyToID extracts the batch ID from a value key - used in value-based iteration.
-func valueKeyToID(key []byte) []byte {
- l := len(key)
- return key[l-32 : l]
-}
-
-func (s *store) SetBatchExpiryHandler(be postage.BatchExpiryHandler) {
- s.batchExpiry = be
-}
diff --git a/pkg/postage/batchstore/store_js.go b/pkg/postage/batchstore/store_js.go
new file mode 100644
index 00000000000..765625848a3
--- /dev/null
+++ b/pkg/postage/batchstore/store_js.go
@@ -0,0 +1,102 @@
+//go:build js
+// +build js
+
+package batchstore
+
+import (
+ "errors"
+ "math"
+ "math/big"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+)
+
+// store implements postage.Storer
+type store struct {
+ capacity int
+ store storage.StateStorer // State store backend to persist batches.
+
+ cs atomic.Pointer[postage.ChainState]
+
+ radius atomic.Uint32
+ evictFn evictFn // evict function
+ logger log.Logger
+
+ batchExpiry postage.BatchExpiryHandler
+
+ mtx sync.RWMutex
+}
+
+// New constructs a new postage batch store.
+// It initialises both chain state and reserve state from the persistent state store.
+func New(st storage.StateStorer, ev evictFn, capacity int, logger log.Logger) (postage.Storer, error) {
+ cs := &postage.ChainState{}
+ err := st.Get(chainStateKey, cs)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, err
+ }
+ cs = &postage.ChainState{
+ Block: 0,
+ TotalAmount: big.NewInt(0),
+ CurrentPrice: big.NewInt(0),
+ }
+ }
+ var radius uint8
+ err = st.Get(reserveRadiusKey, &radius)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, err
+ }
+ }
+
+ s := &store{
+ capacity: capacity,
+ store: st,
+ evictFn: ev,
+ logger: logger.WithName(loggerName).Register(),
+ }
+ s.cs.Store(cs)
+
+ s.radius.Store(uint32(radius))
+
+ return s, nil
+}
+
+// computeRadius calculates the radius by using the sum of all batch depths
+// and the node capacity using the formula totalCommitment/node_capacity = 2^R.
+// Must be called under lock.
+func (s *store) computeRadius() error {
+
+ var totalCommitment int
+
+ err := s.store.Iterate(batchKeyPrefix, func(key, value []byte) (bool, error) {
+
+ b := &postage.Batch{}
+ if err := b.UnmarshalBinary(value); err != nil {
+ return false, err
+ }
+
+ totalCommitment += exp2(uint(b.Depth))
+
+ return false, nil
+ })
+ if err != nil {
+ return err
+ }
+
+ var radius uint8
+ if totalCommitment > s.capacity {
+ // totalCommitment/node_capacity = 2^R
+ // log2(totalCommitment/node_capacity) = R
+ radius = uint8(math.Ceil(math.Log2(float64(totalCommitment) / float64(s.capacity))))
+ }
+
+ s.radius.Store(uint32(radius))
+
+ return s.store.Put(reserveRadiusKey, &radius)
+}
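
To make the computeRadius formula concrete, a worked example with illustrative numbers (not taken from a real node): three batches of depth 20 commit 3·2^20 ≈ 3.15 million chunks; against a capacity of 1,000,000 that yields R = ceil(log2(3.146)) = 2.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	capacity := 1_000_000
	totalCommitment := 3 * (1 << 20) // three batches of depth 20, as summed over batchKeyPrefix

	var radius uint8
	if totalCommitment > capacity {
		// totalCommitment/node_capacity = 2^R  =>  R = ceil(log2(ratio))
		radius = uint8(math.Ceil(math.Log2(float64(totalCommitment) / float64(capacity))))
	}
	fmt.Println(radius) // 2
}
```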
diff --git a/pkg/postage/batchstore/store_shared.go b/pkg/postage/batchstore/store_shared.go
new file mode 100644
index 00000000000..3d0276e7e84
--- /dev/null
+++ b/pkg/postage/batchstore/store_shared.go
@@ -0,0 +1,316 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package batchstore
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math/big"
+ "sync/atomic"
+
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "batchstore"
+
+const (
+ batchKeyPrefix = "batchstore_batch_"
+ valueKeyPrefix = "batchstore_value_"
+ chainStateKey = "batchstore_chainstate"
+ reserveRadiusKey = "batchstore_radius"
+)
+
+// ErrNotFound signals that the element was not found.
+var ErrNotFound = errors.New("batchstore: not found")
+var ErrStorageRadiusExceeds = errors.New("batchstore: storage radius must not exceed reserve radius")
+
+type evictFn func(batchID []byte) error
+
+func (s *store) Radius() uint8 {
+ return uint8(s.radius.Load())
+}
+
+func (s *store) GetChainState() *postage.ChainState {
+ return s.cs.Load()
+}
+
+// Get returns a batch from the batchstore with the given ID.
+func (s *store) Get(id []byte) (*postage.Batch, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ return s.get(id)
+}
+
+func (s *store) get(id []byte) (*postage.Batch, error) {
+
+ b := &postage.Batch{}
+ err := s.store.Get(batchKey(id), b)
+ if err != nil {
+ return nil, fmt.Errorf("get batch %s: %w", hex.EncodeToString(id), err)
+ }
+ return b, nil
+}
+
+// Exists is implementation of postage.Storer interface Exists method.
+func (s *store) Exists(id []byte) (bool, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+
+ switch err := s.store.Get(batchKey(id), new(postage.Batch)); {
+ case err == nil:
+ return true, nil
+ case errors.Is(err, storage.ErrNotFound):
+ return false, nil
+ default:
+ return false, err
+ }
+}
+
+// Iterate is implementation of postage.Storer interface Iterate method.
+func (s *store) Iterate(cb func(*postage.Batch) (bool, error)) error {
+ return s.store.Iterate(batchKeyPrefix, func(key, value []byte) (bool, error) {
+ b := &postage.Batch{}
+ if err := b.UnmarshalBinary(value); err != nil {
+ return false, err
+ }
+ return cb(b)
+ })
+}
+
+// Save is implementation of postage.Storer interface Save method.
+// This method has side effects; it also updates the radius of the node if successful.
+func (s *store) Save(batch *postage.Batch) error {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ switch err := s.store.Get(batchKey(batch.ID), new(postage.Batch)); {
+ case errors.Is(err, storage.ErrNotFound):
+ if err := s.store.Put(batchKey(batch.ID), batch); err != nil {
+ return err
+ }
+
+ if err := s.saveBatch(batch); err != nil {
+ return err
+ }
+
+ s.logger.Debug("batch saved", "batch_id", hex.EncodeToString(batch.ID), "batch_depth", batch.Depth, "batch_value", batch.Value.Int64(), "radius", s.radius.Load())
+
+ return nil
+ case err != nil:
+ return fmt.Errorf("batchstore: save batch %s depth %d value %d failed: get batch: %w", hex.EncodeToString(batch.ID), batch.Depth, batch.Value.Int64(), err)
+ }
+
+ return fmt.Errorf("batchstore: save batch %s depth %d value %d failed: already exists", hex.EncodeToString(batch.ID), batch.Depth, batch.Value.Int64())
+}
+
+// Update is implementation of postage.Storer interface Update method.
+// This method has side effects; it also updates the radius of the node if successful.
+func (s *store) Update(batch *postage.Batch, value *big.Int, depth uint8) error {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ oldBatch := &postage.Batch{}
+
+ s.logger.Debug("update batch", "batch_id", hex.EncodeToString(batch.ID), "new_batch_depth", depth, "new_batch_value", value.Int64())
+
+ switch err := s.store.Get(batchKey(batch.ID), oldBatch); {
+ case errors.Is(err, storage.ErrNotFound):
+ return ErrNotFound
+ case err != nil:
+ return fmt.Errorf("get batch %s: %w", hex.EncodeToString(batch.ID), err)
+ }
+
+ if err := s.store.Delete(valueKey(batch.Value, batch.ID)); err != nil {
+ return err
+ }
+
+ batch.Value.Set(value)
+ batch.Depth = depth
+
+ err := s.store.Put(batchKey(batch.ID), batch)
+ if err != nil {
+ return err
+ }
+
+ err = s.saveBatch(batch)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// PutChainState is implementation of postage.Storer interface PutChainState method.
+// This method has side effects; it purges expired batches and unreserves underfunded
+// ones before it stores the chain state in the store.
+func (s *store) PutChainState(cs *postage.ChainState) error {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ s.cs.Store(cs)
+
+ s.logger.Debug("put chain state", "block", cs.Block, "amount", cs.TotalAmount.Int64(), "price", cs.CurrentPrice.Int64())
+
+ err := s.cleanup()
+ if err != nil {
+ return fmt.Errorf("batchstore: put chain state clean up: %w", err)
+ }
+
+ err = s.computeRadius()
+ if err != nil {
+ return fmt.Errorf("batchstore: put chain state adjust radius: %w", err)
+ }
+
+ return s.store.Put(chainStateKey, cs)
+}
+
+func (s *store) Commitment() (uint64, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+
+ var totalCommitment int
+ err := s.store.Iterate(batchKeyPrefix, func(key, value []byte) (bool, error) {
+
+ b := &postage.Batch{}
+ if err := b.UnmarshalBinary(value); err != nil {
+ return false, err
+ }
+
+ totalCommitment += exp2(uint(b.Depth))
+
+ return false, nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ return uint64(totalCommitment), err
+}
+
+// Reset is implementation of postage.Storer interface Reset method.
+func (s *store) Reset() error {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ const prefix = "batchstore_"
+ if err := s.store.Iterate(prefix, func(k, _ []byte) (bool, error) {
+ return false, s.store.Delete(string(k))
+ }); err != nil {
+ return err
+ }
+
+ s.cs.Store(&postage.ChainState{
+ Block: 0,
+ TotalAmount: big.NewInt(0),
+ CurrentPrice: big.NewInt(0),
+ })
+
+ s.radius = atomic.Uint32{}
+
+ return nil
+}
+
+// saveBatch adds a new batch to the batchstore by creating a new value item, cleaning up
+// expired batches, and computing a new radius.
+// Must be called under lock.
+func (s *store) saveBatch(b *postage.Batch) error {
+
+ if err := s.store.Put(valueKey(b.Value, b.ID), nil); err != nil {
+ return fmt.Errorf("batchstore: allocate batch %x: %w", b.ID, err)
+ }
+
+ err := s.cleanup()
+ if err != nil {
+ return fmt.Errorf("batchstore: allocate batch cleanup %x: %w", b.ID, err)
+ }
+
+ err = s.computeRadius()
+ if err != nil {
+ return fmt.Errorf("batchstore: allocate batch adjust radius %x: %w", b.ID, err)
+ }
+
+ return nil
+}
+
+// cleanup evicts and removes expired batches.
+// Must be called under lock.
+func (s *store) cleanup() error {
+
+ var evictions []*postage.Batch
+
+ err := s.store.Iterate(valueKeyPrefix, func(key, value []byte) (stop bool, err error) {
+
+ b, err := s.get(valueKeyToID(key))
+ if err != nil {
+ return false, err
+ }
+
+ // batches whose balance is below the total cumulative payout
+ if b.Value.Cmp(s.cs.Load().TotalAmount) <= 0 {
+ evictions = append(evictions, b)
+ } else {
+ return true, nil // stop early as an optimization at first value above the total cumulative payout
+ }
+
+ return false, nil
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, b := range evictions {
+ s.logger.Debug("batch expired", "batch_id", hex.EncodeToString(b.ID))
+ if s.batchExpiry != nil {
+ err = s.batchExpiry.HandleStampExpiry(context.Background(), b.ID)
+ if err != nil {
+ return fmt.Errorf("handle stamp expiry, batch %x: %w", b.ID, err)
+ }
+ }
+ err = s.evictFn(b.ID)
+ if err != nil {
+ return fmt.Errorf("evict batch %x: %w", b.ID, err)
+ }
+ err := s.store.Delete(valueKey(b.Value, b.ID))
+ if err != nil {
+ return fmt.Errorf("delete value key for batch %x: %w", b.ID, err)
+ }
+ err = s.store.Delete(batchKey(b.ID))
+ if err != nil {
+ return fmt.Errorf("delete batch %x: %w", b.ID, err)
+ }
+ }
+
+ return nil
+}
+
+// exp2 returns the e-th power of 2
+func exp2(e uint) int {
+ return 1 << e
+}
+
+// batchKey returns the index key for the batch ID used in the by-ID batch index.
+func batchKey(batchID []byte) string {
+ return batchKeyPrefix + string(batchID)
+}
+
+// valueKey returns the index key for the batch used in the by-value batch index.
+func valueKey(val *big.Int, batchID []byte) string {
+ value := make([]byte, 32)
+ val.FillBytes(value) // zero-extended big-endian byte slice
+ return valueKeyPrefix + string(value) + string(batchID)
+}
+
+// valueKeyToID extracts the batch ID from a value key - used in value-based iteration.
+func valueKeyToID(key []byte) []byte {
+ l := len(key)
+ return key[l-32 : l]
+}
+
+func (s *store) SetBatchExpiryHandler(be postage.BatchExpiryHandler) {
+ s.batchExpiry = be
+}
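
The by-value index works because valueKey's 32-byte zero-extended big-endian encoding makes lexicographic key order coincide with numeric value order; that is what lets cleanup iterate batches from lowest value upward and stop at the first batch whose value exceeds the cumulative payout. A self-contained check of that property:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	enc := func(v int64) string {
		b := make([]byte, 32)
		big.NewInt(v).FillBytes(b) // zero-extended big-endian, as in valueKey
		return string(b)
	}
	// Byte-wise string comparison orders the keys numerically.
	fmt.Println(enc(9) < enc(10), enc(255) < enc(256)) // true true
}
```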
diff --git a/pkg/postage/listener/listener.go b/pkg/postage/listener/listener.go
index 9750c09b7e3..ce07fefefc5 100644
--- a/pkg/postage/listener/listener.go
+++ b/pkg/postage/listener/listener.go
@@ -1,6 +1,5 @@
-// Copyright 2021 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package listener
@@ -12,7 +11,6 @@ import (
"sync"
"time"
- "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -21,33 +19,8 @@ import (
"github.com/ethersphere/bee/v2/pkg/postage/batchservice"
"github.com/ethersphere/bee/v2/pkg/transaction"
"github.com/ethersphere/bee/v2/pkg/util/syncutil"
- "github.com/prometheus/client_golang/prometheus"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "listener"
-
-const (
- blockPage = 5000 // how many blocks to sync every time we page
- tailSize = 4 // how many blocks to tail from the tip of the chain
- defaultBatchFactor = uint64(5) // // minimal number of blocks to sync at once
-)
-
-var (
- // for testing, set externally
- batchFactorOverridePublic = "5"
-)
-
-var (
- ErrPostageSyncingStalled = errors.New("postage syncing stalled")
- ErrPostagePaused = errors.New("postage contract is paused")
-)
-
-type BlockHeightContractFilterer interface {
- FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error)
- BlockNumber(context.Context) (uint64, error)
-}
-
type listener struct {
logger log.Logger
ev BlockHeightContractFilterer
@@ -100,25 +73,6 @@ func New(
}
}
-func (l *listener) filterQuery(from, to *big.Int) ethereum.FilterQuery {
- return ethereum.FilterQuery{
- FromBlock: from,
- ToBlock: to,
- Addresses: []common.Address{
- l.postageStampContractAddress,
- },
- Topics: [][]common.Hash{
- {
- l.batchCreatedTopic,
- l.batchTopUpTopic,
- l.batchDepthIncreaseTopic,
- l.priceUpdateTopic,
- l.pausedTopic,
- },
- },
- }
-}
-
func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error {
defer l.metrics.EventsProcessed.Inc()
switch e.Topics[0] {
@@ -363,51 +317,3 @@ func (l *listener) Listen(ctx context.Context, from uint64, updater postage.Even
return synced
}
-
-func (l *listener) Close() error {
- close(l.quit)
-
- done := make(chan struct{})
- go func() {
- defer close(done)
- l.wg.Wait()
- }()
-
- select {
- case <-done:
- case <-time.After(5 * time.Second):
- return errors.New("postage listener closed with running goroutines")
- }
- return nil
-}
-
-type batchCreatedEvent struct {
- BatchId [32]byte
- TotalAmount *big.Int
- NormalisedBalance *big.Int
- Owner common.Address
- Depth uint8
- BucketDepth uint8
- ImmutableFlag bool
-}
-
-type batchTopUpEvent struct {
- BatchId [32]byte
- TopupAmount *big.Int
- NormalisedBalance *big.Int
-}
-
-type batchDepthIncreaseEvent struct {
- BatchId [32]byte
- NewDepth uint8
- NormalisedBalance *big.Int
-}
-
-type priceUpdateEvent struct {
- Price *big.Int
-}
-
-func totalTimeMetric(metric prometheus.Counter, start time.Time) {
- totalTime := time.Since(start)
- metric.Add(float64(totalTime))
-}
diff --git a/pkg/postage/listener/listener_js.go b/pkg/postage/listener/listener_js.go
new file mode 100644
index 00000000000..17225216852
--- /dev/null
+++ b/pkg/postage/listener/listener_js.go
@@ -0,0 +1,303 @@
+//go:build js
+// +build js
+
+package listener
+
+import (
+ "context"
+ "errors"
+ "math/big"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/postage/batchservice"
+ "github.com/ethersphere/bee/v2/pkg/transaction"
+ "github.com/ethersphere/bee/v2/pkg/util/syncutil"
+)
+
+type listener struct {
+ logger log.Logger
+ ev BlockHeightContractFilterer
+ blockTime time.Duration
+
+ postageStampContractAddress common.Address
+ postageStampContractABI abi.ABI
+ quit chan struct{}
+ wg sync.WaitGroup
+ stallingTimeout time.Duration
+ backoffTime time.Duration
+ syncingStopped *syncutil.Signaler
+
+ // Cached postage stamp contract event topics.
+ batchCreatedTopic common.Hash
+ batchTopUpTopic common.Hash
+ batchDepthIncreaseTopic common.Hash
+ priceUpdateTopic common.Hash
+ pausedTopic common.Hash
+}
+
+func New(
+ syncingStopped *syncutil.Signaler,
+ logger log.Logger,
+ ev BlockHeightContractFilterer,
+ postageStampContractAddress common.Address,
+ postageStampContractABI abi.ABI,
+ blockTime time.Duration,
+ stallingTimeout time.Duration,
+ backoffTime time.Duration,
+) postage.Listener {
+ return &listener{
+ syncingStopped: syncingStopped,
+ logger: logger.WithName(loggerName).Register(),
+ ev: ev,
+ blockTime: blockTime,
+ postageStampContractAddress: postageStampContractAddress,
+ postageStampContractABI: postageStampContractABI,
+ quit: make(chan struct{}),
+ stallingTimeout: stallingTimeout,
+ backoffTime: backoffTime,
+
+ batchCreatedTopic: postageStampContractABI.Events["BatchCreated"].ID,
+ batchTopUpTopic: postageStampContractABI.Events["BatchTopUp"].ID,
+ batchDepthIncreaseTopic: postageStampContractABI.Events["BatchDepthIncrease"].ID,
+ priceUpdateTopic: postageStampContractABI.Events["PriceUpdate"].ID,
+ pausedTopic: postageStampContractABI.Events["Paused"].ID,
+ }
+}
+
+func (l *listener) processEvent(e types.Log, updater postage.EventUpdater) error {
+ switch e.Topics[0] {
+ case l.batchCreatedTopic:
+ c := &batchCreatedEvent{}
+ err := transaction.ParseEvent(&l.postageStampContractABI, "BatchCreated", c, e)
+ if err != nil {
+ return err
+ }
+ return updater.Create(
+ c.BatchId[:],
+ c.Owner.Bytes(),
+ c.TotalAmount,
+ c.NormalisedBalance,
+ c.Depth,
+ c.BucketDepth,
+ c.ImmutableFlag,
+ e.TxHash,
+ )
+ case l.batchTopUpTopic:
+ c := &batchTopUpEvent{}
+ err := transaction.ParseEvent(&l.postageStampContractABI, "BatchTopUp", c, e)
+ if err != nil {
+ return err
+ }
+ return updater.TopUp(
+ c.BatchId[:],
+ c.TopupAmount,
+ c.NormalisedBalance,
+ e.TxHash,
+ )
+ case l.batchDepthIncreaseTopic:
+ c := &batchDepthIncreaseEvent{}
+ err := transaction.ParseEvent(&l.postageStampContractABI, "BatchDepthIncrease", c, e)
+ if err != nil {
+ return err
+ }
+
+ return updater.UpdateDepth(
+ c.BatchId[:],
+ c.NewDepth,
+ c.NormalisedBalance,
+ e.TxHash,
+ )
+ case l.priceUpdateTopic:
+ c := &priceUpdateEvent{}
+ err := transaction.ParseEvent(&l.postageStampContractABI, "PriceUpdate", c, e)
+ if err != nil {
+ return err
+ }
+ return updater.UpdatePrice(
+ c.Price,
+ e.TxHash,
+ )
+ case l.pausedTopic:
+ l.logger.Warning("Postage contract is paused.")
+ return ErrPostagePaused
+ default:
+ return errors.New("unknown event")
+ }
+}
+
+func (l *listener) Listen(ctx context.Context, from uint64, updater postage.EventUpdater, initState *postage.ChainSnapshot) <-chan error {
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ <-l.quit
+ cancel()
+ }()
+
+ processEvents := func(events []types.Log, to uint64) error {
+ if err := updater.TransactionStart(); err != nil {
+ return err
+ }
+
+ for _, e := range events {
+ err := updater.UpdateBlockNumber(e.BlockNumber)
+ if err != nil {
+ return err
+ }
+ if err = l.processEvent(e, updater); err != nil {
+ // if we have a zero value batch, silence the error, log it, and move on
+ if !errors.Is(err, batchservice.ErrZeroValueBatch) {
+ return err
+ }
+ l.logger.Debug("failed processing event", "error", err)
+ }
+ }
+
+ err := updater.UpdateBlockNumber(to)
+ if err != nil {
+ return err
+ }
+
+ if err := updater.TransactionEnd(); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ if initState != nil {
+ err := processEvents(initState.Events, initState.LastBlockNumber+1)
+ if err != nil {
+ l.logger.Error(err, "failed bootstrapping from initial state")
+ }
+ }
+
+ batchFactor, err := strconv.ParseUint(batchFactorOverridePublic, 10, 64)
+ if err != nil {
+ l.logger.Warning("batch factor conversation failed", "batch_factor", batchFactor, "error", err)
+ batchFactor = defaultBatchFactor
+ }
+
+ l.logger.Debug("batch factor", "value", batchFactor)
+
+ synced := make(chan error)
+ closeOnce := new(sync.Once)
+ paged := true
+
+ lastProgress := time.Now()
+ lastConfirmedBlock := uint64(0)
+
+ l.wg.Add(1)
+ listenf := func() error {
+ defer l.wg.Done()
+ for {
+ // if for whatever reason we are stuck for too long we terminate
+ // this can happen because of rpc errors but also because of a stalled backend node
+ // this does not catch the case where a backend node is actively syncing but has not caught up
+ if time.Since(lastProgress) >= l.stallingTimeout {
+ return ErrPostageSyncingStalled
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ // if we have a last block number from the backend we can make a good estimate of when we need to requery
+ // otherwise we just use the backoff time
+ var expectedWaitTime time.Duration
+ if lastConfirmedBlock != 0 {
+ nextExpectedBatchBlock := (lastConfirmedBlock/batchFactor + 1) * batchFactor
+ remainingBlocks := nextExpectedBatchBlock - lastConfirmedBlock
+ expectedWaitTime = l.blockTime * time.Duration(remainingBlocks)
+ } else {
+ expectedWaitTime = l.backoffTime
+ }
+
+ if !paged {
+ l.logger.Debug("sleeping until next block batch", "duration", expectedWaitTime)
+ select {
+ case <-time.After(expectedWaitTime):
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ paged = false
+
+ to, err := l.ev.BlockNumber(ctx)
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ return nil
+ }
+
+ l.logger.Warning("could not get block number", "error", err)
+ lastConfirmedBlock = 0
+ continue
+ }
+
+ if to < tailSize {
+ // in a test blockchain there might not be enough blocks yet
+ continue
+ }
+
+ // consider to-tailSize as the "latest" block we need to sync to
+ to = to - tailSize
+ lastConfirmedBlock = to
+
+ // round down to the largest multiple of batchFactor
+ to = (to / batchFactor) * batchFactor
+
+ if to < from {
+ // if the block number is actually less than what we already have, it might mean the backend is not synced or there was a reorg
+ continue
+ }
+
+ // do some paging (sub-optimal)
+ if to-from >= blockPage {
+ paged = true
+ to = from + blockPage - 1
+ } else {
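+ // caught up to the head; signal completion to the caller exactly once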
+ closeOnce.Do(func() { synced <- nil })
+ }
+
+ events, err := l.ev.FilterLogs(ctx, l.filterQuery(big.NewInt(int64(from)), big.NewInt(int64(to))))
+ if err != nil {
+ l.logger.Warning("could not get blockchain log", "error", err)
+ lastConfirmedBlock = 0
+ continue
+ }
+
+ if err := processEvents(events, to); err != nil {
+ return err
+ }
+
+ from = to + 1
+ lastProgress = time.Now()
+
+ }
+ }
+
+ go func() {
+ err := listenf()
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ // Context cancelled is returned on shutdown, therefore we do nothing here.
+ l.logger.Debug("shutting down event listener")
+ return
+ }
+ l.logger.Error(err, "failed syncing event listener; shutting down node error")
+ }
+ closeOnce.Do(func() { synced <- err })
+ if l.syncingStopped != nil {
+ l.syncingStopped.Signal() // trigger shutdown in start.go
+ }
+ }()
+
+ return synced
+}
diff --git a/pkg/postage/listener/listener_shared.go b/pkg/postage/listener/listener_shared.go
new file mode 100644
index 00000000000..9c19cd486f3
--- /dev/null
+++ b/pkg/postage/listener/listener_shared.go
@@ -0,0 +1,108 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package listener
+
+import (
+ "context"
+ "errors"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "listener"
+
+const (
+ blockPage = 5000 // how many blocks to sync every time we page
+ tailSize = 4 // how many blocks to tail from the tip of the chain
+ defaultBatchFactor = uint64(5) // minimal number of blocks to sync at once
+)
+
+var (
+ // for testing, set externally
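+ // (a string so it can be swapped at link time, e.g. with -ldflags "-X")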
+ batchFactorOverridePublic = "5"
+)
+
+var (
+ ErrPostageSyncingStalled = errors.New("postage syncing stalled")
+ ErrPostagePaused = errors.New("postage contract is paused")
+)
+
+type BlockHeightContractFilterer interface {
+ FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error)
+ BlockNumber(context.Context) (uint64, error)
+}
+
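+// filterQuery builds a log filter for all postage stamp contract events in the [from, to] block range.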
+func (l *listener) filterQuery(from, to *big.Int) ethereum.FilterQuery {
+ return ethereum.FilterQuery{
+ FromBlock: from,
+ ToBlock: to,
+ Addresses: []common.Address{
+ l.postageStampContractAddress,
+ },
+ Topics: [][]common.Hash{
+ {
+ l.batchCreatedTopic,
+ l.batchTopUpTopic,
+ l.batchDepthIncreaseTopic,
+ l.priceUpdateTopic,
+ l.pausedTopic,
+ },
+ },
+ }
+}
+
+func (l *listener) Close() error {
+ close(l.quit)
+
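+ // wait for the listener goroutines to exit, but only for a bounded grace period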
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ l.wg.Wait()
+ }()
+
+ select {
+ case <-done:
+ case <-time.After(5 * time.Second):
+ return errors.New("postage listener closed with running goroutines")
+ }
+ return nil
+}
+
+type batchCreatedEvent struct {
+ BatchId [32]byte
+ TotalAmount *big.Int
+ NormalisedBalance *big.Int
+ Owner common.Address
+ Depth uint8
+ BucketDepth uint8
+ ImmutableFlag bool
+}
+
+type batchTopUpEvent struct {
+ BatchId [32]byte
+ TopupAmount *big.Int
+ NormalisedBalance *big.Int
+}
+
+type batchDepthIncreaseEvent struct {
+ BatchId [32]byte
+ NewDepth uint8
+ NormalisedBalance *big.Int
+}
+
+type priceUpdateEvent struct {
+ Price *big.Int
+}
+
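+// totalTimeMetric adds the time elapsed since start (as nanoseconds) to the given counter.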
+func totalTimeMetric(metric prometheus.Counter, start time.Time) {
+ totalTime := time.Since(start)
+ metric.Add(float64(totalTime))
+}
diff --git a/pkg/postage/listener/metrics.go b/pkg/postage/listener/metrics.go
index 8fb7b639884..8d725de32d0 100644
--- a/pkg/postage/listener/metrics.go
+++ b/pkg/postage/listener/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/postage/postagecontract/contract.go b/pkg/postage/postagecontract/contract.go
index d9645822166..b0e5d06908a 100644
--- a/pkg/postage/postagecontract/contract.go
+++ b/pkg/postage/postagecontract/contract.go
@@ -24,7 +24,7 @@ import (
var (
BucketDepth = uint8(16)
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_9)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
ErrBatchCreate = errors.New("batch creation failed")
ErrInsufficientFunds = errors.New("insufficient token balance")
diff --git a/pkg/pss/metrics.go b/pkg/pss/metrics.go
index 108f516215f..d25d655fe1e 100644
--- a/pkg/pss/metrics.go
+++ b/pkg/pss/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/pss/pss.go b/pkg/pss/pss.go
index 454f5960129..da67b7bf616 100644
--- a/pkg/pss/pss.go
+++ b/pkg/pss/pss.go
@@ -1,53 +1,21 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package pss exposes functionalities needed to communicate
-// with other peers on the network. Pss uses pushsync and
-// pullsync for message delivery and mailboxing. All messages
-// are disguised as content-addressed chunks. Sending and
-// receiving of messages is exposed over the HTTP API, with
-// websocket subscriptions for incoming messages.
package pss
import (
"context"
"crypto/ecdsa"
"errors"
- "io"
"sync"
"time"
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/postage"
"github.com/ethersphere/bee/v2/pkg/pushsync"
- "github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/topology"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "pss"
-
-var (
- _ Interface = (*pss)(nil)
- ErrNoHandler = errors.New("no handler found")
-)
-
-type Sender interface {
- // Send arbitrary byte slice with the given topic to Targets.
- Send(context.Context, Topic, []byte, postage.Stamper, *ecdsa.PublicKey, Targets) error
-}
-
-type Interface interface {
- Sender
- // Register a Handler for a given Topic.
- Register(Topic, Handler) func()
- // TryUnwrap tries to unwrap a wrapped trojan message.
- TryUnwrap(swarm.Chunk)
-
- SetPushSyncer(pushSyncer pushsync.PushSyncer)
- io.Closer
-}
-
type pss struct {
key *ecdsa.PrivateKey
pusher pushsync.PushSyncer
@@ -69,23 +37,6 @@ func New(key *ecdsa.PrivateKey, logger log.Logger) Interface {
}
}
-func (ps *pss) Close() error {
- close(ps.quit)
- ps.handlersMu.Lock()
- defer ps.handlersMu.Unlock()
-
- ps.handlers = make(map[Topic][]*Handler) //unset handlers on shutdown
-
- return nil
-}
-
-func (ps *pss) SetPushSyncer(pushSyncer pushsync.PushSyncer) {
- ps.pusher = pushSyncer
-}
-
-// Handler defines code to be executed upon reception of a trojan message.
-type Handler func(context.Context, []byte)
-
// Send constructs a padded message with topic and payload,
// wraps it in a trojan chunk such that one of the targets is a prefix of the chunk address.
// Uses push-sync to deliver message.
@@ -117,81 +68,3 @@ func (p *pss) Send(ctx context.Context, topic Topic, payload []byte, stamper pos
return nil
}
-
-// Register allows the definition of a Handler func for a specific topic on the pss struct.
-func (p *pss) Register(topic Topic, handler Handler) (cleanup func()) {
- p.handlersMu.Lock()
- defer p.handlersMu.Unlock()
-
- p.handlers[topic] = append(p.handlers[topic], &handler)
-
- return func() {
- p.handlersMu.Lock()
- defer p.handlersMu.Unlock()
-
- h := p.handlers[topic]
- for i := 0; i < len(h); i++ {
- if h[i] == &handler {
- p.handlers[topic] = append(h[:i], h[i+1:]...)
- return
- }
- }
- }
-}
-
-func (p *pss) topics() []Topic {
- p.handlersMu.Lock()
- defer p.handlersMu.Unlock()
-
- ts := make([]Topic, 0, len(p.handlers))
- for t := range p.handlers {
- ts = append(ts, t)
- }
-
- return ts
-}
-
-// TryUnwrap allows unwrapping a chunk as a trojan message and calling its handlers based on the topic.
-func (p *pss) TryUnwrap(c swarm.Chunk) {
- if len(c.Data()) < swarm.ChunkWithSpanSize {
- return // chunk not full
- }
- ctx := context.Background()
- topic, msg, err := Unwrap(ctx, p.key, c, p.topics())
- if err != nil {
- return // cannot unwrap
- }
- h := p.getHandlers(topic)
- if h == nil {
- return // no handler
- }
-
- ctx, cancel := context.WithCancel(ctx)
- done := make(chan struct{})
- var wg sync.WaitGroup
- go func() {
- defer cancel()
- select {
- case <-p.quit:
- case <-done:
- }
- }()
- for _, hh := range h {
- wg.Add(1)
- go func(hh Handler) {
- defer wg.Done()
- hh(ctx, msg)
- }(*hh)
- }
- go func() {
- wg.Wait()
- close(done)
- }()
-}
-
-func (p *pss) getHandlers(topic Topic) []*Handler {
- p.handlersMu.Lock()
- defer p.handlersMu.Unlock()
-
- return p.handlers[topic]
-}
diff --git a/pkg/pss/pss_js.go b/pkg/pss/pss_js.go
new file mode 100644
index 00000000000..ffe53a50689
--- /dev/null
+++ b/pkg/pss/pss_js.go
@@ -0,0 +1,62 @@
+//go:build js
+// +build js
+
+package pss
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "sync"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/pushsync"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+)
+
+type pss struct {
+ key *ecdsa.PrivateKey
+ pusher pushsync.PushSyncer
+ handlers map[Topic][]*Handler
+ handlersMu sync.Mutex
+ logger log.Logger
+ quit chan struct{}
+}
+
+// New returns a new pss service.
+func New(key *ecdsa.PrivateKey, logger log.Logger) Interface {
+ return &pss{
+ key: key,
+ logger: logger.WithName(loggerName).Register(),
+ handlers: make(map[Topic][]*Handler),
+ quit: make(chan struct{}),
+ }
+}
+
+// Send constructs a padded message with topic and payload,
+// wraps it in a trojan chunk such that one of the targets is a prefix of the chunk address.
+// Uses push-sync to deliver message.
+func (p *pss) Send(ctx context.Context, topic Topic, payload []byte, stamper postage.Stamper, recipient *ecdsa.PublicKey, targets Targets) error {
+
+ tc, err := Wrap(ctx, topic, payload, recipient, targets)
+ if err != nil {
+ return err
+ }
+
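+ // stamp the trojan chunk; the chunk address is passed as both the content and the identity address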
+ stamp, err := stamper.Stamp(tc.Address(), tc.Address())
+ if err != nil {
+ return err
+ }
+ tc = tc.WithStamp(stamp)
+
+ // push the chunk using push sync so that it reaches its destination in the network
+ if _, err = p.pusher.PushChunkToClosest(ctx, tc); err != nil {
+ if errors.Is(err, topology.ErrWantSelf) {
+ return nil
+ }
+ return err
+ }
+
+ return nil
+}
diff --git a/pkg/pss/pss_shared.go b/pkg/pss/pss_shared.go
new file mode 100644
index 00000000000..5594efab71c
--- /dev/null
+++ b/pkg/pss/pss_shared.go
@@ -0,0 +1,141 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pss exposes functionalities needed to communicate
+// with other peers on the network. Pss uses pushsync and
+// pullsync for message delivery and mailboxing. All messages
+// are disguised as content-addressed chunks. Sending and
+// receiving of messages is exposed over the HTTP API, with
+// websocket subscriptions for incoming messages.
+package pss
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "io"
+ "sync"
+
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/pushsync"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "pss"
+
+var (
+ _ Interface = (*pss)(nil)
+ ErrNoHandler = errors.New("no handler found")
+)
+
+type Sender interface {
+ // Send arbitrary byte slice with the given topic to Targets.
+ Send(context.Context, Topic, []byte, postage.Stamper, *ecdsa.PublicKey, Targets) error
+}
+
+type Interface interface {
+ Sender
+ // Register a Handler for a given Topic.
+ Register(Topic, Handler) func()
+ // TryUnwrap tries to unwrap a wrapped trojan message.
+ TryUnwrap(swarm.Chunk)
+
+ SetPushSyncer(pushSyncer pushsync.PushSyncer)
+ io.Closer
+}
+
+func (ps *pss) Close() error {
+ close(ps.quit)
+ ps.handlersMu.Lock()
+ defer ps.handlersMu.Unlock()
+
+ ps.handlers = make(map[Topic][]*Handler) //unset handlers on shutdown
+
+ return nil
+}
+
+func (ps *pss) SetPushSyncer(pushSyncer pushsync.PushSyncer) {
+ ps.pusher = pushSyncer
+}
+
+// Handler defines code to be executed upon reception of a trojan message.
+type Handler func(context.Context, []byte)
+
+// Register allows the definition of a Handler func for a specific topic on the pss struct.
+func (p *pss) Register(topic Topic, handler Handler) (cleanup func()) {
+ p.handlersMu.Lock()
+ defer p.handlersMu.Unlock()
+
+ p.handlers[topic] = append(p.handlers[topic], &handler)
+
+ return func() {
+ p.handlersMu.Lock()
+ defer p.handlersMu.Unlock()
+
+ h := p.handlers[topic]
+ for i := 0; i < len(h); i++ {
+ if h[i] == &handler {
+ p.handlers[topic] = append(h[:i], h[i+1:]...)
+ return
+ }
+ }
+ }
+}
+
+func (p *pss) topics() []Topic {
+ p.handlersMu.Lock()
+ defer p.handlersMu.Unlock()
+
+ ts := make([]Topic, 0, len(p.handlers))
+ for t := range p.handlers {
+ ts = append(ts, t)
+ }
+
+ return ts
+}
+
+// TryUnwrap allows unwrapping a chunk as a trojan message and calling its handlers based on the topic.
+func (p *pss) TryUnwrap(c swarm.Chunk) {
+ if len(c.Data()) < swarm.ChunkWithSpanSize {
+ return // chunk not full
+ }
+ ctx := context.Background()
+ topic, msg, err := Unwrap(ctx, p.key, c, p.topics())
+ if err != nil {
+ return // cannot unwrap
+ }
+ h := p.getHandlers(topic)
+ if h == nil {
+ return // no handler
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ done := make(chan struct{})
+ var wg sync.WaitGroup
+ go func() {
+ defer cancel()
+ select {
+ case <-p.quit:
+ case <-done:
+ }
+ }()
+ for _, hh := range h {
+ wg.Add(1)
+ go func(hh Handler) {
+ defer wg.Done()
+ hh(ctx, msg)
+ }(*hh)
+ }
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+}
+
+func (p *pss) getHandlers(topic Topic) []*Handler {
+ p.handlersMu.Lock()
+ defer p.handlersMu.Unlock()
+
+ return p.handlers[topic]
+}
diff --git a/pkg/puller/metrics.go b/pkg/puller/metrics.go
index bfa546f26e0..404f78eb430 100644
--- a/pkg/puller/metrics.go
+++ b/pkg/puller/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/puller/puller.go b/pkg/puller/puller.go
index 2fdbc24e9cf..fb5f1b5cb29 100644
--- a/pkg/puller/puller.go
+++ b/pkg/puller/puller.go
@@ -1,24 +1,17 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package puller provides protocol-orchestrating functionality
-// over the pullsync protocol. It pulls chunks from other nodes
-// and reacts to changes in network configuration.
package puller
import (
"context"
"errors"
- "fmt"
- "maps"
"math"
"sync"
"time"
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/p2p"
- "github.com/ethersphere/bee/v2/pkg/puller/intervalstore"
"github.com/ethersphere/bee/v2/pkg/pullsync"
"github.com/ethersphere/bee/v2/pkg/rate"
"github.com/ethersphere/bee/v2/pkg/storage"
@@ -28,26 +21,6 @@ import (
ratelimit "golang.org/x/time/rate"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "puller"
-
-var errCursorsLength = errors.New("cursors length mismatch")
-
-const (
- DefaultHistRateWindow = time.Minute * 15
-
- IntervalPrefix = "sync_interval"
- recalcPeersDur = time.Minute * 5
-
- maxChunksPerSecond = 1000 // roughly 4 MB/s
-
- maxPODelta = 2 // the lowest level of proximity order (of peers) subtracted from the storage radius allowed for chunk syncing.
-)
-
-type Options struct {
- Bins uint8
-}
-
type Puller struct {
base swarm.Address
@@ -110,193 +83,6 @@ func New(
return p
}
-func (p *Puller) Start(ctx context.Context) {
- p.start.Do(func() {
- cctx, cancel := context.WithCancel(ctx)
- p.cancel = cancel
-
- p.wg.Add(1)
- go p.manage(cctx)
- })
-}
-
-func (p *Puller) SyncRate() float64 {
- return p.rate.Rate()
-}
-
-func (p *Puller) manage(ctx context.Context) {
- defer p.wg.Done()
-
- c, unsubscribe := p.topology.SubscribeTopologyChange()
- defer unsubscribe()
-
- p.logger.Info("warmup period complete, starting worker")
-
- var prevRadius uint8
-
- onChange := func() {
- p.syncPeersMtx.Lock()
- defer p.syncPeersMtx.Unlock()
-
- newRadius := p.radius.StorageRadius()
-
- // reset all intervals below the new radius to resync:
- // 1. previously evicted chunks
- // 2. previously ignored chunks due to a higher radius
- if newRadius < prevRadius {
- for _, peer := range p.syncPeers {
- p.disconnectPeer(peer.address)
- }
- if err := p.resetIntervals(prevRadius); err != nil {
- p.logger.Debug("reset lower sync radius failed", "error", err)
- }
- p.logger.Debug("radius decrease", "old_radius", prevRadius, "new_radius", newRadius)
- }
- prevRadius = newRadius
-
- // peersDisconnected is used to mark and prune peers that are no longer connected.
- peersDisconnected := maps.Clone(p.syncPeers)
-
- _ = p.topology.EachConnectedPeerRev(func(addr swarm.Address, po uint8) (stop, jumpToNext bool, err error) {
- if _, ok := p.syncPeers[addr.ByteString()]; !ok {
- p.syncPeers[addr.ByteString()] = newSyncPeer(addr, p.bins, po)
- }
- delete(peersDisconnected, addr.ByteString())
- return false, false, nil
- }, topology.Select{})
-
- for _, peer := range peersDisconnected {
- p.disconnectPeer(peer.address)
- }
-
- p.recalcPeers(ctx, newRadius)
- }
-
- tick := time.NewTicker(recalcPeersDur)
- defer tick.Stop()
-
- for {
-
- onChange()
-
- select {
- case <-ctx.Done():
- return
- case <-tick.C:
- case <-c:
- }
- }
-}
-
-// disconnectPeer cancels all existing syncing and removes the peer entry from the syncing map.
-// Must be called under lock.
-func (p *Puller) disconnectPeer(addr swarm.Address) {
- loggerV2 := p.logger.V(2).Register()
-
- loggerV2.Debug("disconnecting peer", "peer_address", addr)
- if peer, ok := p.syncPeers[addr.ByteString()]; ok {
- peer.mtx.Lock()
- peer.stop()
- peer.mtx.Unlock()
- }
- delete(p.syncPeers, addr.ByteString())
-}
-
-// recalcPeers starts or stops syncing process for peers per bin depending on the current sync radius.
-// Must be called under lock.
-func (p *Puller) recalcPeers(ctx context.Context, storageRadius uint8) {
- var wg sync.WaitGroup
- for _, peer := range p.syncPeers {
- wg.Add(1)
- p.wg.Add(1)
- go func(peer *syncPeer) {
- defer p.wg.Done()
- defer wg.Done()
- if err := p.syncPeer(ctx, peer, storageRadius); err != nil {
- p.logger.Debug("sync peer failed", "peer_address", peer.address, "error", err)
- }
- }(peer)
- }
- wg.Wait()
-}
-
-func (p *Puller) syncPeer(ctx context.Context, peer *syncPeer, storageRadius uint8) error {
- peer.mtx.Lock()
- defer peer.mtx.Unlock()
-
- if peer.cursors == nil {
- cursors, epoch, err := p.syncer.GetCursors(ctx, peer.address)
- if err != nil {
- return fmt.Errorf("could not get cursors from peer %s: %w", peer.address, err)
- }
- peer.cursors = cursors
-
- storedEpoch, err := p.getPeerEpoch(peer.address)
- if err != nil {
- return fmt.Errorf("retrieve epoch for peer %s: %w", peer.address, err)
- }
-
- if storedEpoch != epoch {
- // cancel all bins
- peer.stop()
-
- p.logger.Debug("peer epoch change detected, resetting past synced intervals", "stored_epoch", storedEpoch, "new_epoch", epoch, "peer_address", peer.address)
-
- err = p.resetPeerIntervals(peer.address)
- if err != nil {
- return fmt.Errorf("reset intervals for peer %s: %w", peer.address, err)
- }
- err = p.setPeerEpoch(peer.address, epoch)
- if err != nil {
- return fmt.Errorf("set epoch for peer %s: %w", peer.address, err)
- }
- }
- }
-
- if len(peer.cursors) != int(p.bins) {
- return errCursorsLength
- }
-
- /*
- The syncing behavior diverges for peers outside and within the storage radius.
- For neighbor peers, we sync ALL bins greater than or equal to the storage radius.
- For peers with PO lower than the storage radius, we must sync ONLY the bin that is the PO.
- For peers peer with PO lower than the storage radius and even lower than the allowed minimum threshold,
- no syncing is done.
- */
-
- if peer.po >= storageRadius {
-
- // cancel all bins lower than the storage radius
- for bin := uint8(0); bin < storageRadius; bin++ {
- peer.cancelBin(bin)
- }
-
- // sync all bins >= storage radius
- for bin, cur := range peer.cursors {
- if bin >= int(storageRadius) && !peer.isBinSyncing(uint8(bin)) {
- p.syncPeerBin(ctx, peer, uint8(bin), cur)
- }
- }
-
- } else if storageRadius-peer.po <= maxPODelta {
- // cancel all non-po bins, if any
- for bin := uint8(0); bin < p.bins; bin++ {
- if bin != peer.po {
- peer.cancelBin(bin)
- }
- }
- // sync PO bin only
- if !peer.isBinSyncing(peer.po) {
- p.syncPeerBin(ctx, peer, peer.po, peer.cursors[peer.po])
- }
- } else {
- peer.stop()
- }
-
- return nil
-}
-
// syncPeerBin will start historical and live syncing for the peer for a particular bin.
// Must be called under syncPeer lock.
func (p *Puller) syncPeerBin(parentCtx context.Context, peer *syncPeer, bin uint8, cursor uint64) {
@@ -388,193 +174,3 @@ func (p *Puller) syncPeerBin(parentCtx context.Context, peer *syncPeer, bin uint
p.wg.Add(1)
go sync(false, peer.address, cursor+1)
}
-
-func (p *Puller) Close() error {
- p.logger.Info("shutting down")
- p.cancel()
- cc := make(chan struct{})
- go func() {
- defer close(cc)
- p.wg.Wait()
- }()
- select {
- case <-cc:
- case <-time.After(10 * time.Second):
- p.logger.Warning("shut down timeout, some goroutines may still be running")
- }
-
- return nil
-}
-
-func (p *Puller) addPeerInterval(peer swarm.Address, bin uint8, start, end uint64) (err error) {
- p.intervalMtx.Lock()
- defer p.intervalMtx.Unlock()
-
- peerStreamKey := peerIntervalKey(peer, bin)
- i, err := p.getOrCreateInterval(peer, bin)
- if err != nil {
- return err
- }
-
- i.Add(start, end)
-
- return p.statestore.Put(peerStreamKey, i)
-}
-
-func (p *Puller) getPeerEpoch(peer swarm.Address) (uint64, error) {
- p.intervalMtx.Lock()
- defer p.intervalMtx.Unlock()
-
- var epoch uint64
- err := p.statestore.Get(peerEpochKey(peer), &epoch)
- if err != nil {
- if errors.Is(err, storage.ErrNotFound) {
- return 0, nil
- }
- return 0, err
- }
-
- return epoch, nil
-}
-
-func (p *Puller) setPeerEpoch(peer swarm.Address, epoch uint64) error {
- p.intervalMtx.Lock()
- defer p.intervalMtx.Unlock()
-
- return p.statestore.Put(peerEpochKey(peer), epoch)
-}
-
-func (p *Puller) resetPeerIntervals(peer swarm.Address) (err error) {
- p.intervalMtx.Lock()
- defer p.intervalMtx.Unlock()
-
- for bin := uint8(0); bin < p.bins; bin++ {
- err = errors.Join(err, p.statestore.Delete(peerIntervalKey(peer, bin)))
- }
-
- return
-}
-
-func (p *Puller) resetIntervals(oldRadius uint8) (err error) {
- p.intervalMtx.Lock()
- defer p.intervalMtx.Unlock()
-
- var deleteKeys []string
-
- for bin := uint8(0); bin < p.bins; bin++ {
- err = errors.Join(err,
- p.statestore.Iterate(binIntervalKey(bin), func(key, _ []byte) (stop bool, err error) {
- po := swarm.Proximity(addressFromKey(key).Bytes(), p.base.Bytes())
-
- // 1. for neighbor peers, only reset the bins below the current radius
- // 2. for non-neighbor peers, we must reset the entire history
- if po >= oldRadius {
- if bin < oldRadius {
- deleteKeys = append(deleteKeys, string(key))
- }
- } else {
- deleteKeys = append(deleteKeys, string(key))
- }
- return false, nil
- }),
- )
- }
-
- for _, k := range deleteKeys {
- err = errors.Join(err, p.statestore.Delete(k))
- }
-
- return err
-}
-
-func (p *Puller) nextPeerInterval(peer swarm.Address, bin uint8) (uint64, error) {
- p.intervalMtx.Lock()
- defer p.intervalMtx.Unlock()
-
- i, err := p.getOrCreateInterval(peer, bin)
- if err != nil {
- return 0, err
- }
-
- start, _, _ := i.Next(0)
- return start, nil
-}
-
-// Must be called underlock.
-func (p *Puller) getOrCreateInterval(peer swarm.Address, bin uint8) (*intervalstore.Intervals, error) {
- // check that an interval entry exists
- key := peerIntervalKey(peer, bin)
- itv := &intervalstore.Intervals{}
- if err := p.statestore.Get(key, itv); err != nil {
- if errors.Is(err, storage.ErrNotFound) {
- // key interval values are ALWAYS > 0
- itv = intervalstore.NewIntervals(1)
- if err := p.statestore.Put(key, itv); err != nil {
- return nil, err
- }
- } else {
- return nil, err
- }
- }
- return itv, nil
-}
-
-func peerEpochKey(peer swarm.Address) string {
- return fmt.Sprintf("%s_epoch_%s", IntervalPrefix, peer.ByteString())
-}
-
-func peerIntervalKey(peer swarm.Address, bin uint8) string {
- return fmt.Sprintf("%s_%03d_%s", IntervalPrefix, bin, peer.ByteString())
-}
-
-func binIntervalKey(bin uint8) string {
- return fmt.Sprintf("%s_%03d", IntervalPrefix, bin)
-}
-
-func addressFromKey(key []byte) swarm.Address {
- addr := key[len(fmt.Sprintf("%s_%03d_", IntervalPrefix, 0)):]
- return swarm.NewAddress(addr)
-}
-
-type syncPeer struct {
- address swarm.Address
- binCancelFuncs map[uint8]func() // slice of context cancel funcs for historical sync. index is bin
- po uint8
- cursors []uint64
-
- mtx sync.Mutex
- wg sync.WaitGroup
-}
-
-func newSyncPeer(addr swarm.Address, bins, po uint8) *syncPeer {
- return &syncPeer{
- address: addr,
- binCancelFuncs: make(map[uint8]func(), bins),
- po: po,
- }
-}
-
-// called when peer disconnects or on shutdown, cleans up ongoing sync operations
-func (p *syncPeer) stop() {
- for bin, c := range p.binCancelFuncs {
- c()
- delete(p.binCancelFuncs, bin)
- }
- p.wg.Wait()
-}
-
-func (p *syncPeer) setBinCancel(cf func(), bin uint8) {
- p.binCancelFuncs[bin] = cf
-}
-
-func (p *syncPeer) cancelBin(bin uint8) {
- if c, ok := p.binCancelFuncs[bin]; ok {
- c()
- delete(p.binCancelFuncs, bin)
- }
-}
-
-func (p *syncPeer) isBinSyncing(bin uint8) bool {
- _, ok := p.binCancelFuncs[bin]
- return ok
-}
diff --git a/pkg/puller/puller_js.go b/pkg/puller/puller_js.go
new file mode 100644
index 00000000000..16bf369ae09
--- /dev/null
+++ b/pkg/puller/puller_js.go
@@ -0,0 +1,162 @@
+//go:build js
+// +build js
+
+package puller
+
+import (
+ "context"
+ "errors"
+ "math"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/pullsync"
+ "github.com/ethersphere/bee/v2/pkg/rate"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storer"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ ratelimit "golang.org/x/time/rate"
+)
+
+type Puller struct {
+ base swarm.Address
+
+ topology topology.Driver
+ radius storer.RadiusChecker
+ statestore storage.StateStorer
+ syncer pullsync.Interface
+ blockLister p2p.Blocklister
+
+ logger log.Logger
+
+ syncPeers map[string]*syncPeer // key is peer address
+ syncPeersMtx sync.Mutex
+ intervalMtx sync.Mutex
+
+ cancel func()
+
+ wg sync.WaitGroup
+
+ bins uint8 // how many bins do we support
+
+ rate *rate.Rate // rate of historical syncing
+
+ start sync.Once
+
+ limiter *ratelimit.Limiter
+}
+
+func New(
+ addr swarm.Address,
+ stateStore storage.StateStorer,
+ topology topology.Driver,
+ reserveState storer.RadiusChecker,
+ pullSync pullsync.Interface,
+ blockLister p2p.Blocklister,
+ logger log.Logger,
+ o Options,
+) *Puller {
+ bins := swarm.MaxBins
+ if o.Bins != 0 {
+ bins = o.Bins
+ }
+ p := &Puller{
+ base: addr,
+ statestore: stateStore,
+ topology: topology,
+ radius: reserveState,
+ syncer: pullSync,
+ logger: logger.WithName(loggerName).Register(),
+ syncPeers: make(map[string]*syncPeer),
+ bins: bins,
+ blockLister: blockLister,
+ rate: rate.New(DefaultHistRateWindow),
+ cancel: func() { /* Noop, since the context is initialized in the Start(). */ },
+ limiter: ratelimit.NewLimiter(ratelimit.Every(time.Second/maxChunksPerSecond), maxChunksPerSecond),
+ }
+
+ return p
+}
+
+// syncPeerBin will start historical and live syncing for the peer for a particular bin.
+// Must be called under syncPeer lock.
+func (p *Puller) syncPeerBin(parentCtx context.Context, peer *syncPeer, bin uint8, cursor uint64) {
+ loggerV2 := p.logger.V(2).Register()
+
+ ctx, cancel := context.WithCancel(parentCtx)
+ peer.setBinCancel(cancel, bin)
+
+ sync := func(isHistorical bool, address swarm.Address, start uint64) {
+ defer p.wg.Done()
+ defer peer.wg.Done()
+
+ var err error
+
+ for {
+ if isHistorical { // override start with the next interval if historical syncing
+ start, err = p.nextPeerInterval(address, bin)
+ if err != nil {
+ p.logger.Error(err, "syncWorker nextPeerInterval failed, quitting")
+ return
+ }
+
+ // historical sync has caught up to the cursor, exit
+ if start > cursor {
+ return
+ }
+ }
+
+ select {
+ case <-ctx.Done():
+ loggerV2.Debug("syncWorker context cancelled", "peer_address", address, "bin", bin)
+ return
+ default:
+ }
+
+ syncStart := time.Now()
+ top, count, err := p.syncer.Sync(ctx, address, bin, start)
+
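+ // a topmost of MaxUint64 would overflow "start = top + 1" below, so treat it as fatal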
+ if top == math.MaxUint64 {
+ p.logger.Error(nil, "syncWorker max uint64 encountered, quitting", "peer_address", address, "bin", bin, "from", start, "topmost", top)
+ return
+ }
+
+ if err != nil {
+ if errors.Is(err, p2p.ErrPeerNotFound) {
+ p.logger.Debug("syncWorker interval failed, quitting", "error", err, "peer_address", address, "bin", bin, "cursor", cursor, "start", start, "topmost", top)
+ return
+ }
+ loggerV2.Debug("syncWorker interval failed", "error", err, "peer_address", address, "bin", bin, "cursor", cursor, "start", start, "topmost", top)
+ }
+
+ _ = p.limiter.WaitN(ctx, count)
+
+ if isHistorical {
+ p.rate.Add(count)
+ }
+
+ // pulled at least one chunk
+ if top >= start {
+ if err := p.addPeerInterval(address, bin, start, top); err != nil {
+ p.logger.Error(err, "syncWorker could not persist interval for peer, quitting", "peer_address", address)
+ return
+ }
+ loggerV2.Debug("syncWorker pulled", "bin", bin, "start", start, "topmost", top, "isHistorical", isHistorical, "duration", time.Since(syncStart), "peer_address", address)
+ start = top + 1
+ }
+ }
+ }
+
+ if cursor > 0 {
+ peer.wg.Add(1)
+ p.wg.Add(1)
+ go sync(true, peer.address, cursor)
+ }
+
+ peer.wg.Add(1)
+ p.wg.Add(1)
+ go sync(false, peer.address, cursor+1)
+}
diff --git a/pkg/puller/puller_shared.go b/pkg/puller/puller_shared.go
new file mode 100644
index 00000000000..d343a49da34
--- /dev/null
+++ b/pkg/puller/puller_shared.go
@@ -0,0 +1,419 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package puller provides protocol-orchestrating functionality
+// over the pullsync protocol. It pulls chunks from other nodes
+// and reacts to changes in network configuration.
+package puller
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "maps"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/puller/intervalstore"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "puller"
+
+var errCursorsLength = errors.New("cursors length mismatch")
+
+const (
+ DefaultHistRateWindow = time.Minute * 15
+
+ IntervalPrefix = "sync_interval"
+ recalcPeersDur = time.Minute * 5
+
+ maxChunksPerSecond = 1000 // roughly 4 MB/s
+
+ maxPODelta = 2 // the lowest level of proximity order (of peers) subtracted from the storage radius allowed for chunk syncing.
+)
+
+type Options struct {
+ Bins uint8
+}
+
+func (p *Puller) Start(ctx context.Context) {
+ p.start.Do(func() {
+ cctx, cancel := context.WithCancel(ctx)
+ p.cancel = cancel
+
+ p.wg.Add(1)
+ go p.manage(cctx)
+ })
+}
+
+func (p *Puller) SyncRate() float64 {
+ return p.rate.Rate()
+}
+
+func (p *Puller) manage(ctx context.Context) {
+ defer p.wg.Done()
+
+ c, unsubscribe := p.topology.SubscribeTopologyChange()
+ defer unsubscribe()
+
+ p.logger.Info("warmup period complete, starting worker")
+
+ var prevRadius uint8
+
+ onChange := func() {
+ p.syncPeersMtx.Lock()
+ defer p.syncPeersMtx.Unlock()
+
+ newRadius := p.radius.StorageRadius()
+
+ // reset all intervals below the new radius to resync:
+ // 1. previously evicted chunks
+ // 2. previously ignored chunks due to a higher radius
+ if newRadius < prevRadius {
+ for _, peer := range p.syncPeers {
+ p.disconnectPeer(peer.address)
+ }
+ if err := p.resetIntervals(prevRadius); err != nil {
+ p.logger.Debug("reset lower sync radius failed", "error", err)
+ }
+ p.logger.Debug("radius decrease", "old_radius", prevRadius, "new_radius", newRadius)
+ }
+ prevRadius = newRadius
+
+ // peersDisconnected is used to mark and prune peers that are no longer connected.
+ peersDisconnected := maps.Clone(p.syncPeers)
+
+ _ = p.topology.EachConnectedPeerRev(func(addr swarm.Address, po uint8) (stop, jumpToNext bool, err error) {
+ if _, ok := p.syncPeers[addr.ByteString()]; !ok {
+ p.syncPeers[addr.ByteString()] = newSyncPeer(addr, p.bins, po)
+ }
+ delete(peersDisconnected, addr.ByteString())
+ return false, false, nil
+ }, topology.Select{})
+
+ for _, peer := range peersDisconnected {
+ p.disconnectPeer(peer.address)
+ }
+
+ p.recalcPeers(ctx, newRadius)
+ }
+
+ tick := time.NewTicker(recalcPeersDur)
+ defer tick.Stop()
+
+ for {
+
+ onChange()
+
+ select {
+ case <-ctx.Done():
+ return
+ case <-tick.C:
+ case <-c:
+ }
+ }
+}
+
+// disconnectPeer cancels all existing syncing and removes the peer entry from the syncing map.
+// Must be called under lock.
+func (p *Puller) disconnectPeer(addr swarm.Address) {
+ loggerV2 := p.logger.V(2).Register()
+
+ loggerV2.Debug("disconnecting peer", "peer_address", addr)
+ if peer, ok := p.syncPeers[addr.ByteString()]; ok {
+ peer.mtx.Lock()
+ peer.stop()
+ peer.mtx.Unlock()
+ }
+ delete(p.syncPeers, addr.ByteString())
+}
+
+// recalcPeers starts or stops syncing process for peers per bin depending on the current sync radius.
+// Must be called under lock.
+func (p *Puller) recalcPeers(ctx context.Context, storageRadius uint8) {
+ var wg sync.WaitGroup
+ for _, peer := range p.syncPeers {
+ wg.Add(1)
+ p.wg.Add(1)
+ go func(peer *syncPeer) {
+ defer p.wg.Done()
+ defer wg.Done()
+ if err := p.syncPeer(ctx, peer, storageRadius); err != nil {
+ p.logger.Debug("sync peer failed", "peer_address", peer.address, "error", err)
+ }
+ }(peer)
+ }
+ wg.Wait()
+}
+
+func (p *Puller) syncPeer(ctx context.Context, peer *syncPeer, storageRadius uint8) error {
+ peer.mtx.Lock()
+ defer peer.mtx.Unlock()
+
+ if peer.cursors == nil {
+ cursors, epoch, err := p.syncer.GetCursors(ctx, peer.address)
+ if err != nil {
+ return fmt.Errorf("could not get cursors from peer %s: %w", peer.address, err)
+ }
+ peer.cursors = cursors
+
+ storedEpoch, err := p.getPeerEpoch(peer.address)
+ if err != nil {
+ return fmt.Errorf("retrieve epoch for peer %s: %w", peer.address, err)
+ }
+
+ if storedEpoch != epoch {
+ // cancel all bins
+ peer.stop()
+
+ p.logger.Debug("peer epoch change detected, resetting past synced intervals", "stored_epoch", storedEpoch, "new_epoch", epoch, "peer_address", peer.address)
+
+ err = p.resetPeerIntervals(peer.address)
+ if err != nil {
+ return fmt.Errorf("reset intervals for peer %s: %w", peer.address, err)
+ }
+ err = p.setPeerEpoch(peer.address, epoch)
+ if err != nil {
+ return fmt.Errorf("set epoch for peer %s: %w", peer.address, err)
+ }
+ }
+ }
+
+ if len(peer.cursors) != int(p.bins) {
+ return errCursorsLength
+ }
+
+ /*
+ The syncing behavior diverges for peers outside and within the storage radius.
+ For neighbor peers, we sync ALL bins greater than or equal to the storage radius.
+ For peers with PO lower than the storage radius, we must sync ONLY the bin that is the PO.
+ For peers with PO lower than the storage radius and even lower than the allowed minimum threshold,
+ no syncing is done.
+ */
+
+ if peer.po >= storageRadius {
+
+ // cancel all bins lower than the storage radius
+ for bin := uint8(0); bin < storageRadius; bin++ {
+ peer.cancelBin(bin)
+ }
+
+ // sync all bins >= storage radius
+ for bin, cur := range peer.cursors {
+ if bin >= int(storageRadius) && !peer.isBinSyncing(uint8(bin)) {
+ p.syncPeerBin(ctx, peer, uint8(bin), cur)
+ }
+ }
+
+ } else if storageRadius-peer.po <= maxPODelta {
+ // cancel all non-po bins, if any
+ for bin := uint8(0); bin < p.bins; bin++ {
+ if bin != peer.po {
+ peer.cancelBin(bin)
+ }
+ }
+ // sync PO bin only
+ if !peer.isBinSyncing(peer.po) {
+ p.syncPeerBin(ctx, peer, peer.po, peer.cursors[peer.po])
+ }
+ } else {
+ peer.stop()
+ }
+
+ return nil
+}
+
+func (p *Puller) Close() error {
+ p.logger.Info("shutting down")
+ p.cancel()
+ cc := make(chan struct{})
+ go func() {
+ defer close(cc)
+ p.wg.Wait()
+ }()
+ select {
+ case <-cc:
+ case <-time.After(10 * time.Second):
+ p.logger.Warning("shut down timeout, some goroutines may still be running")
+ }
+
+ return nil
+}
+
+func (p *Puller) addPeerInterval(peer swarm.Address, bin uint8, start, end uint64) (err error) {
+ p.intervalMtx.Lock()
+ defer p.intervalMtx.Unlock()
+
+ peerStreamKey := peerIntervalKey(peer, bin)
+ i, err := p.getOrCreateInterval(peer, bin)
+ if err != nil {
+ return err
+ }
+
+ i.Add(start, end)
+
+ return p.statestore.Put(peerStreamKey, i)
+}
+
+func (p *Puller) getPeerEpoch(peer swarm.Address) (uint64, error) {
+ p.intervalMtx.Lock()
+ defer p.intervalMtx.Unlock()
+
+ var epoch uint64
+ err := p.statestore.Get(peerEpochKey(peer), &epoch)
+ if err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ return 0, nil
+ }
+ return 0, err
+ }
+
+ return epoch, nil
+}
+
+func (p *Puller) setPeerEpoch(peer swarm.Address, epoch uint64) error {
+ p.intervalMtx.Lock()
+ defer p.intervalMtx.Unlock()
+
+ return p.statestore.Put(peerEpochKey(peer), epoch)
+}
+
+func (p *Puller) resetPeerIntervals(peer swarm.Address) (err error) {
+ p.intervalMtx.Lock()
+ defer p.intervalMtx.Unlock()
+
+ for bin := uint8(0); bin < p.bins; bin++ {
+ err = errors.Join(err, p.statestore.Delete(peerIntervalKey(peer, bin)))
+ }
+
+ return
+}
+
+func (p *Puller) resetIntervals(oldRadius uint8) (err error) {
+ p.intervalMtx.Lock()
+ defer p.intervalMtx.Unlock()
+
+ var deleteKeys []string
+
+ for bin := uint8(0); bin < p.bins; bin++ {
+ err = errors.Join(err,
+ p.statestore.Iterate(binIntervalKey(bin), func(key, _ []byte) (stop bool, err error) {
+ po := swarm.Proximity(addressFromKey(key).Bytes(), p.base.Bytes())
+
+ // 1. for neighbor peers, only reset the bins below the current radius
+ // 2. for non-neighbor peers, we must reset the entire history
+ if po >= oldRadius {
+ if bin < oldRadius {
+ deleteKeys = append(deleteKeys, string(key))
+ }
+ } else {
+ deleteKeys = append(deleteKeys, string(key))
+ }
+ return false, nil
+ }),
+ )
+ }
+
+ for _, k := range deleteKeys {
+ err = errors.Join(err, p.statestore.Delete(k))
+ }
+
+ return err
+}
+
+func (p *Puller) nextPeerInterval(peer swarm.Address, bin uint8) (uint64, error) {
+ p.intervalMtx.Lock()
+ defer p.intervalMtx.Unlock()
+
+ i, err := p.getOrCreateInterval(peer, bin)
+ if err != nil {
+ return 0, err
+ }
+
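+ // Next(0) returns the start of the first interval that still needs syncing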
+ start, _, _ := i.Next(0)
+ return start, nil
+}
+
+// Must be called under lock.
+func (p *Puller) getOrCreateInterval(peer swarm.Address, bin uint8) (*intervalstore.Intervals, error) {
+ // check that an interval entry exists
+ key := peerIntervalKey(peer, bin)
+ itv := &intervalstore.Intervals{}
+ if err := p.statestore.Get(key, itv); err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ // key interval values are ALWAYS > 0
+ itv = intervalstore.NewIntervals(1)
+ if err := p.statestore.Put(key, itv); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+ return itv, nil
+}
+
+func peerEpochKey(peer swarm.Address) string {
+ return fmt.Sprintf("%s_epoch_%s", IntervalPrefix, peer.ByteString())
+}
+
+func peerIntervalKey(peer swarm.Address, bin uint8) string {
+ return fmt.Sprintf("%s_%03d_%s", IntervalPrefix, bin, peer.ByteString())
+}
+
+func binIntervalKey(bin uint8) string {
+ return fmt.Sprintf("%s_%03d", IntervalPrefix, bin)
+}
+
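+// addressFromKey strips the fixed-width interval key prefix and returns the rest of the key as the peer address.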
+func addressFromKey(key []byte) swarm.Address {
+ addr := key[len(fmt.Sprintf("%s_%03d_", IntervalPrefix, 0)):]
+ return swarm.NewAddress(addr)
+}
+
+type syncPeer struct {
+ address swarm.Address
+ binCancelFuncs map[uint8]func() // context cancel funcs for historical sync, keyed by bin
+ po uint8
+ cursors []uint64
+
+ mtx sync.Mutex
+ wg sync.WaitGroup
+}
+
+func newSyncPeer(addr swarm.Address, bins, po uint8) *syncPeer {
+ return &syncPeer{
+ address: addr,
+ binCancelFuncs: make(map[uint8]func(), bins),
+ po: po,
+ }
+}
+
+// called when peer disconnects or on shutdown, cleans up ongoing sync operations
+func (p *syncPeer) stop() {
+ for bin, c := range p.binCancelFuncs {
+ c()
+ delete(p.binCancelFuncs, bin)
+ }
+ p.wg.Wait()
+}
+
+func (p *syncPeer) setBinCancel(cf func(), bin uint8) {
+ p.binCancelFuncs[bin] = cf
+}
+
+func (p *syncPeer) cancelBin(bin uint8) {
+ if c, ok := p.binCancelFuncs[bin]; ok {
+ c()
+ delete(p.binCancelFuncs, bin)
+ }
+}
+
+func (p *syncPeer) isBinSyncing(bin uint8) bool {
+ _, ok := p.binCancelFuncs[bin]
+ return ok
+}
diff --git a/pkg/pullsync/metrics.go b/pkg/pullsync/metrics.go
index 57e26916c93..4dd7291a1c2 100644
--- a/pkg/pullsync/metrics.go
+++ b/pkg/pullsync/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/pullsync/pullsync.go b/pkg/pullsync/pullsync.go
index 064099e765c..723ebab246d 100644
--- a/pkg/pullsync/pullsync.go
+++ b/pkg/pullsync/pullsync.go
@@ -1,20 +1,14 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package pullsync provides the pullsync protocol
-// implementation.
package pullsync
import (
"context"
- "encoding/hex"
"errors"
"fmt"
"io"
- "math"
"sync/atomic"
- "time"
"github.com/ethersphere/bee/v2/pkg/bitvector"
"github.com/ethersphere/bee/v2/pkg/cac"
@@ -31,39 +25,6 @@ import (
"resenje.org/singleflight"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "pullsync"
-
-const (
- protocolName = "pullsync"
- protocolVersion = "1.4.0"
- streamName = "pullsync"
- cursorStreamName = "cursors"
-)
-
-var (
- ErrUnsolicitedChunk = errors.New("peer sent unsolicited chunk")
-)
-
-const (
- MaxCursor = math.MaxUint64
- DefaultMaxPage uint64 = 250
- pageTimeout = time.Second
- makeOfferTimeout = 15 * time.Minute
- handleMaxChunksPerSecond = 250
- handleRequestsLimitRate = time.Second / handleMaxChunksPerSecond // handle max `handleMaxChunksPerSecond` chunks per second per peer
-)
-
-// Interface is the PullSync interface.
-type Interface interface {
- // Sync syncs a batch of chunks starting at a start BinID.
- // It returns the BinID of highest chunk that was synced from the given
- // batch and the total number of chunks the downstream peer has sent.
- Sync(ctx context.Context, peer swarm.Address, bin uint8, start uint64) (topmost uint64, count int, err error)
- // GetCursors retrieves all cursors from a downstream peer.
- GetCursors(ctx context.Context, peer swarm.Address) ([]uint64, uint64, error)
-}
-
type Syncer struct {
streamer p2p.Streamer
metrics metrics
@@ -108,25 +69,6 @@ func New(
}
}
-func (s *Syncer) Protocol() p2p.ProtocolSpec {
- return p2p.ProtocolSpec{
- Name: protocolName,
- Version: protocolVersion,
- StreamSpecs: []p2p.StreamSpec{
- {
- Name: streamName,
- Handler: s.handler,
- },
- {
- Name: cursorStreamName,
- Handler: s.cursorHandler,
- },
- },
- DisconnectIn: s.disconnect,
- DisconnectOut: s.disconnect,
- }
-}
-
// handler handles an incoming request to sync an interval
func (s *Syncer) handler(streamCtx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
@@ -400,216 +342,21 @@ func (s *Syncer) Sync(ctx context.Context, peer swarm.Address, bin uint8, start
return topmost, chunksPut, chunkErr
}
-// makeOffer tries to assemble an offer for a given requested interval.
-func (s *Syncer) makeOffer(ctx context.Context, rn pb.Get) (*pb.Offer, error) {
-
- ctx, cancel := context.WithTimeout(ctx, makeOfferTimeout)
- defer cancel()
-
- addrs, top, err := s.collectAddrs(ctx, uint8(rn.Bin), rn.Start)
- if err != nil {
- return nil, err
- }
-
- o := new(pb.Offer)
- o.Topmost = top
- o.Chunks = make([]*pb.Chunk, 0, len(addrs))
- for _, v := range addrs {
- o.Chunks = append(o.Chunks, &pb.Chunk{Address: v.Address.Bytes(), BatchID: v.BatchID, StampHash: v.StampHash})
- }
- return o, nil
-}
-
-type collectAddrsResult struct {
- chs []*storer.BinC
- topmost uint64
-}
-
-// collectAddrs collects chunk addresses at a bin starting at some start BinID until a limit is reached.
-// The function waits for an unbounded amount of time for the first chunk to arrive.
-// After the arrival of the first chunk, the subsequent chunks have a limited amount of time to arrive,
-// after which the function returns the collected slice of chunks.
-func (s *Syncer) collectAddrs(ctx context.Context, bin uint8, start uint64) ([]*storer.BinC, uint64, error) {
- loggerV2 := s.logger.V(2).Register()
-
- v, _, err := s.intervalsSF.Do(ctx, sfKey(bin, start), func(ctx context.Context) (*collectAddrsResult, error) {
- var (
- chs []*storer.BinC
- topmost uint64
- timer *time.Timer
- timerC <-chan time.Time
- )
- chC, unsub, errC := s.store.SubscribeBin(ctx, bin, start)
- defer func() {
- unsub()
- if timer != nil {
- timer.Stop()
- }
- }()
-
- limit := s.maxPage
-
- LOOP:
- for limit > 0 {
- select {
- case c, ok := <-chC:
- if !ok {
- break LOOP // The stream has been closed.
- }
-
- chs = append(chs, &storer.BinC{Address: c.Address, BatchID: c.BatchID, StampHash: c.StampHash})
- if c.BinID > topmost {
- topmost = c.BinID
- }
- limit--
- if timer == nil {
- timer = time.NewTimer(pageTimeout)
- } else {
- if !timer.Stop() {
- <-timer.C
- }
- timer.Reset(pageTimeout)
- }
- timerC = timer.C
- case err := <-errC:
- return nil, err
- case <-ctx.Done():
- return nil, ctx.Err()
- case <-timerC:
- loggerV2.Debug("batch timeout timer triggered")
- // return batch if new chunks are not received after some time
- break LOOP
- }
- }
-
- return &collectAddrsResult{chs: chs, topmost: topmost}, nil
- })
- if err != nil {
- return nil, 0, err
- }
- return v.chs, v.topmost, nil
-}
-
-// processWant compares a received Want to a sent Offer and returns
-// the appropriate chunks from the local store.
-func (s *Syncer) processWant(ctx context.Context, o *pb.Offer, w *pb.Want) ([]swarm.Chunk, error) {
- bv, err := bitvector.NewFromBytes(w.BitVector, len(o.Chunks))
- if err != nil {
- return nil, err
- }
-
- chunks := make([]swarm.Chunk, 0, len(o.Chunks))
- for i := 0; i < len(o.Chunks); i++ {
- if bv.Get(i) {
- ch := o.Chunks[i]
- addr := swarm.NewAddress(ch.Address)
- s.metrics.SentWanted.Inc()
- c, err := s.store.ReserveGet(ctx, addr, ch.BatchID, ch.StampHash)
- if err != nil {
- s.logger.Debug("processing want: unable to find chunk", "chunk_address", addr, "batch_id", hex.EncodeToString(ch.BatchID))
- chunks = append(chunks, swarm.NewChunk(swarm.ZeroAddress, nil))
- s.metrics.MissingChunks.Inc()
- continue
- }
- chunks = append(chunks, c)
- }
- }
- return chunks, nil
-}
-
-func (s *Syncer) GetCursors(ctx context.Context, peer swarm.Address) (retr []uint64, epoch uint64, err error) {
- loggerV2 := s.logger.V(2).Register()
-
- stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, cursorStreamName)
- if err != nil {
- return nil, 0, fmt.Errorf("new stream: %w", err)
- }
- loggerV2.Debug("getting cursors from peer", "peer_address", peer)
- defer func() {
- if err != nil {
- _ = stream.Reset()
- loggerV2.Debug("error getting cursors from peer", "peer_address", peer, "error", err)
- } else {
- stream.FullClose()
- }
- }()
-
- w, r := protobuf.NewWriterAndReader(stream)
- syn := &pb.Syn{}
- if err = w.WriteMsgWithContext(ctx, syn); err != nil {
- return nil, 0, fmt.Errorf("write syn: %w", err)
- }
-
- var ack pb.Ack
- if err = r.ReadMsgWithContext(ctx, &ack); err != nil {
- return nil, 0, fmt.Errorf("read ack: %w", err)
- }
-
- return ack.Cursors, ack.Epoch, nil
-}
-
-func (s *Syncer) cursorHandler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
- loggerV2 := s.logger.V(2).Register()
-
- w, r := protobuf.NewWriterAndReader(stream)
- loggerV2.Debug("peer wants cursors", "peer_address", p.Address)
- defer func() {
- if err != nil {
- _ = stream.Reset()
- loggerV2.Debug("error getting cursors for peer", "peer_address", p.Address, "error", err)
- } else {
- _ = stream.FullClose()
- }
- }()
-
- var syn pb.Syn
- if err := r.ReadMsgWithContext(ctx, &syn); err != nil {
- return fmt.Errorf("read syn: %w", err)
- }
-
- var ack pb.Ack
- ints, epoch, err := s.store.ReserveLastBinIDs()
- if err != nil {
- return err
- }
- ack.Cursors = ints
- ack.Epoch = epoch
- if err = w.WriteMsgWithContext(ctx, &ack); err != nil {
- return fmt.Errorf("write ack: %w", err)
- }
-
- return nil
-}
-
-func (s *Syncer) disconnect(peer p2p.Peer) error {
- s.limiter.Clear(peer.Address.ByteString())
- return nil
-}
-
-func (s *Syncer) Close() error {
- s.logger.Info("pull syncer shutting down")
- close(s.quit)
- cc := make(chan struct{})
- go func() {
- defer close(cc)
- for {
- if s.syncInProgress.Load() > 0 {
- time.Sleep(100 * time.Millisecond)
- continue
- }
- break
- }
- }()
-
- select {
- case <-cc:
- case <-time.After(5 * time.Second):
- s.logger.Warning("pull syncer shutting down with running goroutines")
+func (s *Syncer) Protocol() p2p.ProtocolSpec {
+ return p2p.ProtocolSpec{
+ Name: protocolName,
+ Version: protocolVersion,
+ StreamSpecs: []p2p.StreamSpec{
+ {
+ Name: streamName,
+ Handler: s.handler,
+ },
+ {
+ Name: cursorStreamName,
+ Handler: s.cursorHandler,
+ },
+ },
+ DisconnectIn: s.disconnect,
+ DisconnectOut: s.disconnect,
}
- return nil
-}
-
-// singleflight key for intervals
-func sfKey(bin uint8, start uint64) string {
- return fmt.Sprintf("%d-%d", bin, start)
}
diff --git a/pkg/pullsync/pullsync_js.go b/pkg/pullsync/pullsync_js.go
new file mode 100644
index 00000000000..ec3067cbe4c
--- /dev/null
+++ b/pkg/pullsync/pullsync_js.go
@@ -0,0 +1,178 @@
+//go:build js
+// +build js
+
+package pullsync
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync/atomic"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/pullsync/pb"
+ "github.com/ethersphere/bee/v2/pkg/ratelimit"
+ "github.com/ethersphere/bee/v2/pkg/soc"
+ "github.com/ethersphere/bee/v2/pkg/storer"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "resenje.org/singleflight"
+)
+
+type Syncer struct {
+ streamer p2p.Streamer
+ logger log.Logger
+ store storer.Reserve
+ quit chan struct{}
+ unwrap func(swarm.Chunk)
+ gsocHandler func(*soc.SOC)
+ validStamp postage.ValidStampFn
+ intervalsSF singleflight.Group[string, *collectAddrsResult]
+ syncInProgress atomic.Int32
+
+ maxPage uint64
+
+ limiter *ratelimit.Limiter
+
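+ // embedding the full pullsync interfaces keeps the js Syncer API-compatible;
+ // methods without a js implementation resolve to these nil embeds and must not be called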
+ Interface
+ io.Closer
+}
+
+func New(
+ streamer p2p.Streamer,
+ store storer.Reserve,
+ unwrap func(swarm.Chunk),
+ gsocHandler func(*soc.SOC),
+ validStamp postage.ValidStampFn,
+ logger log.Logger,
+ maxPage uint64,
+) *Syncer {
+
+ return &Syncer{
+ streamer: streamer,
+ store: store,
+ unwrap: unwrap,
+ gsocHandler: gsocHandler,
+ validStamp: validStamp,
+ logger: logger.WithName(loggerName).Register(),
+ quit: make(chan struct{}),
+ maxPage: maxPage,
+ limiter: ratelimit.New(handleRequestsLimitRate, int(maxPage)),
+ }
+}
+
+func (s *Syncer) Protocol() p2p.ProtocolSpec {
+ return p2p.ProtocolSpec{
+ Name: protocolName,
+ Version: protocolVersion,
+ StreamSpecs: []p2p.StreamSpec{
+ {
+ Name: streamName,
+ Handler: s.handler,
+ },
+ {
+ Name: cursorStreamName,
+ Handler: s.cursorHandler,
+ },
+ },
+ DisconnectIn: s.disconnect,
+ DisconnectOut: s.disconnect,
+ }
+}
+
+// handler handles an incoming request to sync an interval
+func (s *Syncer) handler(streamCtx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
+
+ select {
+ case <-s.quit:
+ return nil
+ default:
+ s.syncInProgress.Add(1)
+ defer s.syncInProgress.Add(-1)
+ }
+
+ r := protobuf.NewReader(stream)
+ defer func() {
+ if err != nil {
+ _ = stream.Reset()
+ } else {
+ _ = stream.FullClose()
+ }
+ }()
+
+ ctx, cancel := context.WithCancel(streamCtx)
+ defer cancel()
+
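+ // cancel the stream context as soon as the syncer shuts down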
+ go func() {
+ select {
+ case <-s.quit:
+ cancel()
+ case <-ctx.Done():
+ return
+ }
+ }()
+
+ var rn pb.Get
+ if err := r.ReadMsgWithContext(ctx, &rn); err != nil {
+ return fmt.Errorf("read get range: %w", err)
+ }
+
+ // recreate the reader to allow the first one to be garbage collected
+ // before the makeOffer function call, to reduce the total memory allocated
+ // while makeOffer is executing (waiting for the new chunks)
+ w, r := protobuf.NewWriterAndReader(stream)
+
+ // make an offer to the upstream peer in return for the requested range
+ offer, err := s.makeOffer(ctx, rn)
+ if err != nil {
+ return fmt.Errorf("make offer: %w", err)
+ }
+
+ if err := w.WriteMsgWithContext(ctx, offer); err != nil {
+ return fmt.Errorf("write offer: %w", err)
+ }
+
+ // we don't have any hashes to offer in this range (the
+ // interval is empty). nothing more to do
+ if len(offer.Chunks) == 0 {
+ return nil
+ }
+
+ var want pb.Want
+ if err := r.ReadMsgWithContext(ctx, &want); err != nil {
+ return fmt.Errorf("read want: %w", err)
+ }
+
+ chs, err := s.processWant(ctx, offer, &want)
+ if err != nil {
+ return fmt.Errorf("process want: %w", err)
+ }
+
+ // slow down future requests
+ waitDur, err := s.limiter.Wait(streamCtx, p.Address.ByteString(), max(1, len(chs)))
+ if err != nil {
+ return fmt.Errorf("rate limiter: %w", err)
+ }
+ if waitDur > 0 {
+ s.logger.Debug("rate limited peer", "wait_duration", waitDur, "peer_address", p.Address)
+ }
+
+ for _, c := range chs {
+ var stamp []byte
+ if c.Stamp() != nil {
+ stamp, err = c.Stamp().MarshalBinary()
+ if err != nil {
+ return fmt.Errorf("serialise stamp: %w", err)
+ }
+ }
+
+ deliver := pb.Delivery{Address: c.Address().Bytes(), Data: c.Data(), Stamp: stamp}
+ if err := w.WriteMsgWithContext(ctx, &deliver); err != nil {
+ return fmt.Errorf("write delivery: %w", err)
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/pullsync/pullsync_shared.go b/pkg/pullsync/pullsync_shared.go
new file mode 100644
index 00000000000..c26e33ea914
--- /dev/null
+++ b/pkg/pullsync/pullsync_shared.go
@@ -0,0 +1,270 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pullsync provides the pullsync protocol
+// implementation.
+package pullsync
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/bitvector"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/pullsync/pb"
+ "github.com/ethersphere/bee/v2/pkg/storer"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "pullsync"
+
+const (
+ protocolName = "pullsync"
+ protocolVersion = "1.4.0"
+ streamName = "pullsync"
+ cursorStreamName = "cursors"
+)
+
+var (
+ ErrUnsolicitedChunk = errors.New("peer sent unsolicited chunk")
+)
+
+const (
+ MaxCursor = math.MaxUint64
+ DefaultMaxPage uint64 = 250
+ pageTimeout = time.Second
+ makeOfferTimeout = 15 * time.Minute
+ handleMaxChunksPerSecond = 250
+ handleRequestsLimitRate = time.Second / handleMaxChunksPerSecond // handle max `handleMaxChunksPerSecond` chunks per second per peer
+)
+
+// Interface is the PullSync interface.
+type Interface interface {
+ // Sync syncs a batch of chunks starting at a start BinID.
+ // It returns the BinID of highest chunk that was synced from the given
+ // batch and the total number of chunks the downstream peer has sent.
+ Sync(ctx context.Context, peer swarm.Address, bin uint8, start uint64) (topmost uint64, count int, err error)
+ // GetCursors retrieves all cursors from a downstream peer.
+ GetCursors(ctx context.Context, peer swarm.Address) ([]uint64, uint64, error)
+}
+
+// makeOffer tries to assemble an offer for a given requested interval.
+func (s *Syncer) makeOffer(ctx context.Context, rn pb.Get) (*pb.Offer, error) {
+
+ ctx, cancel := context.WithTimeout(ctx, makeOfferTimeout)
+ defer cancel()
+
+ addrs, top, err := s.collectAddrs(ctx, uint8(rn.Bin), rn.Start)
+ if err != nil {
+ return nil, err
+ }
+
+ o := new(pb.Offer)
+ o.Topmost = top
+ o.Chunks = make([]*pb.Chunk, 0, len(addrs))
+ for _, v := range addrs {
+ o.Chunks = append(o.Chunks, &pb.Chunk{Address: v.Address.Bytes(), BatchID: v.BatchID, StampHash: v.StampHash})
+ }
+ return o, nil
+}
+
+type collectAddrsResult struct {
+ chs []*storer.BinC
+ topmost uint64
+}
+
+// collectAddrs collects chunk addresses at a bin starting at some start BinID until a limit is reached.
+// The function waits for an unbounded amount of time for the first chunk to arrive.
+// After the arrival of the first chunk, the subsequent chunks have a limited amount of time to arrive,
+// after which the function returns the collected slice of chunks.
+func (s *Syncer) collectAddrs(ctx context.Context, bin uint8, start uint64) ([]*storer.BinC, uint64, error) {
+ loggerV2 := s.logger.V(2).Register()
+
+ v, _, err := s.intervalsSF.Do(ctx, sfKey(bin, start), func(ctx context.Context) (*collectAddrsResult, error) {
+ var (
+ chs []*storer.BinC
+ topmost uint64
+ timer *time.Timer
+ timerC <-chan time.Time
+ )
+ chC, unsub, errC := s.store.SubscribeBin(ctx, bin, start)
+ defer func() {
+ unsub()
+ if timer != nil {
+ timer.Stop()
+ }
+ }()
+
+ limit := s.maxPage
+
+ LOOP:
+ for limit > 0 {
+ select {
+ case c, ok := <-chC:
+ if !ok {
+ break LOOP // The stream has been closed.
+ }
+
+ chs = append(chs, &storer.BinC{Address: c.Address, BatchID: c.BatchID, StampHash: c.StampHash})
+ if c.BinID > topmost {
+ topmost = c.BinID
+ }
+ limit--
+ if timer == nil {
+ timer = time.NewTimer(pageTimeout)
+ } else {
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timer.Reset(pageTimeout)
+ }
+ timerC = timer.C
+ case err := <-errC:
+ return nil, err
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-timerC:
+ loggerV2.Debug("batch timeout timer triggered")
+ // return batch if new chunks are not received after some time
+ break LOOP
+ }
+ }
+
+ return &collectAddrsResult{chs: chs, topmost: topmost}, nil
+ })
+ if err != nil {
+ return nil, 0, err
+ }
+ return v.chs, v.topmost, nil
+}
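+
+// Timing contract of collectAddrs in concrete numbers (illustrative): the
+// first chunk may take arbitrarily long, bounded only by the caller's
+// context (makeOfferTimeout = 15m when called via makeOffer); every
+// subsequent chunk must arrive within pageTimeout (1s) or the batch is cut
+// short; and at most maxPage chunks are returned per call.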
+
+// processWant compares a received Want to a sent Offer and returns
+// the appropriate chunks from the local store.
+func (s *Syncer) processWant(ctx context.Context, o *pb.Offer, w *pb.Want) ([]swarm.Chunk, error) {
+ bv, err := bitvector.NewFromBytes(w.BitVector, len(o.Chunks))
+ if err != nil {
+ return nil, err
+ }
+
+ chunks := make([]swarm.Chunk, 0, len(o.Chunks))
+ for i := 0; i < len(o.Chunks); i++ {
+ if bv.Get(i) {
+ ch := o.Chunks[i]
+ addr := swarm.NewAddress(ch.Address)
+
+ c, err := s.store.ReserveGet(ctx, addr, ch.BatchID, ch.StampHash)
+ if err != nil {
+ s.logger.Debug("processing want: unable to find chunk", "chunk_address", addr, "batch_id", hex.EncodeToString(ch.BatchID))
+ chunks = append(chunks, swarm.NewChunk(swarm.ZeroAddress, nil))
+
+ continue
+ }
+ chunks = append(chunks, c)
+ }
+ }
+ return chunks, nil
+}
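+
+// A minimal sketch of the requesting side, assuming the bitvector package's
+// New/Set/Bytes helpers (illustrative, not part of this file):
+//
+//	bv, _ := bitvector.New(len(offer.Chunks)) // one bit per offered chunk
+//	bv.Set(0)                                 // want the first chunk
+//	bv.Set(2)                                 // want the third chunk
+//	want := &pb.Want{BitVector: bv.Bytes()}
+//
+// processWant then returns the wanted chunks in offer order; chunks missing
+// from the reserve come back as zero-address placeholders rather than
+// aborting the whole page.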
+
+func (s *Syncer) GetCursors(ctx context.Context, peer swarm.Address) (retr []uint64, epoch uint64, err error) {
+ loggerV2 := s.logger.V(2).Register()
+
+ stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, cursorStreamName)
+ if err != nil {
+ return nil, 0, fmt.Errorf("new stream: %w", err)
+ }
+ loggerV2.Debug("getting cursors from peer", "peer_address", peer)
+ defer func() {
+ if err != nil {
+ _ = stream.Reset()
+ loggerV2.Debug("error getting cursors from peer", "peer_address", peer, "error", err)
+ } else {
+ stream.FullClose()
+ }
+ }()
+
+ w, r := protobuf.NewWriterAndReader(stream)
+ syn := &pb.Syn{}
+ if err = w.WriteMsgWithContext(ctx, syn); err != nil {
+ return nil, 0, fmt.Errorf("write syn: %w", err)
+ }
+
+ var ack pb.Ack
+ if err = r.ReadMsgWithContext(ctx, &ack); err != nil {
+ return nil, 0, fmt.Errorf("read ack: %w", err)
+ }
+
+ return ack.Cursors, ack.Epoch, nil
+}
+
+func (s *Syncer) cursorHandler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
+ loggerV2 := s.logger.V(2).Register()
+
+ w, r := protobuf.NewWriterAndReader(stream)
+ loggerV2.Debug("peer wants cursors", "peer_address", p.Address)
+ defer func() {
+ if err != nil {
+ _ = stream.Reset()
+ loggerV2.Debug("error getting cursors for peer", "peer_address", p.Address, "error", err)
+ } else {
+ _ = stream.FullClose()
+ }
+ }()
+
+ var syn pb.Syn
+ if err := r.ReadMsgWithContext(ctx, &syn); err != nil {
+ return fmt.Errorf("read syn: %w", err)
+ }
+
+ var ack pb.Ack
+ ints, epoch, err := s.store.ReserveLastBinIDs()
+ if err != nil {
+ return err
+ }
+ ack.Cursors = ints
+ ack.Epoch = epoch
+ if err = w.WriteMsgWithContext(ctx, &ack); err != nil {
+ return fmt.Errorf("write ack: %w", err)
+ }
+
+ return nil
+}
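+
+// The cursors handshake implemented by GetCursors and cursorHandler
+// (illustrative):
+//
+//	requester                       responder
+//	    |---- Syn ------------------->|
+//	    |<--- Ack{Cursors, Epoch} ----|  store.ReserveLastBinIDs()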
+
+func (s *Syncer) disconnect(peer p2p.Peer) error {
+ s.limiter.Clear(peer.Address.ByteString())
+ return nil
+}
+
+func (s *Syncer) Close() error {
+ s.logger.Info("pull syncer shutting down")
+ close(s.quit)
+ cc := make(chan struct{})
+ go func() {
+ defer close(cc)
+ for {
+ if s.syncInProgress.Load() > 0 {
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ break
+ }
+ }()
+
+ select {
+ case <-cc:
+ case <-time.After(5 * time.Second):
+ s.logger.Warning("pull syncer shutting down with running goroutines")
+ }
+ return nil
+}
+
+// singleflight key for intervals
+func sfKey(bin uint8, start uint64) string {
+ return fmt.Sprintf("%d-%d", bin, start)
+}
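+
+// For example, sfKey(3, 1024) yields "3-1024", so concurrent offers for the
+// same bin and start BinID collapse into a single store subscription via
+// intervalsSF.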
diff --git a/pkg/pusher/metrics.go b/pkg/pusher/metrics.go
index 0a92951b42b..8ec6eba9c9d 100644
--- a/pkg/pusher/metrics.go
+++ b/pkg/pusher/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/pusher/pushed_js.go b/pkg/pusher/pushed_js.go
new file mode 100644
index 00000000000..83c7a5a27f4
--- /dev/null
+++ b/pkg/pusher/pushed_js.go
@@ -0,0 +1,203 @@
+package pusher
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/pushsync"
+ "github.com/ethersphere/bee/v2/pkg/stabilization"
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
+)
+
+type Service struct {
+ networkID uint64
+ storer Storer
+ pushSyncer pushsync.PushSyncer
+ batchExist postage.BatchExist
+ logger log.Logger
+ quit chan struct{}
+ chunksWorkerQuitC chan struct{}
+ inflight *inflight
+ attempts *attempts
+ smuggler chan OpChan
+}
+
+func New(
+ networkID uint64,
+ storer Storer,
+ pushSyncer pushsync.PushSyncer,
+ batchExist postage.BatchExist,
+ logger log.Logger,
+ startupStabilizer stabilization.Subscriber,
+ retryCount int,
+) *Service {
+ p := &Service{
+ networkID: networkID,
+ storer: storer,
+ pushSyncer: pushSyncer,
+ batchExist: batchExist,
+ logger: logger.WithName(loggerName).Register(),
+
+ quit: make(chan struct{}),
+ chunksWorkerQuitC: make(chan struct{}),
+ inflight: newInflight(),
+ attempts: &attempts{retryCount: retryCount, attempts: make(map[string]int)},
+ smuggler: make(chan OpChan),
+ }
+ go p.chunksWorker(startupStabilizer)
+ return p
+}
+
+// chunksWorker is a loop that keeps looking for chunks that are locally uploaded (by monitoring pushIndex),
+// pushes them to the closest peer, and gets a receipt.
+func (s *Service) chunksWorker(startupStabilizer stabilization.Subscriber) {
+ defer close(s.chunksWorkerQuitC)
+
+ sub, unsubscribe := startupStabilizer.Subscribe()
+ defer unsubscribe()
+
+ select {
+ case <-sub:
+ s.logger.Debug("node warmup check completed")
+ case <-s.quit:
+ return
+ }
+
+ var (
+ ctx, cancel = context.WithCancel(context.Background())
+ sem = make(chan struct{}, ConcurrentPushes)
+ cc = make(chan *Op)
+ )
+
+ // inflight.set enforces backpressure on the maximum number of inflight
+ // chunks and handles duplicates.
+ chunks, unsubscribe := s.storer.SubscribePush(ctx)
+ defer func() {
+ unsubscribe()
+ cancel()
+ }()
+
+ var wg sync.WaitGroup
+
+ push := func(op *Op) {
+ var (
+ err error
+ doRepeat bool
+ )
+
+ defer func() {
+ // no peer was found, which may mean that the node is suffering from connection issues;
+ // we must slow down the pusher to prevent constant retries
+ if errors.Is(err, topology.ErrNotFound) {
+ select {
+ case <-time.After(time.Second * 5):
+ case <-s.quit:
+ }
+ }
+
+ wg.Done()
+ <-sem
+ if doRepeat {
+ select {
+ case cc <- op:
+ case <-s.quit:
+ }
+ }
+ }()
+
+ spanCtx := ctx
+ if op.Span != nil {
+ spanCtx = tracing.WithContext(spanCtx, op.Span.Context())
+ } else {
+ op.Span = opentracing.NoopTracer{}.StartSpan("noOp")
+ }
+
+ if op.Direct {
+ err = s.pushDirect(spanCtx, s.logger, op)
+ } else {
+ doRepeat, err = s.pushDeferred(spanCtx, s.logger, op)
+ }
+
+ if err != nil {
+ ext.LogError(op.Span, err)
+ } else {
+ op.Span.LogFields(olog.Bool("success", true))
+ }
+ }
+
+ go func() {
+ for {
+ select {
+ case ch, ok := <-chunks:
+ if !ok {
+ chunks = nil
+ continue
+ }
+ select {
+ case cc <- &Op{Chunk: ch, Direct: false}:
+ case <-s.quit:
+ return
+ }
+ case apiC := <-s.smuggler:
+ go func() {
+ for {
+ select {
+ case op := <-apiC:
+ select {
+ case cc <- op:
+ case <-s.quit:
+ return
+ }
+ case <-s.quit:
+ return
+ }
+ }
+ }()
+ case <-s.quit:
+ return
+ }
+ }
+ }()
+
+ defer wg.Wait()
+
+ for {
+ select {
+ case op := <-cc:
+ idAddress, err := storage.IdentityAddress(op.Chunk)
+ if err != nil {
+ op.Err <- err
+ continue
+ }
+ op.identityAddress = idAddress
+ if s.inflight.set(idAddress, op.Chunk.Stamp().BatchID()) {
+ if op.Direct {
+ select {
+ case op.Err <- nil:
+ default:
+ s.logger.Debug("chunk already in flight, skipping", "chunk", op.Chunk.Address())
+ }
+ }
+ continue
+ }
+ select {
+ case sem <- struct{}{}:
+ wg.Add(1)
+ go push(op)
+ case <-s.quit:
+ return
+ }
+ case <-s.quit:
+ return
+ }
+ }
+}
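+
+// A minimal sketch of feeding a direct upload through the worker, assuming a
+// constructed *Service p and a stamped chunk ch (names are illustrative):
+//
+//	ops := make(chan *Op)
+//	p.AddFeed(ops)
+//	errC := make(chan error, 1)
+//	ops <- &Op{Chunk: ch, Direct: true, Err: errC}
+//	err := <-errC // nil once the chunk is pushed or already in flight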
diff --git a/pkg/pusher/pusher.go b/pkg/pusher/pusher.go
index 46632c18765..ff2dbfb0a39 100644
--- a/pkg/pusher/pusher.go
+++ b/pkg/pusher/pusher.go
@@ -1,16 +1,10 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package pusher provides protocol-orchestrating functionality
-// over the pushsync protocol. It makes sure that chunks meant
-// to be distributed over the network are sent used using the
-// pushsync protocol.
package pusher
import (
"context"
- "encoding/hex"
"errors"
"sync"
"time"
@@ -20,7 +14,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/pushsync"
"github.com/ethersphere/bee/v2/pkg/stabilization"
storage "github.com/ethersphere/bee/v2/pkg/storage"
- "github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/topology"
"github.com/ethersphere/bee/v2/pkg/tracing"
"github.com/opentracing/opentracing-go"
@@ -28,26 +21,6 @@ import (
olog "github.com/opentracing/opentracing-go/log"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "pusher"
-
-type Op struct {
- Chunk swarm.Chunk
- Err chan error
- Direct bool
- Span opentracing.Span
-
- identityAddress swarm.Address
-}
-
-type OpChan <-chan *Op
-
-type Storer interface {
- storage.PushReporter
- storage.PushSubscriber
- ReservePutter() storage.Putter
-}
-
type Service struct {
networkID uint64
storer Storer
@@ -62,12 +35,6 @@ type Service struct {
smuggler chan OpChan
}
-const (
- traceDuration = 30 * time.Second // duration for every root tracing span
- ConcurrentPushes = swarm.Branches // how many chunks to push simultaneously
- DefaultRetryCount = 6
-)
-
func New(
networkID uint64,
storer Storer,
@@ -246,130 +213,3 @@ func (s *Service) chunksWorker(startupStabilizer stabilization.Subscriber) {
}
}
}
-
-func (s *Service) pushDeferred(ctx context.Context, logger log.Logger, op *Op) (bool, error) {
- loggerV1 := logger.V(1).Build()
-
- defer s.inflight.delete(op.identityAddress, op.Chunk.Stamp().BatchID())
-
- ok, err := s.batchExist.Exists(op.Chunk.Stamp().BatchID())
- if !ok || err != nil {
- loggerV1.Warning(
- "stamp is no longer valid, skipping syncing for chunk",
- "batch_id", hex.EncodeToString(op.Chunk.Stamp().BatchID()),
- "chunk_address", op.Chunk.Address(),
- "error", err,
- )
- return false, errors.Join(err, s.storer.Report(ctx, op.Chunk, storage.ChunkCouldNotSync))
- }
-
- switch _, err := s.pushSyncer.PushChunkToClosest(ctx, op.Chunk); {
- case errors.Is(err, topology.ErrWantSelf):
- // store the chunk
- loggerV1.Debug("chunk stays here, i'm the closest node", "chunk_address", op.Chunk.Address())
- err = s.storer.ReservePutter().Put(ctx, op.Chunk)
- if err != nil {
- loggerV1.Error(err, "pusher: failed to store chunk")
- return true, err
- }
- err = s.storer.Report(ctx, op.Chunk, storage.ChunkStored)
- if err != nil {
- loggerV1.Error(err, "pusher: failed reporting chunk")
- return true, err
- }
- case errors.Is(err, pushsync.ErrShallowReceipt):
- if s.shallowReceipt(op.identityAddress) {
- return true, err
- }
- if err := s.storer.Report(ctx, op.Chunk, storage.ChunkSynced); err != nil {
- loggerV1.Error(err, "pusher: failed to report sync status")
- return true, err
- }
- case err == nil:
- if err := s.storer.Report(ctx, op.Chunk, storage.ChunkSynced); err != nil {
- loggerV1.Error(err, "pusher: failed to report sync status")
- return true, err
- }
- default:
- loggerV1.Error(err, "pusher: failed PushChunkToClosest")
- return true, err
- }
-
- return false, nil
-}
-
-func (s *Service) pushDirect(ctx context.Context, logger log.Logger, op *Op) error {
- loggerV1 := logger.V(1).Build()
-
- var err error
-
- defer func() {
- s.inflight.delete(op.identityAddress, op.Chunk.Stamp().BatchID())
- select {
- case op.Err <- err:
- default:
- loggerV1.Error(err, "pusher: failed to return error for direct upload")
- }
- }()
-
- ok, err := s.batchExist.Exists(op.Chunk.Stamp().BatchID())
- if !ok || err != nil {
- loggerV1.Warning(
- "stamp is no longer valid, skipping direct upload for chunk",
- "batch_id", hex.EncodeToString(op.Chunk.Stamp().BatchID()),
- "chunk_address", op.Chunk.Address(),
- "error", err,
- )
- return err
- }
-
- switch _, err = s.pushSyncer.PushChunkToClosest(ctx, op.Chunk); {
- case errors.Is(err, topology.ErrWantSelf):
- // store the chunk
- loggerV1.Debug("chunk stays here, i'm the closest node", "chunk_address", op.Chunk.Address())
- err = s.storer.ReservePutter().Put(ctx, op.Chunk)
- if err != nil {
- loggerV1.Error(err, "pusher: failed to store chunk")
- }
- case errors.Is(err, pushsync.ErrShallowReceipt):
- if s.shallowReceipt(op.identityAddress) {
- return err
- }
- // out of attempts for retry, swallow error
- err = nil
- case err != nil:
- loggerV1.Error(err, "pusher: failed PushChunkToClosest")
- }
-
- return err
-}
-
-func (s *Service) shallowReceipt(idAddress swarm.Address) bool {
- if s.attempts.try(idAddress) {
- return true
- }
- s.attempts.delete(idAddress)
- return false
-}
-
-func (s *Service) AddFeed(c <-chan *Op) {
- go func() {
- select {
- case s.smuggler <- c:
- s.logger.Info("got a chunk being smuggled")
- case <-s.quit:
- }
- }()
-}
-
-func (s *Service) Close() error {
- s.logger.Info("pusher shutting down")
- close(s.quit)
-
- // Wait for chunks worker to finish
- select {
- case <-s.chunksWorkerQuitC:
- case <-time.After(10 * time.Second):
- }
- return nil
-}
diff --git a/pkg/pusher/pusher_shared.go b/pkg/pusher/pusher_shared.go
new file mode 100644
index 00000000000..5fd62fa49cb
--- /dev/null
+++ b/pkg/pusher/pusher_shared.go
@@ -0,0 +1,176 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pusher provides protocol-orchestrating functionality
+// over the pushsync protocol. It makes sure that chunks meant
+// to be distributed over the network are sent using the
+// pushsync protocol.
+package pusher
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/pushsync"
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/opentracing/opentracing-go"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "pusher"
+
+type Op struct {
+ Chunk swarm.Chunk
+ Err chan error
+ Direct bool
+ Span opentracing.Span
+
+ identityAddress swarm.Address
+}
+
+type OpChan <-chan *Op
+
+type Storer interface {
+ storage.PushReporter
+ storage.PushSubscriber
+ ReservePutter() storage.Putter
+}
+
+const (
+ traceDuration = 30 * time.Second // duration for every root tracing span
+ ConcurrentPushes = swarm.Branches // how many chunks to push simultaneously
+ DefaultRetryCount = 6
+)
+
+func (s *Service) pushDeferred(ctx context.Context, logger log.Logger, op *Op) (bool, error) {
+ loggerV1 := logger.V(1).Build()
+
+ defer s.inflight.delete(op.identityAddress, op.Chunk.Stamp().BatchID())
+
+ ok, err := s.batchExist.Exists(op.Chunk.Stamp().BatchID())
+ if !ok || err != nil {
+ loggerV1.Warning(
+ "stamp is no longer valid, skipping syncing for chunk",
+ "batch_id", hex.EncodeToString(op.Chunk.Stamp().BatchID()),
+ "chunk_address", op.Chunk.Address(),
+ "error", err,
+ )
+ return false, errors.Join(err, s.storer.Report(ctx, op.Chunk, storage.ChunkCouldNotSync))
+ }
+
+ switch _, err := s.pushSyncer.PushChunkToClosest(ctx, op.Chunk); {
+ case errors.Is(err, topology.ErrWantSelf):
+ // store the chunk
+ loggerV1.Debug("chunk stays here, i'm the closest node", "chunk_address", op.Chunk.Address())
+ err = s.storer.ReservePutter().Put(ctx, op.Chunk)
+ if err != nil {
+ loggerV1.Error(err, "pusher: failed to store chunk")
+ return true, err
+ }
+ err = s.storer.Report(ctx, op.Chunk, storage.ChunkStored)
+ if err != nil {
+ loggerV1.Error(err, "pusher: failed reporting chunk")
+ return true, err
+ }
+ case errors.Is(err, pushsync.ErrShallowReceipt):
+ if s.shallowReceipt(op.identityAddress) {
+ return true, err
+ }
+ if err := s.storer.Report(ctx, op.Chunk, storage.ChunkSynced); err != nil {
+ loggerV1.Error(err, "pusher: failed to report sync status")
+ return true, err
+ }
+ case err == nil:
+ if err := s.storer.Report(ctx, op.Chunk, storage.ChunkSynced); err != nil {
+ loggerV1.Error(err, "pusher: failed to report sync status")
+ return true, err
+ }
+ default:
+ loggerV1.Error(err, "pusher: failed PushChunkToClosest")
+ return true, err
+ }
+
+ return false, nil
+}
+
+func (s *Service) pushDirect(ctx context.Context, logger log.Logger, op *Op) error {
+ loggerV1 := logger.V(1).Build()
+
+ var err error
+
+ defer func() {
+ s.inflight.delete(op.identityAddress, op.Chunk.Stamp().BatchID())
+ select {
+ case op.Err <- err:
+ default:
+ loggerV1.Error(err, "pusher: failed to return error for direct upload")
+ }
+ }()
+
+ ok, err := s.batchExist.Exists(op.Chunk.Stamp().BatchID())
+ if !ok || err != nil {
+ loggerV1.Warning(
+ "stamp is no longer valid, skipping direct upload for chunk",
+ "batch_id", hex.EncodeToString(op.Chunk.Stamp().BatchID()),
+ "chunk_address", op.Chunk.Address(),
+ "error", err,
+ )
+ return err
+ }
+
+ switch _, err = s.pushSyncer.PushChunkToClosest(ctx, op.Chunk); {
+ case errors.Is(err, topology.ErrWantSelf):
+ // store the chunk
+ loggerV1.Debug("chunk stays here, i'm the closest node", "chunk_address", op.Chunk.Address())
+ err = s.storer.ReservePutter().Put(ctx, op.Chunk)
+ if err != nil {
+ loggerV1.Error(err, "pusher: failed to store chunk")
+ }
+ case errors.Is(err, pushsync.ErrShallowReceipt):
+ if s.shallowReceipt(op.identityAddress) {
+ return err
+ }
+ // out of attempts for retry, swallow error
+ err = nil
+ case err != nil:
+ loggerV1.Error(err, "pusher: failed PushChunkToClosest")
+ }
+
+ return err
+}
+
+func (s *Service) shallowReceipt(idAddress swarm.Address) bool {
+ if s.attempts.try(idAddress) {
+ return true
+ }
+ s.attempts.delete(idAddress)
+ return false
+}
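+
+// Note: attempts.try counts retries per identity address against the
+// configured retryCount (DefaultRetryCount = 6), so a chunk that keeps
+// producing shallow receipts is retried a bounded number of times before
+// shallowReceipt clears its counter and gives up.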
+
+func (s *Service) AddFeed(c <-chan *Op) {
+ go func() {
+ select {
+ case s.smuggler <- c:
+ s.logger.Info("got a chunk being smuggled")
+ case <-s.quit:
+ }
+ }()
+}
+
+func (s *Service) Close() error {
+ s.logger.Info("pusher shutting down")
+ close(s.quit)
+
+ // Wait for chunks worker to finish
+ select {
+ case <-s.chunksWorkerQuitC:
+ case <-time.After(10 * time.Second):
+ }
+ return nil
+}
diff --git a/pkg/pushsync/metrics.go b/pkg/pushsync/metrics.go
index 7f4f5884bf0..eeb4e717460 100644
--- a/pkg/pushsync/metrics.go
+++ b/pkg/pushsync/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/pushsync/pushsync.go b/pkg/pushsync/pushsync.go
index 9a9754a9ab3..e05bd18febf 100644
--- a/pkg/pushsync/pushsync.go
+++ b/pkg/pushsync/pushsync.go
@@ -1,9 +1,6 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package pushsync provides the pushsync protocol
-// implementation.
package pushsync
import (
@@ -34,49 +31,6 @@ import (
olog "github.com/opentracing/opentracing-go/log"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "pushsync"
-
-const (
- protocolName = "pushsync"
- protocolVersion = "1.3.1"
- streamName = "pushsync"
-)
-
-const (
- defaultTTL = 30 * time.Second // request time to live
- preemptiveInterval = 5 * time.Second // P90 request time to live
- skiplistDur = 5 * time.Minute
- overDraftRefresh = time.Millisecond * 600
-)
-
-const (
- maxMultiplexForwards = 2 // number of extra peers to forward the request from the multiplex node
- maxPushErrors = 32
-)
-
-var (
- ErrNoPush = errors.New("could not push chunk")
- ErrOutOfDepthStoring = errors.New("storing outside of the neighborhood")
- ErrWarmup = errors.New("node warmup time not complete")
- ErrShallowReceipt = errors.New("shallow receipt")
-)
-
-type PushSyncer interface {
- PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error)
-}
-
-type Receipt struct {
- Address swarm.Address
- Signature []byte
- Nonce []byte
-}
-
-type Storer interface {
- storage.PushReporter
- ReservePutter() storage.Putter
-}
-
type PushSync struct {
address swarm.Address
networkID uint64
@@ -101,13 +55,6 @@ type PushSync struct {
shallowReceiptTolerance uint8
}
-type receiptResult struct {
- pushTime time.Time
- peer swarm.Address
- receipt *pb.Receipt
- err error
-}
-
func New(
address swarm.Address,
networkID uint64,
@@ -154,19 +101,6 @@ func New(
return ps
}
-func (s *PushSync) Protocol() p2p.ProtocolSpec {
- return p2p.ProtocolSpec{
- Name: protocolName,
- Version: protocolVersion,
- StreamSpecs: []p2p.StreamSpec{
- {
- Name: streamName,
- Handler: s.handler,
- },
- },
- }
-}
-
// handler handles chunk delivery from other node and forwards to its destination node.
// If the current node is the destination, it stores in the local store and sends a receipt.
func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
@@ -286,7 +220,7 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
return debit.Apply()
}
- if ps.topologyDriver.IsReachable() && swarm.Proximity(ps.address.Bytes(), chunkAddress.Bytes()) >= rad {
+ if swarm.Proximity(ps.address.Bytes(), chunkAddress.Bytes()) >= rad {
stored, reason = true, "is within AOR"
return store(ctx)
}
@@ -499,21 +433,6 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bo
return nil, ErrNoPush
}
-func (ps *PushSync) closestPeer(chunkAddress swarm.Address, origin bool, skipList []swarm.Address) (swarm.Address, error) {
- includeSelf := ps.fullNode && !origin
-
- peer, err := ps.topologyDriver.ClosestPeer(chunkAddress, includeSelf, topology.Select{Reachable: true, Healthy: true}, skipList...)
- if errors.Is(err, topology.ErrNotFound) {
- peer, err := ps.topologyDriver.ClosestPeer(chunkAddress, includeSelf, topology.Select{Reachable: true}, skipList...)
- if errors.Is(err, topology.ErrNotFound) {
- return ps.topologyDriver.ClosestPeer(chunkAddress, includeSelf, topology.Select{}, skipList...)
- }
- return peer, err
- }
-
- return peer, err
-}
-
func (ps *PushSync) push(parentCtx context.Context, resultChan chan<- receiptResult, peer swarm.Address, ch swarm.Chunk, action accounting.Action) {
// here we use a background timeout context because we do not want another push attempt to cancel this one
ctx, cancel := context.WithTimeout(context.Background(), defaultTTL)
@@ -593,67 +512,6 @@ func (ps *PushSync) checkReceipt(receipt *pb.Receipt) error {
return nil
}
-func (ps *PushSync) pushChunkToPeer(ctx context.Context, peer swarm.Address, ch swarm.Chunk) (receipt *pb.Receipt, err error) {
- streamer, err := ps.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
- if err != nil {
- return nil, fmt.Errorf("new stream for peer %s: %w", peer.String(), err)
- }
-
- defer func() {
- if err != nil {
- _ = streamer.Reset()
- } else {
- _ = streamer.FullClose()
- }
- }()
-
- w, r := protobuf.NewWriterAndReader(streamer)
- stamp, err := ch.Stamp().MarshalBinary()
- if err != nil {
- return nil, err
- }
- err = w.WriteMsgWithContext(ctx, &pb.Delivery{
- Address: ch.Address().Bytes(),
- Data: ch.Data(),
- Stamp: stamp,
- })
- if err != nil {
- return nil, err
- }
-
- // if the chunk has a tag, then it's from a local deferred upload
- if ch.TagID() != 0 {
- err = ps.store.Report(ctx, ch, storage.ChunkSent)
- if err != nil && !errors.Is(err, storage.ErrNotFound) {
- err = fmt.Errorf("tag %d increment: %w", ch.TagID(), err)
- return
- }
- }
-
- var rec pb.Receipt
- if err = r.ReadMsgWithContext(ctx, &rec); err != nil {
- return nil, err
- }
- if rec.Err != "" {
- return nil, p2p.NewChunkDeliveryError(rec.Err)
- }
-
- if !ch.Address().Equal(swarm.NewAddress(rec.Address)) {
- return nil, fmt.Errorf("invalid receipt. chunk %s, peer %s", ch.Address(), peer)
- }
-
- return &rec, nil
-}
-
-func (ps *PushSync) prepareCredit(ctx context.Context, peer swarm.Address, ch swarm.Chunk, origin bool) (accounting.Action, error) {
- creditAction, err := ps.accounting.PrepareCredit(ctx, peer, ps.pricer.PeerPrice(peer, ch.Address()), origin)
- if err != nil {
- return nil, err
- }
-
- return creditAction, nil
-}
-
func (ps *PushSync) measurePushPeer(t time.Time, err error) {
var status string
if err != nil {
@@ -677,7 +535,3 @@ func (ps *PushSync) validStampWrapper(f postage.ValidStampFn) postage.ValidStamp
return chunk, err
}
}
-
-func (s *PushSync) Close() error {
- return s.errSkip.Close()
-}
diff --git a/pkg/pushsync/pushsync_js.go b/pkg/pushsync/pushsync_js.go
new file mode 100644
index 00000000000..fd35606032b
--- /dev/null
+++ b/pkg/pushsync/pushsync_js.go
@@ -0,0 +1,505 @@
+//go:build js
+// +build js
+
+package pushsync
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/accounting"
+ "github.com/ethersphere/bee/v2/pkg/cac"
+ "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/pricer"
+ "github.com/ethersphere/bee/v2/pkg/pushsync/pb"
+ "github.com/ethersphere/bee/v2/pkg/skippeers"
+ "github.com/ethersphere/bee/v2/pkg/soc"
+ "github.com/ethersphere/bee/v2/pkg/stabilization"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
+)
+
+type PushSync struct {
+ address swarm.Address
+ networkID uint64
+ radius func() (uint8, error)
+ nonce []byte
+ streamer p2p.StreamerDisconnecter
+ store Storer
+ topologyDriver topology.Driver
+ unwrap func(swarm.Chunk)
+ gsocHandler func(*soc.SOC)
+ logger log.Logger
+ accounting accounting.Interface
+ pricer pricer.Interface
+ tracer *tracing.Tracer
+ validStamp postage.ValidStampFn
+ signer crypto.Signer
+ fullNode bool
+ errSkip *skippeers.List
+ stabilizer stabilization.Subscriber
+
+ shallowReceiptTolerance uint8
+}
+
+func New(
+ address swarm.Address,
+ networkID uint64,
+ nonce []byte,
+ streamer p2p.StreamerDisconnecter,
+ store Storer,
+ radius func() (uint8, error),
+ topology topology.Driver,
+ fullNode bool,
+ unwrap func(swarm.Chunk),
+ gsocHandler func(*soc.SOC),
+ validStamp postage.ValidStampFn,
+ logger log.Logger,
+ accounting accounting.Interface,
+ pricer pricer.Interface,
+ signer crypto.Signer,
+ tracer *tracing.Tracer,
+ stabilizer stabilization.Subscriber,
+ shallowReceiptTolerance uint8,
+) *PushSync {
+ ps := &PushSync{
+ address: address,
+ radius: radius,
+ networkID: networkID,
+ nonce: nonce,
+ streamer: streamer,
+ store: store,
+ topologyDriver: topology,
+ fullNode: fullNode,
+ unwrap: unwrap,
+ gsocHandler: gsocHandler,
+ logger: logger.WithName(loggerName).Register(),
+ accounting: accounting,
+ pricer: pricer,
+ tracer: tracer,
+ signer: signer,
+ errSkip: skippeers.NewList(time.Minute),
+ stabilizer: stabilizer,
+ shallowReceiptTolerance: shallowReceiptTolerance,
+ }
+
+ ps.validStamp = ps.validStampWrapper(validStamp)
+ return ps
+}
+
+// handler handles chunk delivery from other node and forwards to its destination node.
+// If the current node is the destination, it stores in the local store and sends a receipt.
+func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
+
+ w, r := protobuf.NewWriterAndReader(stream)
+ var attemptedWrite bool
+
+ ctx, cancel := context.WithTimeout(ctx, defaultTTL)
+ defer cancel()
+
+ defer func() {
+ if err != nil {
+
+ if !attemptedWrite {
+ _ = w.WriteMsgWithContext(ctx, &pb.Receipt{Err: err.Error()})
+ }
+ _ = stream.Reset()
+ } else {
+ _ = stream.FullClose()
+ }
+ }()
+
+ var ch pb.Delivery
+ if err = r.ReadMsgWithContext(ctx, &ch); err != nil {
+ return fmt.Errorf("pushsync read delivery: %w", err)
+ }
+
+ chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
+ chunkAddress := chunk.Address()
+
+ span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()}, opentracing.Tag{Key: "tagID", Value: chunk.TagID()}, opentracing.Tag{Key: "sender_address", Value: p.Address.String()})
+
+ var (
+ stored bool
+ reason string
+ )
+
+ defer func() {
+ if err != nil {
+ ext.LogError(span, err)
+ } else {
+ var logs []olog.Field
+ logs = append(logs, olog.Bool("success", true))
+ if stored {
+ logs = append(logs, olog.Bool("stored", true))
+ logs = append(logs, olog.String("reason", reason))
+ }
+ span.LogFields(logs...)
+ }
+ span.Finish()
+ }()
+
+ stamp := new(postage.Stamp)
+ err = stamp.UnmarshalBinary(ch.Stamp)
+ if err != nil {
+ return fmt.Errorf("pushsync stamp unmarshall: %w", err)
+ }
+ chunk.WithStamp(stamp)
+
+ if cac.Valid(chunk) {
+ go ps.unwrap(chunk)
+ } else if chunk, err := soc.FromChunk(chunk); err == nil {
+ addr, err := chunk.Address()
+ if err != nil {
+ return err
+ }
+ ps.logger.Debug("handle gsoc", "peer_address", p.Address, "chunk_address", addr, "wrapped_chunk_address", chunk.WrappedChunk().Address())
+ ps.gsocHandler(chunk)
+ } else {
+ return swarm.ErrInvalidChunk
+ }
+
+ price := ps.pricer.Price(chunkAddress)
+
+ rad, err := ps.radius()
+ if err != nil {
+ return fmt.Errorf("pushsync: storage radius: %w", err)
+ }
+
+ store := func(ctx context.Context) error {
+
+ chunkToPut, err := ps.validStamp(chunk)
+ if err != nil {
+ return fmt.Errorf("invalid stamp: %w", err)
+ }
+
+ err = ps.store.ReservePutter().Put(ctx, chunkToPut)
+ if err != nil {
+ return fmt.Errorf("reserve put: %w", err)
+ }
+
+ signature, err := ps.signer.Sign(chunkToPut.Address().Bytes())
+ if err != nil {
+ return fmt.Errorf("receipt signature: %w", err)
+ }
+
+ // return back receipt
+ debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
+ if err != nil {
+ return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
+ }
+ defer debit.Cleanup()
+
+ attemptedWrite = true
+
+ receipt := pb.Receipt{Address: chunkToPut.Address().Bytes(), Signature: signature, Nonce: ps.nonce, StorageRadius: uint32(rad)}
+ if err := w.WriteMsgWithContext(ctx, &receipt); err != nil {
+ return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
+ }
+
+ return debit.Apply()
+ }
+
+ if swarm.Proximity(ps.address.Bytes(), chunkAddress.Bytes()) >= rad {
+ stored, reason = true, "is within AOR"
+ return store(ctx)
+ }
+
+ switch receipt, err := ps.pushToClosest(ctx, chunk, false); {
+ case errors.Is(err, topology.ErrWantSelf):
+ stored, reason = true, "want self"
+ return store(ctx)
+ case err == nil:
+
+ debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
+ if err != nil {
+ return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
+ }
+ defer debit.Cleanup()
+
+ attemptedWrite = true
+
+ // pass back the receipt
+ if err := w.WriteMsgWithContext(ctx, receipt); err != nil {
+ return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
+ }
+
+ return debit.Apply()
+ default:
+ return fmt.Errorf("handler: push to closest chunk %s: %w", chunkAddress, err)
+
+ }
+}
+
+// PushChunkToClosest sends chunk to the closest peer by opening a stream. It then waits for
+// a receipt from that peer and returns error or nil based on the receiving and
+// the validity of the receipt.
+func (ps *PushSync) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error) {
+ r, err := ps.pushToClosest(ctx, ch, true)
+ if errors.Is(err, ErrShallowReceipt) {
+ return &Receipt{
+ Address: swarm.NewAddress(r.Address),
+ Signature: r.Signature,
+ Nonce: r.Nonce,
+ }, err
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &Receipt{
+ Address: swarm.NewAddress(r.Address),
+ Signature: r.Signature,
+ Nonce: r.Nonce,
+ }, nil
+}
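+
+// Note that on ErrShallowReceipt the receipt is returned alongside the
+// error: the receipt is still cryptographically valid, and callers such as
+// the pusher use the error to decide whether to retry while keeping the
+// receipt as a fallback.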
+
+// pushToClosest attempts to push the chunk into the network.
+func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bool) (*pb.Receipt, error) {
+ if !ps.stabilizer.IsStabilized() {
+ return nil, ErrWarmup
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var (
+ sentErrorsLeft = 1
+ preemptiveTicker <-chan time.Time
+ inflight int
+ parallelForwards = maxMultiplexForwards
+ )
+
+ if origin {
+ ticker := time.NewTicker(preemptiveInterval)
+ defer ticker.Stop()
+ preemptiveTicker = ticker.C
+ sentErrorsLeft = maxPushErrors
+ }
+
+ idAddress, err := storage.IdentityAddress(ch)
+ if err != nil {
+ return nil, err
+ }
+
+ resultChan := make(chan receiptResult)
+
+ retryC := make(chan struct{}, max(1, parallelForwards))
+
+ retry := func() {
+ select {
+ case retryC <- struct{}{}:
+ case <-ctx.Done():
+ default:
+ }
+ }
+
+ retry()
+
+ rad, err := ps.radius()
+ if err != nil {
+ return nil, fmt.Errorf("pushsync: storage radius: %w", err)
+ }
+
+ skip := skippeers.NewList(0)
+ defer skip.Close()
+
+ for sentErrorsLeft > 0 {
+ select {
+ case <-ctx.Done():
+ return nil, ErrNoPush
+ case <-preemptiveTicker:
+ retry()
+ case <-retryC:
+
+ // Origin peers should not store the chunk initially so that the chunk is always forwarded into the network.
+ // If no peer can be found from an origin peer, the origin peer may store the chunk.
+ // Non-origin peers store the chunk if the chunk is within depth.
+ // For non-origin peers, if the chunk is not within depth, they may store the chunk if they are the closest peer to the chunk.
+ fullSkip := append(skip.ChunkPeers(idAddress), ps.errSkip.ChunkPeers(idAddress)...)
+ peer, err := ps.closestPeer(ch.Address(), origin, fullSkip)
+ if errors.Is(err, topology.ErrNotFound) {
+ if skip.PruneExpiresAfter(idAddress, overDraftRefresh) == 0 { // no overdraft peers, we have depleted ALL peers
+ if inflight == 0 {
+ if ps.fullNode {
+ if cac.Valid(ch) {
+ go ps.unwrap(ch)
+ }
+ return nil, topology.ErrWantSelf
+ }
+ ps.logger.Debug("no peers left", "chunk_address", ch.Address(), "error", err)
+ return nil, err
+ }
+ continue // there is still an inflight request, wait for its result
+ }
+
+ ps.logger.Debug("sleeping to refresh overdraft balance", "chunk_address", ch.Address())
+
+ select {
+ case <-time.After(overDraftRefresh):
+ retry()
+ continue
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+
+ if err != nil {
+ if inflight == 0 {
+ return nil, err
+ }
+ // inflight request in progress, wait for its result
+ ps.logger.Debug("next peer", "chunk_address", ch.Address(), "error", err)
+ continue
+ }
+
+ // since we can reach into the neighborhood of the chunk
+ // act as the multiplexer and push the chunk in parallel to multiple peers
+ if swarm.Proximity(peer.Bytes(), ch.Address().Bytes()) >= rad {
+ for ; parallelForwards > 0; parallelForwards-- {
+ retry()
+ sentErrorsLeft++
+ }
+ }
+
+ action, err := ps.prepareCredit(ctx, peer, ch, origin)
+ if err != nil {
+ retry()
+ skip.Add(idAddress, peer, overDraftRefresh)
+ continue
+ }
+ skip.Forever(idAddress, peer)
+
+ inflight++
+
+ go ps.push(ctx, resultChan, peer, ch, action)
+
+ case result := <-resultChan:
+ inflight--
+
+ ps.measurePushPeer(result.pushTime, result.err)
+
+ if result.err == nil {
+
+ if !origin { // forwarder nodes do not need to check the receipt
+ return result.receipt, nil
+ }
+
+ switch err := ps.checkReceipt(result.receipt); {
+ case err == nil:
+ return result.receipt, nil
+ case errors.Is(err, ErrShallowReceipt):
+ ps.errSkip.Add(idAddress, result.peer, skiplistDur)
+ return result.receipt, err
+ }
+ }
+
+ ps.logger.Debug("could not push to peer", "chunk_address", ch.Address(), "id_address", idAddress, "peer_address", result.peer, "error", result.err)
+
+ sentErrorsLeft--
+ ps.errSkip.Add(idAddress, result.peer, skiplistDur)
+
+ retry()
+ }
+ }
+
+ return nil, ErrNoPush
+}
+
+func (ps *PushSync) push(parentCtx context.Context, resultChan chan<- receiptResult, peer swarm.Address, ch swarm.Chunk, action accounting.Action) {
+ // here we use a background timeout context because we do not want another push attempt to cancel this one
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTTL)
+ defer cancel()
+
+ var (
+ err error
+ receipt *pb.Receipt
+ )
+
+ now := time.Now()
+
+ spanInner, _, _ := ps.tracer.FollowSpanFromContext(context.WithoutCancel(parentCtx), "push-chunk-async", ps.logger, opentracing.Tag{Key: "address", Value: ch.Address().String()})
+
+ defer func() {
+ if err != nil {
+ ext.LogError(spanInner, err)
+ } else {
+ spanInner.LogFields(olog.Bool("success", true))
+ }
+ spanInner.Finish()
+ select {
+ case resultChan <- receiptResult{pushTime: now, peer: peer, err: err, receipt: receipt}:
+ case <-parentCtx.Done():
+ }
+ }()
+
+ defer action.Cleanup()
+
+ spanInner.LogFields(olog.String("peer_address", peer.String()))
+
+ receipt, err = ps.pushChunkToPeer(tracing.WithContext(ctx, spanInner.Context()), peer, ch)
+ if err != nil {
+ return
+ }
+
+ err = action.Apply()
+}
+
+func (ps *PushSync) checkReceipt(receipt *pb.Receipt) error {
+ addr := swarm.NewAddress(receipt.Address)
+
+ publicKey, err := crypto.Recover(receipt.Signature, addr.Bytes())
+ if err != nil {
+ return fmt.Errorf("pushsync: receipt recover: %w", err)
+ }
+
+ peer, err := crypto.NewOverlayAddress(*publicKey, ps.networkID, receipt.Nonce)
+ if err != nil {
+ return fmt.Errorf("pushsync: receipt storer address: %w", err)
+ }
+
+ po := swarm.Proximity(addr.Bytes(), peer.Bytes())
+
+ r, err := ps.radius()
+ if err != nil {
+ return fmt.Errorf("pushsync: storage radius: %w", err)
+ }
+
+ var tolerance uint8
+ if r >= ps.shallowReceiptTolerance { // check for underflow of uint8
+ tolerance = r - ps.shallowReceiptTolerance
+ }
+
+ if po < tolerance || uint32(po) < receipt.StorageRadius {
+
+ ps.logger.Debug("shallow receipt", "chunk_address", addr, "peer_address", peer, "proximity_order", po, "peer_radius", receipt.StorageRadius, "self_radius", r)
+ return ErrShallowReceipt
+ }
+
+ ps.logger.Debug("chunk pushed", "chunk_address", addr, "peer_address", peer, "proximity_order", po)
+
+ return nil
+}
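+
+// Worked example (illustrative): with self radius r = 10 and
+// shallowReceiptTolerance = 2, tolerance is 8; a receipt whose signer has
+// proximity order 7 to the chunk is rejected as shallow, as is any receipt
+// whose proximity order falls below the peer's own reported StorageRadius.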
+
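+// measurePushPeer is a no-op under js: the metrics backend
+// (pkg/pushsync/metrics.go) is compiled out of js builds by its !js build
+// tag.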
+func (ps *PushSync) measurePushPeer(t time.Time, err error) {
+
+}
+
+func (ps *PushSync) validStampWrapper(f postage.ValidStampFn) postage.ValidStampFn {
+ return func(c swarm.Chunk) (swarm.Chunk, error) {
+ chunk, err := f(c)
+
+ return chunk, err
+ }
+}
diff --git a/pkg/pushsync/pushsync_shared.go b/pkg/pushsync/pushsync_shared.go
new file mode 100644
index 00000000000..cb6c96fbc7a
--- /dev/null
+++ b/pkg/pushsync/pushsync_shared.go
@@ -0,0 +1,165 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pushsync provides the pushsync protocol
+// implementation.
+package pushsync
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/accounting"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/pushsync/pb"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "pushsync"
+
+const (
+ protocolName = "pushsync"
+ protocolVersion = "1.3.1"
+ streamName = "pushsync"
+)
+
+const (
+ defaultTTL = 30 * time.Second // request time to live
+ preemptiveInterval = 5 * time.Second // P90 request time to live
+ skiplistDur = 5 * time.Minute
+ overDraftRefresh = time.Millisecond * 600
+)
+
+const (
+ maxMultiplexForwards = 2 // number of extra peers to forward the request from the multiplex node
+ maxPushErrors = 32
+)
+
+var (
+ ErrNoPush = errors.New("could not push chunk")
+ ErrOutOfDepthStoring = errors.New("storing outside of the neighborhood")
+ ErrWarmup = errors.New("node warmup time not complete")
+ ErrShallowReceipt = errors.New("shallow receipt")
+)
+
+type PushSyncer interface {
+ PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error)
+}
+
+type Receipt struct {
+ Address swarm.Address
+ Signature []byte
+ Nonce []byte
+}
+
+type Storer interface {
+ storage.PushReporter
+ ReservePutter() storage.Putter
+}
+
+type receiptResult struct {
+ pushTime time.Time
+ peer swarm.Address
+ receipt *pb.Receipt
+ err error
+}
+
+func (s *PushSync) Protocol() p2p.ProtocolSpec {
+ return p2p.ProtocolSpec{
+ Name: protocolName,
+ Version: protocolVersion,
+ StreamSpecs: []p2p.StreamSpec{
+ {
+ Name: streamName,
+ Handler: s.handler,
+ },
+ },
+ }
+}
+
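+// closestPeer selects the peer closest to the chunk, preferring reachable
+// and healthy peers, then merely reachable peers, and finally any known
+// peer, so pushes degrade gracefully as peer-quality information thins out.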
+func (ps *PushSync) closestPeer(chunkAddress swarm.Address, origin bool, skipList []swarm.Address) (swarm.Address, error) {
+ includeSelf := ps.fullNode && !origin
+
+ peer, err := ps.topologyDriver.ClosestPeer(chunkAddress, includeSelf, topology.Select{Reachable: true, Healthy: true}, skipList...)
+ if errors.Is(err, topology.ErrNotFound) {
+ peer, err := ps.topologyDriver.ClosestPeer(chunkAddress, includeSelf, topology.Select{Reachable: true}, skipList...)
+ if errors.Is(err, topology.ErrNotFound) {
+ return ps.topologyDriver.ClosestPeer(chunkAddress, includeSelf, topology.Select{}, skipList...)
+ }
+ return peer, err
+ }
+
+ return peer, err
+}
+
+func (ps *PushSync) pushChunkToPeer(ctx context.Context, peer swarm.Address, ch swarm.Chunk) (receipt *pb.Receipt, err error) {
+ streamer, err := ps.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
+ if err != nil {
+ return nil, fmt.Errorf("new stream for peer %s: %w", peer.String(), err)
+ }
+
+ defer func() {
+ if err != nil {
+ _ = streamer.Reset()
+ } else {
+ _ = streamer.FullClose()
+ }
+ }()
+
+ w, r := protobuf.NewWriterAndReader(streamer)
+ stamp, err := ch.Stamp().MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ err = w.WriteMsgWithContext(ctx, &pb.Delivery{
+ Address: ch.Address().Bytes(),
+ Data: ch.Data(),
+ Stamp: stamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // if the chunk has a tag, then it's from a local deferred upload
+ if ch.TagID() != 0 {
+ err = ps.store.Report(ctx, ch, storage.ChunkSent)
+ if err != nil && !errors.Is(err, storage.ErrNotFound) {
+ err = fmt.Errorf("tag %d increment: %w", ch.TagID(), err)
+ return
+ }
+ }
+
+ var rec pb.Receipt
+ if err = r.ReadMsgWithContext(ctx, &rec); err != nil {
+ return nil, err
+ }
+ if rec.Err != "" {
+ return nil, p2p.NewChunkDeliveryError(rec.Err)
+ }
+
+ if !ch.Address().Equal(swarm.NewAddress(rec.Address)) {
+ return nil, fmt.Errorf("invalid receipt. chunk %s, peer %s", ch.Address(), peer)
+ }
+
+ return &rec, nil
+}
+
+func (ps *PushSync) prepareCredit(ctx context.Context, peer swarm.Address, ch swarm.Chunk, origin bool) (accounting.Action, error) {
+ creditAction, err := ps.accounting.PrepareCredit(ctx, peer, ps.pricer.PeerPrice(peer, ch.Address()), origin)
+ if err != nil {
+ return nil, err
+ }
+
+ return creditAction, nil
+}
+
+func (s *PushSync) Close() error {
+ return s.errSkip.Close()
+}
diff --git a/pkg/retrieval/metrics.go b/pkg/retrieval/metrics.go
index f987a75b2d6..e9dfb1d8024 100644
--- a/pkg/retrieval/metrics.go
+++ b/pkg/retrieval/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/retrieval/retrieval.go b/pkg/retrieval/retrieval.go
index 2fce97be5a2..73a28078f02 100644
--- a/pkg/retrieval/retrieval.go
+++ b/pkg/retrieval/retrieval.go
@@ -1,11 +1,6 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package retrieval provides the retrieval protocol
-// implementation. The protocol is used to retrieve
-// chunks over the network using forwarding-kademlia
-// routing.
+//go:build !js
+// +build !js
+
package retrieval
import (
@@ -33,37 +28,6 @@ import (
"resenje.org/singleflight"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "retrieval"
-
-const (
- protocolName = "retrieval"
- protocolVersion = "1.4.0"
- streamName = "retrieval"
-)
-
-var _ Interface = (*Service)(nil)
-
-type Interface interface {
- // RetrieveChunk retrieves a chunk from the network using the retrieval protocol.
- // it takes as parameters a context, a chunk address to retrieve (content-addressed or single-owner) and
- // a source peer address, for the case that we are requesting the chunk for another peer. In case the request
- // originates at the current node (i.e. no forwarding involved), the caller should use swarm.ZeroAddress
- // as the value for sourcePeerAddress.
- RetrieveChunk(ctx context.Context, address, sourcePeerAddr swarm.Address) (chunk swarm.Chunk, err error)
-}
-
-type retrievalResult struct {
- chunk swarm.Chunk
- peer swarm.Address
- err error
-}
-
-type Storer interface {
- Cache() storage.Putter
- Lookup() storage.Getter
-}
-
type Service struct {
addr swarm.Address
radiusFunc func() (uint8, error)
@@ -108,29 +72,6 @@ func New(
}
}
-func (s *Service) Protocol() p2p.ProtocolSpec {
- return p2p.ProtocolSpec{
- Name: protocolName,
- Version: protocolVersion,
- StreamSpecs: []p2p.StreamSpec{
- {
- Name: streamName,
- Handler: s.handler,
- },
- },
- }
-}
-
-const (
- RetrieveChunkTimeout = time.Second * 30
- preemptiveInterval = time.Second
- overDraftRefresh = time.Millisecond * 600
- skiplistDur = time.Minute
- originSuffix = "_origin"
- maxOriginErrors = 32
- maxMultiplexForwards = 2
-)
-
func (s *Service) RetrieveChunk(ctx context.Context, chunkAddr, sourcePeerAddr swarm.Address) (swarm.Chunk, error) {
loggerV1 := s.logger
@@ -381,131 +322,3 @@ func (s *Service) prepareCredit(ctx context.Context, peer, chunk swarm.Address,
return creditAction, nil
}
-
-// closestPeer returns address of the peer that is closest to the chunk with
-// provided address addr. This function will ignore peers with addresses
-// provided in skipPeers and if allowUpstream is true, peers that are further of
-// the chunk than this node is, could also be returned, allowing the upstream
-// retrieve request.
-func (s *Service) closestPeer(addr swarm.Address, skipPeers []swarm.Address, allowUpstream bool) (swarm.Address, error) {
-
- var (
- closest swarm.Address
- err error
- )
-
- closest, err = s.peerSuggester.ClosestPeer(addr, false, topology.Select{Reachable: true, Healthy: true}, skipPeers...)
- if errors.Is(err, topology.ErrNotFound) {
- closest, err = s.peerSuggester.ClosestPeer(addr, false, topology.Select{Reachable: true}, skipPeers...)
- if errors.Is(err, topology.ErrNotFound) {
- closest, err = s.peerSuggester.ClosestPeer(addr, false, topology.Select{}, skipPeers...)
- }
- }
-
- if err != nil {
- return swarm.Address{}, err
- }
-
- if allowUpstream {
- return closest, nil
- }
-
- closer, err := closest.Closer(addr, s.addr)
- if err != nil {
- return swarm.Address{}, fmt.Errorf("distance compare addr %s closest %s base address %s: %w", addr.String(), closest.String(), s.addr.String(), err)
- }
- if !closer {
- return swarm.Address{}, topology.ErrNotFound
- }
-
- return closest, nil
-}
-
-func (s *Service) handler(p2pctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
- ctx, cancel := context.WithTimeout(p2pctx, RetrieveChunkTimeout)
- defer cancel()
-
- w, r := protobuf.NewWriterAndReader(stream)
- var attemptedWrite bool
-
- defer func() {
- if err != nil {
- if !attemptedWrite {
- _ = w.WriteMsgWithContext(ctx, &pb.Delivery{Err: err.Error()})
- }
- _ = stream.Reset()
- } else {
- _ = stream.FullClose()
- }
- }()
- var req pb.Request
- if err := r.ReadMsgWithContext(ctx, &req); err != nil {
- return fmt.Errorf("read request: %w peer %s", err, p.Address.String())
- }
-
- addr := swarm.NewAddress(req.Addr)
-
- if addr.IsZero() || addr.IsEmpty() || !addr.IsValidLength() {
- return fmt.Errorf("invalid address queried by peer %s", p.Address.String())
- }
-
- var forwarded bool
-
- span, _, ctx := s.tracer.StartSpanFromContext(ctx, "handle-retrieve-chunk", s.logger, opentracing.Tag{Key: "address", Value: addr.String()})
- defer func() {
- if err != nil {
- ext.LogError(span, err)
- } else {
- span.LogFields(olog.Bool("success", true))
- }
- span.LogFields(olog.Bool("forwarded", forwarded))
- span.Finish()
- }()
-
- chunk, err := s.storer.Lookup().Get(ctx, addr)
- if err != nil {
- if errors.Is(err, storage.ErrNotFound) {
- // forward the request
- chunk, err = s.RetrieveChunk(ctx, addr, p.Address)
- if err != nil {
- return fmt.Errorf("retrieve chunk: %w", err)
- }
- forwarded = true
- } else {
- return fmt.Errorf("get from store: %w", err)
- }
- }
-
- chunkPrice := s.pricer.Price(chunk.Address())
- debit, err := s.accounting.PrepareDebit(ctx, p.Address, chunkPrice)
- if err != nil {
- return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
- }
- defer debit.Cleanup()
-
- attemptedWrite = true
-
- if err := w.WriteMsgWithContext(ctx, &pb.Delivery{
- Data: chunk.Data(),
- }); err != nil {
- return fmt.Errorf("write delivery: %w peer %s", err, p.Address.String())
- }
-
- // debit price from p's balance
- if err := debit.Apply(); err != nil {
- return fmt.Errorf("apply debit: %w", err)
- }
-
- // cache the request last, so that putting to the localstore does not slow down the request flow
- if s.caching && forwarded {
- if err := s.storer.Cache().Put(p2pctx, chunk); err != nil {
- s.logger.Debug("retrieve cache put", "error", err)
- }
- }
-
- return nil
-}
-
-func (s *Service) Close() error {
- return s.errSkip.Close()
-}
diff --git a/pkg/retrieval/retrieval_js.go b/pkg/retrieval/retrieval_js.go
new file mode 100644
index 00000000000..2e7f4be1eda
--- /dev/null
+++ b/pkg/retrieval/retrieval_js.go
@@ -0,0 +1,304 @@
+//go:build js
+// +build js
+
+package retrieval
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/accounting"
+ "github.com/ethersphere/bee/v2/pkg/cac"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/pricer"
+ pb "github.com/ethersphere/bee/v2/pkg/retrieval/pb"
+ "github.com/ethersphere/bee/v2/pkg/skippeers"
+ "github.com/ethersphere/bee/v2/pkg/soc"
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
+ "resenje.org/singleflight"
+)
+
+type Service struct {
+ addr swarm.Address
+ radiusFunc func() (uint8, error)
+ streamer p2p.Streamer
+ peerSuggester topology.ClosestPeerer
+ storer Storer
+ singleflight singleflight.Group[string, swarm.Chunk]
+ logger log.Logger
+ accounting accounting.Interface
+ pricer pricer.Interface
+ tracer *tracing.Tracer
+ caching bool
+ errSkip *skippeers.List
+}
+
+func New(
+ addr swarm.Address,
+ radiusFunc func() (uint8, error),
+ storer Storer,
+ streamer p2p.Streamer,
+ chunkPeerer topology.ClosestPeerer,
+ logger log.Logger,
+ accounting accounting.Interface,
+ pricer pricer.Interface,
+ tracer *tracing.Tracer,
+ forwarderCaching bool,
+) *Service {
+ return &Service{
+ addr: addr,
+ radiusFunc: radiusFunc,
+ streamer: streamer,
+ peerSuggester: chunkPeerer,
+ storer: storer,
+ logger: logger.WithName(loggerName).Register(),
+ accounting: accounting,
+ pricer: pricer,
+ tracer: tracer,
+ caching: forwarderCaching,
+ errSkip: skippeers.NewList(time.Minute),
+ }
+}
+
+func (s *Service) RetrieveChunk(ctx context.Context, chunkAddr, sourcePeerAddr swarm.Address) (swarm.Chunk, error) {
+ loggerV1 := s.logger
+
+ origin := sourcePeerAddr.IsZero()
+
+ if chunkAddr.IsZero() || chunkAddr.IsEmpty() || !chunkAddr.IsValidLength() {
+ return nil, fmt.Errorf("invalid address queried")
+ }
+
+ flightRoute := chunkAddr.String()
+ if origin {
+ flightRoute = chunkAddr.String() + originSuffix
+ }
+
+ totalRetrieveAttempts := 0
+
+ spanCtx := context.WithoutCancel(ctx)
+
+ v, _, err := s.singleflight.Do(ctx, flightRoute, func(ctx context.Context) (swarm.Chunk, error) {
+
+ skip := skippeers.NewList(0)
+ defer skip.Close()
+
+ var preemptiveTicker <-chan time.Time
+
+ if !sourcePeerAddr.IsZero() {
+ skip.Forever(chunkAddr, sourcePeerAddr)
+ }
+
+ quit := make(chan struct{})
+ defer close(quit)
+
+ var forwards = maxMultiplexForwards
+
+ // if we are the origin node, allow many preemptive retries to speed up the retrieval of the chunk.
+ errorsLeft := 1
+ if origin {
+ ticker := time.NewTicker(preemptiveInterval)
+ defer ticker.Stop()
+ preemptiveTicker = ticker.C
+ errorsLeft = maxOriginErrors
+ }
+
+ resultC := make(chan retrievalResult, 1)
+ retryC := make(chan struct{}, forwards+1)
+
+ retry := func() {
+ select {
+ case retryC <- struct{}{}:
+ case <-ctx.Done():
+ default:
+ }
+ }
+
+ retry()
+
+ inflight := 0
+
+ for errorsLeft > 0 {
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-preemptiveTicker:
+ retry()
+ case <-retryC:
+
+ totalRetrieveAttempts++
+
+ fullSkip := append(skip.ChunkPeers(chunkAddr), s.errSkip.ChunkPeers(chunkAddr)...)
+ peer, err := s.closestPeer(chunkAddr, fullSkip, origin)
+
+ if errors.Is(err, topology.ErrNotFound) {
+ if skip.PruneExpiresAfter(chunkAddr, overDraftRefresh) == 0 { // no overdraft peers, we have depleted ALL peers
+ if inflight == 0 {
+ loggerV1.Debug("no peers left", "chunk_address", chunkAddr, "errors_left", errorsLeft, "isOrigin", origin, "own_proximity", swarm.Proximity(s.addr.Bytes(), chunkAddr.Bytes()), "error", err)
+ return nil, err
+ }
+ continue // there is still an inflight request, wait for its result
+ }
+
+ loggerV1.Debug("sleeping to refresh overdraft balance", "chunk_address", chunkAddr)
+
+ select {
+ case <-time.After(overDraftRefresh):
+ retry()
+ continue
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+
+ if err != nil {
+ if inflight == 0 {
+ loggerV1.Debug("peer selection", "chunk_address", chunkAddr, "error", err)
+ return nil, err
+ }
+ continue
+ }
+
+ // since we can reach into the neighborhood of the chunk,
+ // act as the multiplexer and request the chunk in parallel from multiple peers.
+ // neighbor peers will also have multiple retries, which means almost the entire neighborhood
+ // will be scanned for the chunk, starting from the closest to the furthest peer in the neighborhood.
+ if radius, err := s.radiusFunc(); err == nil && swarm.Proximity(peer.Bytes(), chunkAddr.Bytes()) >= radius {
+ for ; forwards > 0; forwards-- {
+ retry()
+ errorsLeft++
+ }
+ }
+
+ action, err := s.prepareCredit(ctx, peer, chunkAddr, origin)
+ if err != nil {
+ skip.Add(chunkAddr, peer, overDraftRefresh)
+ retry()
+ continue
+ }
+ skip.Forever(chunkAddr, peer)
+
+ inflight++
+
+ go func() {
+ span, _, ctx := s.tracer.FollowSpanFromContext(spanCtx, "retrieve-chunk", s.logger, opentracing.Tag{Key: "address", Value: chunkAddr.String()})
+ defer span.Finish()
+ s.retrieveChunk(ctx, quit, chunkAddr, peer, resultC, action, span)
+ }()
+
+ case res := <-resultC:
+
+ inflight--
+
+ if res.err == nil {
+ loggerV1.Debug("retrieved chunk", "chunk_address", chunkAddr, "peer_address", res.peer, "peer_proximity", swarm.Proximity(res.peer.Bytes(), chunkAddr.Bytes()))
+ return res.chunk, nil
+ }
+
+ loggerV1.Debug("failed to get chunk", "chunk_address", chunkAddr, "peer_address", res.peer,
+ "peer_proximity", swarm.Proximity(res.peer.Bytes(), chunkAddr.Bytes()), "error", res.err)
+
+ errorsLeft--
+ s.errSkip.Add(chunkAddr, res.peer, skiplistDur)
+ retry()
+ }
+ }
+
+ return nil, storage.ErrNotFound
+ })
+ if err != nil {
+ s.logger.Debug("retrieval failed", "chunk_address", chunkAddr, "error", err)
+ return nil, err
+ }
+
+ return v, nil
+}
+
+func (s *Service) retrieveChunk(ctx context.Context, quit chan struct{}, chunkAddr, peer swarm.Address, result chan retrievalResult, action accounting.Action, span opentracing.Span) {
+
+ var (
+ err error
+ chunk swarm.Chunk
+ )
+
+ defer func() {
+ action.Cleanup()
+ if err != nil {
+ ext.LogError(span, err)
+ } else {
+ span.LogFields(olog.Bool("success", true))
+ }
+ select {
+ case result <- retrievalResult{err: err, chunk: chunk, peer: peer}:
+ case <-quit:
+ return
+ }
+ }()
+
+ ctx, cancel := context.WithTimeout(ctx, RetrieveChunkTimeout)
+ defer cancel()
+
+ stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
+ if err != nil {
+ err = fmt.Errorf("new stream: %w", err)
+ return
+ }
+
+ defer func() {
+ if err != nil {
+ _ = stream.Reset()
+ } else {
+ _ = stream.FullClose()
+ }
+ }()
+
+ w, r := protobuf.NewWriterAndReader(stream)
+ err = w.WriteMsgWithContext(ctx, &pb.Request{Addr: chunkAddr.Bytes()})
+ if err != nil {
+ err = fmt.Errorf("write request: %w peer %s", err, peer.String())
+ return
+ }
+
+ var d pb.Delivery
+ if err = r.ReadMsgWithContext(ctx, &d); err != nil {
+ err = fmt.Errorf("read delivery: %w peer %s", err, peer.String())
+ return
+ }
+ if d.Err != "" {
+ err = p2p.NewChunkDeliveryError(d.Err)
+ return
+ }
+
+ chunk = swarm.NewChunk(chunkAddr, d.Data)
+ if !cac.Valid(chunk) {
+ if !soc.Valid(chunk) {
+ err = swarm.ErrInvalidChunk
+ return
+ }
+ }
+
+ err = action.Apply()
+}
+
+func (s *Service) prepareCredit(ctx context.Context, peer, chunk swarm.Address, origin bool) (accounting.Action, error) {
+
+ price := s.pricer.PeerPrice(peer, chunk)
+
+ creditAction, err := s.accounting.PrepareCredit(ctx, peer, price, origin)
+ if err != nil {
+ return nil, err
+ }
+
+ return creditAction, nil
+}
diff --git a/pkg/retrieval/retrieval_shared.go b/pkg/retrieval/retrieval_shared.go
new file mode 100644
index 00000000000..cbb214f9219
--- /dev/null
+++ b/pkg/retrieval/retrieval_shared.go
@@ -0,0 +1,208 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package retrieval provides the retrieval protocol
+// implementation. The protocol is used to retrieve
+// chunks over the network using forwarding-kademlia
+// routing.
+package retrieval
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ pb "github.com/ethersphere/bee/v2/pkg/retrieval/pb"
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ olog "github.com/opentracing/opentracing-go/log"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "retrieval"
+
+const (
+ protocolName = "retrieval"
+ protocolVersion = "1.4.0"
+ streamName = "retrieval"
+)
+
+var _ Interface = (*Service)(nil)
+
+type Interface interface {
+ // RetrieveChunk retrieves a chunk from the network using the retrieval protocol.
+ // It takes as parameters a context, a chunk address to retrieve (content-addressed or single-owner) and
+ // a source peer address, for the case that we are requesting the chunk on behalf of another peer. In case the request
+ // originates at the current node (i.e. no forwarding involved), the caller should use swarm.ZeroAddress
+ // as the value for sourcePeerAddr.
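+ //
+ // For example, a locally originated request is issued as
+ //
+ //	chunk, err := svc.RetrieveChunk(ctx, addr, swarm.ZeroAddress)
+ //
+ // while the protocol handler, forwarding on behalf of peer p, passes
+ // p.Address instead.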
+ RetrieveChunk(ctx context.Context, address, sourcePeerAddr swarm.Address) (chunk swarm.Chunk, err error)
+}
+
+type retrievalResult struct {
+ chunk swarm.Chunk
+ peer swarm.Address
+ err error
+}
+
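+// Storer is the storage abstraction the retrieval protocol relies on: a
+// Getter for serving lookups from the local store and a Putter for caching
+// chunks that were fetched on behalf of other peers.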
+type Storer interface {
+ Cache() storage.Putter
+ Lookup() storage.Getter
+}
+
+func (s *Service) Protocol() p2p.ProtocolSpec {
+ return p2p.ProtocolSpec{
+ Name: protocolName,
+ Version: protocolVersion,
+ StreamSpecs: []p2p.StreamSpec{
+ {
+ Name: streamName,
+ Handler: s.handler,
+ },
+ },
+ }
+}
+
+const (
+ RetrieveChunkTimeout = time.Second * 30
+ preemptiveInterval = time.Second
+ overDraftRefresh = time.Millisecond * 600
+ skiplistDur = time.Minute
+ originSuffix = "_origin"
+ maxOriginErrors = 32
+ maxMultiplexForwards = 2
+)
+
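+// Taken together: an origin request keeps retrying until maxOriginErrors
+// failed deliveries have accumulated, firing a preemptive retry every
+// preemptiveInterval, while any node whose selected peer already lies within
+// the chunk's neighborhood (proximity >= storage radius) multiplexes the
+// request to up to maxMultiplexForwards additional peers in parallel.
+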
+// closestPeer returns the address of the peer that is closest to the chunk
+// with the provided address addr. Peers whose addresses appear in skipPeers
+// are ignored and, if allowUpstream is true, peers that are further from the
+// chunk than this node may also be returned, allowing the upstream retrieve
+// request.
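+//
+// With allowUpstream false, only a peer strictly closer to the chunk than
+// this node is ever returned, which is what makes forwarding-kademlia
+// requests terminate: every hop must reduce the distance to the chunk.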
+func (s *Service) closestPeer(addr swarm.Address, skipPeers []swarm.Address, allowUpstream bool) (swarm.Address, error) {
+
+ var (
+ closest swarm.Address
+ err error
+ )
+
+ closest, err = s.peerSuggester.ClosestPeer(addr, false, topology.Select{Reachable: true, Healthy: true}, skipPeers...)
+ if errors.Is(err, topology.ErrNotFound) {
+ closest, err = s.peerSuggester.ClosestPeer(addr, false, topology.Select{Reachable: true}, skipPeers...)
+ if errors.Is(err, topology.ErrNotFound) {
+ closest, err = s.peerSuggester.ClosestPeer(addr, false, topology.Select{}, skipPeers...)
+ }
+ }
+
+ if err != nil {
+ return swarm.Address{}, err
+ }
+
+ if allowUpstream {
+ return closest, nil
+ }
+
+ closer, err := closest.Closer(addr, s.addr)
+ if err != nil {
+ return swarm.Address{}, fmt.Errorf("distance compare addr %s closest %s base address %s: %w", addr.String(), closest.String(), s.addr.String(), err)
+ }
+ if !closer {
+ return swarm.Address{}, topology.ErrNotFound
+ }
+
+ return closest, nil
+}
+
+func (s *Service) handler(p2pctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
+ ctx, cancel := context.WithTimeout(p2pctx, RetrieveChunkTimeout)
+ defer cancel()
+
+ w, r := protobuf.NewWriterAndReader(stream)
+ var attemptedWrite bool
+
+ defer func() {
+ if err != nil {
+ if !attemptedWrite {
+ _ = w.WriteMsgWithContext(ctx, &pb.Delivery{Err: err.Error()})
+ }
+ _ = stream.Reset()
+ } else {
+ _ = stream.FullClose()
+ }
+ }()
+ var req pb.Request
+ if err := r.ReadMsgWithContext(ctx, &req); err != nil {
+ return fmt.Errorf("read request: %w peer %s", err, p.Address.String())
+ }
+
+ addr := swarm.NewAddress(req.Addr)
+
+ if addr.IsZero() || addr.IsEmpty() || !addr.IsValidLength() {
+ return fmt.Errorf("invalid address queried by peer %s", p.Address.String())
+ }
+
+ var forwarded bool
+
+ span, _, ctx := s.tracer.StartSpanFromContext(ctx, "handle-retrieve-chunk", s.logger, opentracing.Tag{Key: "address", Value: addr.String()})
+ defer func() {
+ if err != nil {
+ ext.LogError(span, err)
+ } else {
+ span.LogFields(olog.Bool("success", true))
+ }
+ span.LogFields(olog.Bool("forwarded", forwarded))
+ span.Finish()
+ }()
+
+ chunk, err := s.storer.Lookup().Get(ctx, addr)
+ if err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ // forward the request
+ chunk, err = s.RetrieveChunk(ctx, addr, p.Address)
+ if err != nil {
+ return fmt.Errorf("retrieve chunk: %w", err)
+ }
+ forwarded = true
+ } else {
+ return fmt.Errorf("get from store: %w", err)
+ }
+ }
+
+ chunkPrice := s.pricer.Price(chunk.Address())
+ debit, err := s.accounting.PrepareDebit(ctx, p.Address, chunkPrice)
+ if err != nil {
+ return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
+ }
+ defer debit.Cleanup()
+
+ attemptedWrite = true
+
+ if err := w.WriteMsgWithContext(ctx, &pb.Delivery{
+ Data: chunk.Data(),
+ }); err != nil {
+ return fmt.Errorf("write delivery: %w peer %s", err, p.Address.String())
+ }
+
+ // debit price from p's balance
+ if err := debit.Apply(); err != nil {
+ return fmt.Errorf("apply debit: %w", err)
+ }
+
+ // cache the chunk last, so that putting to the localstore does not slow down the request flow
+ if s.caching && forwarded {
+ if err := s.storer.Cache().Put(p2pctx, chunk); err != nil {
+ s.logger.Debug("retrieve cache put", "error", err)
+ }
+ }
+
+ return nil
+}
+
+func (s *Service) Close() error {
+ return s.errSkip.Close()
+}
diff --git a/pkg/salud/metrics.go b/pkg/salud/metrics.go
index a99087d219b..0e5a31da678 100644
--- a/pkg/salud/metrics.go
+++ b/pkg/salud/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/salud/salud.go b/pkg/salud/salud.go
index 8822324ba04..79579c738e1 100644
--- a/pkg/salud/salud.go
+++ b/pkg/salud/salud.go
@@ -1,9 +1,6 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
-// Package salud monitors the connected peers, calculates certain thresholds, and marks peers as unhealthy that
-// fall short of the thresholds to maintain network salud (health).
package salud
import (
@@ -14,33 +11,12 @@ import (
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/stabilization"
- "github.com/ethersphere/bee/v2/pkg/status"
"github.com/ethersphere/bee/v2/pkg/storer"
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/topology"
"go.uber.org/atomic"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "salud"
-
-const (
- wakeup = time.Minute * 5
- requestTimeout = time.Second * 10
- DefaultMinPeersPerBin = 4
- DefaultDurPercentile = 0.4 // consider 40% as healthy, lower percentile = stricter duration check
- DefaultConnsPercentile = 0.8 // consider 80% as healthy, lower percentile = stricter conns check
-)
-
-type topologyDriver interface {
- UpdatePeerHealth(peer swarm.Address, health bool, dur time.Duration)
- topology.PeerIterator
-}
-
-type peerStatus interface {
- PeerSnapshot(ctx context.Context, peer swarm.Address) (*status.Snapshot, error)
-}
-
type service struct {
wg sync.WaitGroup
quit chan struct{}
@@ -84,45 +60,6 @@ func New(
return s
}
-func (s *service) worker(startupStabilizer stabilization.Subscriber, mode string, minPeersPerbin int, durPercentile float64, connsPercentile float64) {
- defer s.wg.Done()
-
- sub, unsubscribe := startupStabilizer.Subscribe()
- defer unsubscribe()
-
- select {
- case <-s.quit:
- return
- case <-sub:
- s.logger.Debug("node warmup check completed")
- }
-
- for {
-
- s.salud(mode, minPeersPerbin, durPercentile, connsPercentile)
-
- select {
- case <-s.quit:
- return
- case <-time.After(wakeup):
- }
- }
-}
-
-func (s *service) Close() error {
- close(s.quit)
- s.wg.Wait()
- return nil
-}
-
-type peer struct {
- status *status.Snapshot
- dur time.Duration
- addr swarm.Address
- bin uint8
- neighbor bool
-}
-
// salud acquires the status snapshot of every peer and computes an nth percentile of response duration and connected
// peer count, the most common storage radius, and the batch commitment, and based on these values, marks peers as unhealthy that fall beyond
// the allowed thresholds.
@@ -135,7 +72,7 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
bins [swarm.MaxBins]int
)
- _ = s.topology.EachConnectedPeer(func(addr swarm.Address, bin uint8) (stop bool, jumpToNext bool, err error) {
+ err := s.topology.EachConnectedPeer(func(addr swarm.Address, bin uint8) (stop bool, jumpToNext bool, err error) {
wg.Add(1)
go func() {
defer wg.Done()
@@ -164,6 +101,9 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
}()
return false, false, nil
}, topology.Select{})
+ if err != nil {
+ s.logger.Error(err, "error iterating over connected peers", "mode", mode)
+ }
wg.Wait()
@@ -233,117 +173,3 @@ func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64,
s.publishRadius(networkRadius)
}
-
-func (s *service) IsHealthy() bool {
- return s.isSelfHealthy.Load()
-}
-
-func (s *service) publishRadius(r uint8) {
- s.radiusSubsMtx.Lock()
- defer s.radiusSubsMtx.Unlock()
- for _, cb := range s.radiusC {
- select {
- case cb <- r:
- default:
- }
- }
-}
-
-func (s *service) SubscribeNetworkStorageRadius() (<-chan uint8, func()) {
- s.radiusSubsMtx.Lock()
- defer s.radiusSubsMtx.Unlock()
-
- c := make(chan uint8, 1)
- s.radiusC = append(s.radiusC, c)
-
- return c, func() {
- s.radiusSubsMtx.Lock()
- defer s.radiusSubsMtx.Unlock()
- for i, cc := range s.radiusC {
- if c == cc {
- s.radiusC = append(s.radiusC[:i], s.radiusC[i+1:]...)
- break
- }
- }
- }
-}
-
-// percentileDur finds the p percentile of response duration.
-// Less is better.
-func percentileDur(peers []peer, p float64) float64 {
- index := int(float64(len(peers)) * p)
-
- sort.Slice(peers, func(i, j int) bool {
- return peers[i].dur < peers[j].dur // ascending
- })
-
- return peers[index].dur.Seconds()
-}
-
-// percentileConns finds the p percentile of connection count.
-// More is better.
-func percentileConns(peers []peer, p float64) uint64 {
- index := int(float64(len(peers)) * p)
-
- sort.Slice(peers, func(i, j int) bool {
- return peers[i].status.ConnectedPeers > peers[j].status.ConnectedPeers // descending
- })
-
- return peers[index].status.ConnectedPeers
-}
-
-// radius finds the most common radius.
-func (s *service) committedDepth(peers []peer) (uint8, uint8) {
- var networkDepth [swarm.MaxBins]int
- var nHoodDepth [swarm.MaxBins]int
-
- for _, peer := range peers {
- if peer.status.CommittedDepth < uint32(swarm.MaxBins) {
- if peer.neighbor {
- nHoodDepth[peer.status.CommittedDepth]++
- }
- networkDepth[peer.status.CommittedDepth]++
- }
- }
-
- networkD := maxIndex(networkDepth[:])
- hoodD := maxIndex(nHoodDepth[:])
-
- return uint8(networkD), uint8(hoodD)
-}
-
-// commitment finds the most common batch commitment.
-func commitment(peers []peer) uint64 {
- commitments := make(map[uint64]int)
-
- for _, peer := range peers {
- commitments[peer.status.BatchCommitment]++
- }
-
- var (
- maxCount = 0
- maxCommitment uint64 = 0
- )
-
- for commitment, count := range commitments {
- if count > maxCount {
- maxCommitment = commitment
- maxCount = count
- }
- }
-
- return maxCommitment
-}
-
-func maxIndex(n []int) int {
- maxValue := 0
- index := 0
- for i, c := range n {
- if c > maxValue {
- maxValue = c
- index = i
- }
- }
-
- return index
-}
diff --git a/pkg/salud/salud_js.go b/pkg/salud/salud_js.go
new file mode 100644
index 00000000000..c4e3db66196
--- /dev/null
+++ b/pkg/salud/salud_js.go
@@ -0,0 +1,162 @@
+//go:build js
+// +build js
+
+package salud
+
+import (
+ "context"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/stabilization"
+ "github.com/ethersphere/bee/v2/pkg/storer"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "go.uber.org/atomic"
+)
+
+type service struct {
+ wg sync.WaitGroup
+ quit chan struct{}
+ logger log.Logger
+ topology topologyDriver
+ status peerStatus
+ isSelfHealthy *atomic.Bool
+ reserve storer.RadiusChecker
+
+ radiusSubsMtx sync.Mutex
+ radiusC []chan uint8
+}
+
+func New(
+ status peerStatus,
+ topology topologyDriver,
+ reserve storer.RadiusChecker,
+ logger log.Logger,
+ startupStabilizer stabilization.Subscriber,
+ mode string,
+ minPeersPerbin int,
+ durPercentile float64,
+ connsPercentile float64,
+) *service {
+
+ s := &service{
+ quit: make(chan struct{}),
+ logger: logger.WithName(loggerName).Register(),
+ status: status,
+ topology: topology,
+ isSelfHealthy: atomic.NewBool(true),
+ reserve: reserve,
+ }
+
+ s.wg.Add(1)
+ go s.worker(startupStabilizer, mode, minPeersPerbin, durPercentile, connsPercentile)
+
+ return s
+}
+
+// salud acquires the status snapshot of every peer and computes an nth percentile of response duration and connected
+// peer count, the most common storage radius, and the batch commitment, and based on these values, marks peers as unhealthy that fall beyond
+// the allowed thresholds.
+func (s *service) salud(mode string, minPeersPerbin int, durPercentile float64, connsPercentile float64) {
+ var (
+ mtx sync.Mutex
+ wg sync.WaitGroup
+ totaldur float64
+ peers []peer
+ bins [swarm.MaxBins]int
+ )
+
+ err := s.topology.EachConnectedPeer(func(addr swarm.Address, bin uint8) (stop bool, jumpToNext bool, err error) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
+ defer cancel()
+
+ start := time.Now()
+ snapshot, err := s.status.PeerSnapshot(ctx, addr)
+ dur := time.Since(start)
+
+ if err != nil {
+ s.topology.UpdatePeerHealth(addr, false, dur)
+ return
+ }
+
+ if snapshot.BeeMode != mode {
+ return
+ }
+
+ mtx.Lock()
+ bins[bin]++
+ totaldur += dur.Seconds()
+ peers = append(peers, peer{snapshot, dur, addr, bin, s.reserve.IsWithinStorageRadius(addr)})
+ mtx.Unlock()
+ }()
+ return false, false, nil
+ }, topology.Select{})
+ if err != nil {
+ s.logger.Error(err, "error iterating over connected peers", "mode", mode)
+ }
+
+ wg.Wait()
+
+ if len(peers) == 0 {
+ return
+ }
+
+ networkRadius, nHoodRadius := s.committedDepth(peers)
+ avgDur := totaldur / float64(len(peers))
+ pDur := percentileDur(peers, durPercentile)
+ pConns := percentileConns(peers, connsPercentile)
+ commitment := commitment(peers)
+
+ s.logger.Debug("computed", "avg_dur", avgDur, "pDur", pDur, "pConns", pConns, "network_radius", networkRadius, "neighborhood_radius", nHoodRadius, "batch_commitment", commitment)
+
+ // sort peers by duration, highest first to give priority to the fastest peers
+ sort.Slice(peers, func(i, j int) bool {
+ return peers[i].dur > peers[j].dur // descending
+ })
+
+ for _, peer := range peers {
+
+ var healthy bool
+
+ // every bin should have at least some peers, healthy or not
+ if bins[peer.bin] <= minPeersPerbin {
+ s.topology.UpdatePeerHealth(peer.addr, true, peer.dur)
+ continue
+ }
+
+ if networkRadius > 0 && peer.status.CommittedDepth < uint32(networkRadius-2) {
+ s.logger.Debug("radius health failure", "radius", peer.status.CommittedDepth, "peer_address", peer.addr, "bin", peer.bin)
+ } else if peer.dur.Seconds() > pDur {
+ s.logger.Debug("response duration below threshold", "duration", peer.dur, "peer_address", peer.addr, "bin", peer.bin)
+ } else if peer.status.ConnectedPeers < pConns {
+ s.logger.Debug("connections count below threshold", "connections", peer.status.ConnectedPeers, "peer_address", peer.addr, "bin", peer.bin)
+ } else if peer.status.BatchCommitment != commitment {
+ s.logger.Debug("batch commitment check failure", "commitment", peer.status.BatchCommitment, "peer_address", peer.addr, "bin", peer.bin)
+ } else {
+ healthy = true
+ }
+
+ s.topology.UpdatePeerHealth(peer.addr, healthy, peer.dur)
+ if !healthy {
+ bins[peer.bin]--
+ }
+ }
+
+ selfHealth := true
+ if nHoodRadius == networkRadius && s.reserve.CommittedDepth() != networkRadius {
+ selfHealth = false
+ s.logger.Warning("node is unhealthy due to storage radius discrepancy", "self_radius", s.reserve.CommittedDepth(), "network_radius", networkRadius)
+ }
+
+ s.isSelfHealthy.Store(selfHealth)
+
+ s.publishRadius(networkRadius)
+}
diff --git a/pkg/salud/salud_shared.go b/pkg/salud/salud_shared.go
new file mode 100644
index 00000000000..f232b95beba
--- /dev/null
+++ b/pkg/salud/salud_shared.go
@@ -0,0 +1,199 @@
+// Copyright 2023 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package salud monitors the connected peers, calculates certain thresholds, and marks peers as unhealthy that
+// fall short of the thresholds to maintain network salud (health).
+package salud
+
+import (
+ "context"
+ "sort"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/stabilization"
+ "github.com/ethersphere/bee/v2/pkg/status"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "salud"
+
+const (
+ requestTimeout = time.Second * 10
+ initialBackoffDelay = 10 * time.Second
+ maxBackoffDelay = 5 * time.Minute
+ backoffFactor = 2
+ DefaultMinPeersPerBin = 4
+ DefaultDurPercentile = 0.4 // consider 40% as healthy, lower percentile = stricter duration check
+ DefaultConnsPercentile = 0.8 // consider 80% as healthy, lower percentile = stricter conns check
+)
+
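+// As a worked example of the percentile defaults: with 10 sampled peers,
+// DefaultDurPercentile = 0.4 makes percentileDur sort durations ascending and
+// return peers[4].dur, so a peer then fails the duration check only when it
+// is slower than that 40th-percentile boundary.
+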
+type topologyDriver interface {
+ UpdatePeerHealth(peer swarm.Address, health bool, dur time.Duration)
+ topology.PeerIterator
+}
+
+type peerStatus interface {
+ PeerSnapshot(ctx context.Context, peer swarm.Address) (*status.Snapshot, error)
+}
+
+func (s *service) worker(startupStabilizer stabilization.Subscriber, mode string, minPeersPerbin int, durPercentile float64, connsPercentile float64) {
+ defer s.wg.Done()
+
+ sub, unsubscribe := startupStabilizer.Subscribe()
+ defer unsubscribe()
+
+ select {
+ case <-s.quit:
+ return
+ case <-sub:
+ s.logger.Debug("node warmup check completed")
+ }
+
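+ // Health checks run with exponential backoff: 10s, 20s, 40s, ... capped at
+ // five minutes, so a freshly warmed-up node samples peer health frequently
+ // at first and then settles into the five-minute cadence.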
+ currentDelay := initialBackoffDelay
+
+ for {
+ s.salud(mode, minPeersPerbin, durPercentile, connsPercentile)
+
+ select {
+ case <-s.quit:
+ return
+ case <-time.After(currentDelay):
+ }
+
+ currentDelay *= time.Duration(backoffFactor)
+ if currentDelay > maxBackoffDelay {
+ currentDelay = maxBackoffDelay
+ }
+ }
+}
+
+func (s *service) Close() error {
+ close(s.quit)
+ s.wg.Wait()
+ return nil
+}
+
+type peer struct {
+ status *status.Snapshot
+ dur time.Duration
+ addr swarm.Address
+ bin uint8
+ neighbor bool
+}
+
+func (s *service) IsHealthy() bool {
+ return s.isSelfHealthy.Load()
+}
+
+func (s *service) publishRadius(r uint8) {
+ s.radiusSubsMtx.Lock()
+ defer s.radiusSubsMtx.Unlock()
+ for _, cb := range s.radiusC {
+ select {
+ case cb <- r:
+ default:
+ }
+ }
+}
+
+func (s *service) SubscribeNetworkStorageRadius() (<-chan uint8, func()) {
+ s.radiusSubsMtx.Lock()
+ defer s.radiusSubsMtx.Unlock()
+
+ c := make(chan uint8, 1)
+ s.radiusC = append(s.radiusC, c)
+
+ return c, func() {
+ s.radiusSubsMtx.Lock()
+ defer s.radiusSubsMtx.Unlock()
+ for i, cc := range s.radiusC {
+ if c == cc {
+ s.radiusC = append(s.radiusC[:i], s.radiusC[i+1:]...)
+ break
+ }
+ }
+ }
+}
+
+// percentileDur finds the p percentile of response duration.
+// Less is better.
+func percentileDur(peers []peer, p float64) float64 {
+ index := int(float64(len(peers)) * p)
+
+ sort.Slice(peers, func(i, j int) bool {
+ return peers[i].dur < peers[j].dur // ascending
+ })
+
+ return peers[index].dur.Seconds()
+}
+
+// percentileConns finds the p percentile of connection count.
+// More is better.
+func percentileConns(peers []peer, p float64) uint64 {
+ index := int(float64(len(peers)) * p)
+
+ sort.Slice(peers, func(i, j int) bool {
+ return peers[i].status.ConnectedPeers > peers[j].status.ConnectedPeers // descending
+ })
+
+ return peers[index].status.ConnectedPeers
+}
+
+// committedDepth returns the most common committed depth across the network and within the neighborhood.
+func (s *service) committedDepth(peers []peer) (uint8, uint8) {
+ var networkDepth [swarm.MaxBins]int
+ var nHoodDepth [swarm.MaxBins]int
+
+ for _, peer := range peers {
+ if peer.status.CommittedDepth < uint32(swarm.MaxBins) {
+ if peer.neighbor {
+ nHoodDepth[peer.status.CommittedDepth]++
+ }
+ networkDepth[peer.status.CommittedDepth]++
+ }
+ }
+
+ networkD := maxIndex(networkDepth[:])
+ hoodD := maxIndex(nHoodDepth[:])
+
+ return uint8(networkD), uint8(hoodD)
+}
+
+// commitment finds the most common batch commitment.
+func commitment(peers []peer) uint64 {
+ commitments := make(map[uint64]int)
+
+ for _, peer := range peers {
+ commitments[peer.status.BatchCommitment]++
+ }
+
+ var (
+ maxCount = 0
+ maxCommitment uint64 = 0
+ )
+
+ for commitment, count := range commitments {
+ if count > maxCount {
+ maxCommitment = commitment
+ maxCount = count
+ }
+ }
+
+ return maxCommitment
+}
+
+func maxIndex(n []int) int {
+ maxValue := 0
+ index := 0
+ for i, c := range n {
+ if c > maxValue {
+ maxValue = c
+ index = i
+ }
+ }
+
+ return index
+}
diff --git a/pkg/settlement/pseudosettle/metrics.go b/pkg/settlement/pseudosettle/metrics.go
index 971795cb6a8..1adf77b04ce 100644
--- a/pkg/settlement/pseudosettle/metrics.go
+++ b/pkg/settlement/pseudosettle/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/settlement/pseudosettle/pseudosettle.go b/pkg/settlement/pseudosettle/pseudosettle.go
index 24531b84641..8cd96b7bbb1 100644
--- a/pkg/settlement/pseudosettle/pseudosettle.go
+++ b/pkg/settlement/pseudosettle/pseudosettle.go
@@ -1,6 +1,5 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package pseudosettle
@@ -9,7 +8,6 @@ import (
"errors"
"fmt"
"math/big"
- "strings"
"sync"
"time"
@@ -22,29 +20,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/swarm"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "pseudosettle"
-
-const (
- protocolName = "pseudosettle"
- protocolVersion = "1.0.0"
- streamName = "pseudosettle"
-)
-
-var (
- SettlementReceivedPrefix = "pseudosettle_total_received_"
- SettlementSentPrefix = "pseudosettle_total_sent_"
-
- ErrSettlementTooSoon = errors.New("settlement too soon")
- ErrNoPseudoSettlePeer = errors.New("settlement peer not found")
- ErrDisconnectAllowanceCheckFailed = errors.New("settlement allowance below enforced amount")
- ErrTimeOutOfSyncAlleged = errors.New("settlement allowance timestamps from peer were decreasing")
- ErrTimeOutOfSyncRecent = errors.New("settlement allowance timestamps from peer differed from our measurement by more than 2 seconds")
- ErrTimeOutOfSyncInterval = errors.New("settlement allowance interval from peer differed from local interval by more than 3 seconds")
- ErrRefreshmentBelowExpected = errors.New("refreshment below expected")
- ErrRefreshmentAboveExpected = errors.New("refreshment above expected")
-)
-
type Service struct {
streamer p2p.Streamer
logger log.Logger
@@ -59,17 +34,6 @@ type Service struct {
peers map[string]*pseudoSettlePeer
}
-type pseudoSettlePeer struct {
- lock sync.Mutex // lock to be held during receiving a payment from this peer
- fullNode bool
-}
-
-type lastPayment struct {
- Timestamp int64
- CheckTimestamp int64
- Total *big.Int
-}
-
func New(streamer p2p.Streamer, logger log.Logger, store storage.StateStorer, accounting settlement.Accounting, refreshRate, lightRefreshRate *big.Int, p2pService p2p.Service) *Service {
return &Service{
streamer: streamer,
@@ -85,100 +49,6 @@ func New(streamer p2p.Streamer, logger log.Logger, store storage.StateStorer, ac
}
}
-func (s *Service) Protocol() p2p.ProtocolSpec {
- return p2p.ProtocolSpec{
- Name: protocolName,
- Version: protocolVersion,
- StreamSpecs: []p2p.StreamSpec{
- {
- Name: streamName,
- Handler: s.handler,
- },
- },
- ConnectIn: s.init,
- ConnectOut: s.init,
- DisconnectIn: s.terminate,
- DisconnectOut: s.terminate,
- }
-}
-
-func (s *Service) init(ctx context.Context, p p2p.Peer) error {
- s.peersMu.Lock()
- defer s.peersMu.Unlock()
-
- _, ok := s.peers[p.Address.String()]
- if !ok {
- peerData := &pseudoSettlePeer{fullNode: p.FullNode}
- s.peers[p.Address.String()] = peerData
- }
-
- go s.accounting.Connect(p.Address, p.FullNode)
- return nil
-}
-
-func (s *Service) terminate(p p2p.Peer) error {
- s.peersMu.Lock()
- defer s.peersMu.Unlock()
-
- delete(s.peers, p.Address.String())
-
- go s.accounting.Disconnect(p.Address)
- return nil
-}
-
-func totalKey(peer swarm.Address, prefix string) string {
- return fmt.Sprintf("%v%v", prefix, peer.String())
-}
-
-func totalKeyPeer(key []byte, prefix string) (peer swarm.Address, err error) {
- k := string(key)
-
- split := strings.SplitAfter(k, prefix)
- if len(split) != 2 {
- return swarm.ZeroAddress, errors.New("no peer in key")
- }
- return swarm.ParseHexAddress(split[1])
-}
-
-// peerAllowance computes the maximum incoming payment value we accept
-// this is the time based allowance or the peers actual debt, whichever is less
-func (s *Service) peerAllowance(peer swarm.Address, fullNode bool) (limit *big.Int, stamp int64, err error) {
- var lastTime lastPayment
- err = s.store.Get(totalKey(peer, SettlementReceivedPrefix), &lastTime)
- if err != nil {
- if !errors.Is(err, storage.ErrNotFound) {
- return nil, 0, err
- }
- lastTime.Timestamp = int64(0)
- }
-
- currentTime := s.timeNow().Unix()
- if currentTime == lastTime.Timestamp {
- return nil, 0, ErrSettlementTooSoon
- }
-
- var refreshRateUsed *big.Int
-
- if fullNode {
- refreshRateUsed = s.refreshRate
- } else {
- refreshRateUsed = s.lightRefreshRate
- }
-
- maxAllowance := new(big.Int).Mul(big.NewInt(currentTime-lastTime.Timestamp), refreshRateUsed)
-
- peerDebt, err := s.accounting.PeerDebt(peer)
- if err != nil {
- return nil, 0, err
- }
-
- if peerDebt.Cmp(maxAllowance) >= 0 {
- return maxAllowance, currentTime, nil
- }
-
- return peerDebt, currentTime, nil
-}
-
func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
loggerV1 := s.logger.V(1).Register()
@@ -357,87 +227,3 @@ func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount *big.Int)
s.accounting.NotifyRefreshmentSent(peer, amount, acceptedAmount, checkTime, allegedInterval, nil)
}
-
-func (s *Service) SetAccounting(accounting settlement.Accounting) {
- s.accounting = accounting
-}
-
-// TotalSent returns the total amount sent to a peer
-func (s *Service) TotalSent(peer swarm.Address) (totalSent *big.Int, err error) {
- var lastTime lastPayment
-
- err = s.store.Get(totalKey(peer, SettlementSentPrefix), &lastTime)
- if err != nil {
- if !errors.Is(err, storage.ErrNotFound) {
- return nil, settlement.ErrPeerNoSettlements
- }
- lastTime.Total = big.NewInt(0)
- }
-
- return lastTime.Total, nil
-}
-
-// TotalReceived returns the total amount received from a peer
-func (s *Service) TotalReceived(peer swarm.Address) (totalReceived *big.Int, err error) {
- var lastTime lastPayment
-
- err = s.store.Get(totalKey(peer, SettlementReceivedPrefix), &lastTime)
- if err != nil {
- if !errors.Is(err, storage.ErrNotFound) {
- return nil, settlement.ErrPeerNoSettlements
- }
- lastTime.Total = big.NewInt(0)
- }
-
- return lastTime.Total, nil
-}
-
-// SettlementsSent returns all stored sent settlement values for a given type of prefix
-func (s *Service) SettlementsSent() (map[string]*big.Int, error) {
- sent := make(map[string]*big.Int)
- err := s.store.Iterate(SettlementSentPrefix, func(key, val []byte) (stop bool, err error) {
- addr, err := totalKeyPeer(key, SettlementSentPrefix)
- if err != nil {
- return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
- }
- if _, ok := sent[addr.String()]; !ok {
- var storevalue lastPayment
- err = s.store.Get(totalKey(addr, SettlementSentPrefix), &storevalue)
- if err != nil {
- return false, fmt.Errorf("get peer %s settlement balance: %w", addr.String(), err)
- }
-
- sent[addr.String()] = storevalue.Total
- }
- return false, nil
- })
- if err != nil {
- return nil, err
- }
- return sent, nil
-}
-
-// SettlementsReceived returns all stored received settlement values for a given type of prefix
-func (s *Service) SettlementsReceived() (map[string]*big.Int, error) {
- received := make(map[string]*big.Int)
- err := s.store.Iterate(SettlementReceivedPrefix, func(key, val []byte) (stop bool, err error) {
- addr, err := totalKeyPeer(key, SettlementReceivedPrefix)
- if err != nil {
- return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
- }
- if _, ok := received[addr.String()]; !ok {
- var storevalue lastPayment
- err = s.store.Get(totalKey(addr, SettlementReceivedPrefix), &storevalue)
- if err != nil {
- return false, fmt.Errorf("get peer %s settlement balance: %w", addr.String(), err)
- }
-
- received[addr.String()] = storevalue.Total
- }
- return false, nil
- })
- if err != nil {
- return nil, err
- }
- return received, nil
-}
diff --git a/pkg/settlement/pseudosettle/pseudosettle_js.go b/pkg/settlement/pseudosettle/pseudosettle_js.go
new file mode 100644
index 00000000000..a9fa62d2314
--- /dev/null
+++ b/pkg/settlement/pseudosettle/pseudosettle_js.go
@@ -0,0 +1,213 @@
+//go:build js
+// +build js
+
+package pseudosettle
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/p2p/protobuf"
+ "github.com/ethersphere/bee/v2/pkg/settlement"
+ pb "github.com/ethersphere/bee/v2/pkg/settlement/pseudosettle/pb"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+type Service struct {
+ streamer p2p.Streamer
+ logger log.Logger
+ store storage.StateStorer
+ accounting settlement.Accounting
+ refreshRate *big.Int
+ lightRefreshRate *big.Int
+ p2pService p2p.Service
+ timeNow func() time.Time
+ peersMu sync.Mutex
+ peers map[string]*pseudoSettlePeer
+}
+
+func New(streamer p2p.Streamer, logger log.Logger, store storage.StateStorer, accounting settlement.Accounting, refreshRate, lightRefreshRate *big.Int, p2pService p2p.Service) *Service {
+ return &Service{
+ streamer: streamer,
+ logger: logger.WithName(loggerName).Register(),
+ store: store,
+ accounting: accounting,
+ p2pService: p2pService,
+ refreshRate: refreshRate,
+ lightRefreshRate: lightRefreshRate,
+ timeNow: time.Now,
+ peers: make(map[string]*pseudoSettlePeer),
+ }
+}
+
+func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
+ loggerV1 := s.logger.V(1).Register()
+
+ w, r := protobuf.NewWriterAndReader(stream)
+ defer func() {
+ if err != nil {
+ _ = stream.Reset()
+ } else {
+ _ = stream.FullClose()
+ }
+ }()
+ var req pb.Payment
+ if err := r.ReadMsgWithContext(ctx, &req); err != nil {
+ return fmt.Errorf("read request from peer %v: %w", p.Address, err)
+ }
+
+ attemptedAmount := big.NewInt(0).SetBytes(req.Amount)
+
+ paymentAmount := new(big.Int).Set(attemptedAmount)
+
+ s.peersMu.Lock()
+ pseudoSettlePeer, ok := s.peers[p.Address.String()]
+ s.peersMu.Unlock()
+ if !ok {
+ return ErrNoPseudoSettlePeer
+ }
+
+ pseudoSettlePeer.lock.Lock()
+ defer pseudoSettlePeer.lock.Unlock()
+
+ allowance, timestamp, err := s.peerAllowance(p.Address, pseudoSettlePeer.fullNode)
+ if err != nil {
+ return err
+ }
+
+ if allowance.Cmp(attemptedAmount) < 0 {
+ paymentAmount.Set(allowance)
+ }
+ loggerV1.Debug("pseudosettle accepting payment message from peer", "peer_address", p.Address, "amount", paymentAmount)
+
+ if paymentAmount.Cmp(big.NewInt(0)) < 0 {
+ paymentAmount.Set(big.NewInt(0))
+ }
+
+ err = w.WriteMsgWithContext(ctx, &pb.PaymentAck{
+ Amount: paymentAmount.Bytes(),
+ Timestamp: timestamp,
+ })
+ if err != nil {
+ return err
+ }
+
+ var lastTime lastPayment
+ err = s.store.Get(totalKey(p.Address, SettlementReceivedPrefix), &lastTime)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return err
+ }
+ lastTime.Total = big.NewInt(0)
+ }
+
+ lastTime.Total = lastTime.Total.Add(lastTime.Total, paymentAmount)
+ lastTime.Timestamp = timestamp
+
+ err = s.store.Put(totalKey(p.Address, SettlementReceivedPrefix), lastTime)
+ if err != nil {
+ return err
+ }
+
+ return s.accounting.NotifyRefreshmentReceived(p.Address, paymentAmount, timestamp)
+}
+
+// Pay initiates a payment to the given peer
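+// Pay never returns an error directly; every failure path instead reports the
+// error to the accounting layer via NotifyRefreshmentSent, so the caller's
+// bookkeeping stays consistent even when the refreshment fails.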
+func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount *big.Int) {
+ loggerV1 := s.logger.V(1).Register()
+ ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+ defer cancel()
+
+ var err error
+
+ var lastTime lastPayment
+ err = s.store.Get(totalKey(peer, SettlementSentPrefix), &lastTime)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, err)
+ return
+ }
+ lastTime.Total = big.NewInt(0)
+ lastTime.Timestamp = 0
+ }
+
+ stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
+ if err != nil {
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, err)
+ return
+ }
+ defer func() {
+ if err != nil {
+ _ = stream.Reset()
+ } else {
+ _ = stream.FullClose()
+ }
+ }()
+
+ loggerV1.Debug("pseudosettle sending payment message to peer", "peer_address", peer, "amount", amount)
+ w, r := protobuf.NewWriterAndReader(stream)
+
+ err = w.WriteMsgWithContext(ctx, &pb.Payment{
+ Amount: amount.Bytes(),
+ })
+ if err != nil {
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, err)
+ return
+ }
+
+ var paymentAck pb.PaymentAck
+ err = r.ReadMsgWithContext(ctx, &paymentAck)
+ if err != nil {
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, err)
+ return
+ }
+
+ checkTime := s.timeNow().UnixMilli()
+
+ acceptedAmount := new(big.Int).SetBytes(paymentAck.Amount)
+ if acceptedAmount.Cmp(amount) > 0 {
+ err = fmt.Errorf("pseudosettle: peer %v: %w", peer, ErrRefreshmentAboveExpected)
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, err)
+ return
+ }
+
+ experiencedInterval := checkTime/1000 - lastTime.CheckTimestamp
+ allegedInterval := paymentAck.Timestamp - lastTime.Timestamp
+
+ if allegedInterval < 0 {
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, ErrTimeOutOfSyncAlleged)
+ return
+ }
+
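+ // The ack timestamp must be within 2 seconds of our own clock, and the
+ // interval the peer claims must match our measured interval within 3
+ // seconds; larger deviations are treated as time-sync failures (see the
+ // ErrTimeOutOfSync* variables).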
+ experienceDifferenceRecent := paymentAck.Timestamp - checkTime/1000
+
+ if experienceDifferenceRecent < -2 || experienceDifferenceRecent > 2 {
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, ErrTimeOutOfSyncRecent)
+ return
+ }
+
+ experienceDifferenceInterval := experiencedInterval - allegedInterval
+ if experienceDifferenceInterval < -3 || experienceDifferenceInterval > 3 {
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, ErrTimeOutOfSyncInterval)
+ return
+ }
+
+ lastTime.Total = lastTime.Total.Add(lastTime.Total, acceptedAmount)
+ lastTime.Timestamp = paymentAck.Timestamp
+ lastTime.CheckTimestamp = checkTime / 1000
+
+ err = s.store.Put(totalKey(peer, SettlementSentPrefix), lastTime)
+ if err != nil {
+ s.accounting.NotifyRefreshmentSent(peer, nil, nil, 0, 0, err)
+ return
+ }
+
+ s.accounting.NotifyRefreshmentSent(peer, amount, acceptedAmount, checkTime, allegedInterval, nil)
+}
diff --git a/pkg/settlement/pseudosettle/pseudosettle_shared.go b/pkg/settlement/pseudosettle/pseudosettle_shared.go
new file mode 100644
index 00000000000..015875637f3
--- /dev/null
+++ b/pkg/settlement/pseudosettle/pseudosettle_shared.go
@@ -0,0 +1,231 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pseudosettle
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "strings"
+ "sync"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/settlement"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "pseudosettle"
+
+const (
+ protocolName = "pseudosettle"
+ protocolVersion = "1.0.0"
+ streamName = "pseudosettle"
+)
+
+var (
+ SettlementReceivedPrefix = "pseudosettle_total_received_"
+ SettlementSentPrefix = "pseudosettle_total_sent_"
+
+ ErrSettlementTooSoon = errors.New("settlement too soon")
+ ErrNoPseudoSettlePeer = errors.New("settlement peer not found")
+ ErrDisconnectAllowanceCheckFailed = errors.New("settlement allowance below enforced amount")
+ ErrTimeOutOfSyncAlleged = errors.New("settlement allowance timestamps from peer were decreasing")
+ ErrTimeOutOfSyncRecent = errors.New("settlement allowance timestamps from peer differed from our measurement by more than 2 seconds")
+ ErrTimeOutOfSyncInterval = errors.New("settlement allowance interval from peer differed from local interval by more than 3 seconds")
+ ErrRefreshmentBelowExpected = errors.New("refreshment below expected")
+ ErrRefreshmentAboveExpected = errors.New("refreshment above expected")
+)
+
+type pseudoSettlePeer struct {
+ lock sync.Mutex // lock to be held during receiving a payment from this peer
+ fullNode bool
+}
+
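+// lastPayment records the running settlement state kept per peer and
+// direction: Timestamp is the settlement timestamp exchanged over the
+// protocol, CheckTimestamp a locally measured counterpart used for
+// time-sync checks, and Total the cumulative amount settled so far.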
+type lastPayment struct {
+ Timestamp int64
+ CheckTimestamp int64
+ Total *big.Int
+}
+
+func (s *Service) Protocol() p2p.ProtocolSpec {
+ return p2p.ProtocolSpec{
+ Name: protocolName,
+ Version: protocolVersion,
+ StreamSpecs: []p2p.StreamSpec{
+ {
+ Name: streamName,
+ Handler: s.handler,
+ },
+ },
+ ConnectIn: s.init,
+ ConnectOut: s.init,
+ DisconnectIn: s.terminate,
+ DisconnectOut: s.terminate,
+ }
+}
+
+func (s *Service) init(ctx context.Context, p p2p.Peer) error {
+ s.peersMu.Lock()
+ defer s.peersMu.Unlock()
+
+ _, ok := s.peers[p.Address.String()]
+ if !ok {
+ peerData := &pseudoSettlePeer{fullNode: p.FullNode}
+ s.peers[p.Address.String()] = peerData
+ }
+
+ go s.accounting.Connect(p.Address, p.FullNode)
+ return nil
+}
+
+func (s *Service) terminate(p p2p.Peer) error {
+ s.peersMu.Lock()
+ defer s.peersMu.Unlock()
+
+ delete(s.peers, p.Address.String())
+
+ go s.accounting.Disconnect(p.Address)
+ return nil
+}
+
+func totalKey(peer swarm.Address, prefix string) string {
+ return fmt.Sprintf("%v%v", prefix, peer.String())
+}
+
+func totalKeyPeer(key []byte, prefix string) (peer swarm.Address, err error) {
+ k := string(key)
+
+ split := strings.SplitAfter(k, prefix)
+ if len(split) != 2 {
+ return swarm.ZeroAddress, errors.New("no peer in key")
+ }
+ return swarm.ParseHexAddress(split[1])
+}
+
+// peerAllowance computes the maximum incoming payment value we accept:
+// the time-based allowance or the peer's actual debt, whichever is less.
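+//
+// For example (illustrative numbers): with a refresh rate of 1000 and 10
+// seconds elapsed since the last settlement, the time-based allowance is
+// 10000; if the peer's actual debt is only 7000, then 7000 is returned.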
+func (s *Service) peerAllowance(peer swarm.Address, fullNode bool) (limit *big.Int, stamp int64, err error) {
+ var lastTime lastPayment
+ err = s.store.Get(totalKey(peer, SettlementReceivedPrefix), &lastTime)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, 0, err
+ }
+ lastTime.Timestamp = int64(0)
+ }
+
+ currentTime := s.timeNow().Unix()
+ if currentTime == lastTime.Timestamp {
+ return nil, 0, ErrSettlementTooSoon
+ }
+
+ var refreshRateUsed *big.Int
+
+ if fullNode {
+ refreshRateUsed = s.refreshRate
+ } else {
+ refreshRateUsed = s.lightRefreshRate
+ }
+
+ maxAllowance := new(big.Int).Mul(big.NewInt(currentTime-lastTime.Timestamp), refreshRateUsed)
+
+ peerDebt, err := s.accounting.PeerDebt(peer)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if peerDebt.Cmp(maxAllowance) >= 0 {
+ return maxAllowance, currentTime, nil
+ }
+
+ return peerDebt, currentTime, nil
+}
+
+func (s *Service) SetAccounting(accounting settlement.Accounting) {
+ s.accounting = accounting
+}
+
+// TotalSent returns the total amount sent to a peer
+func (s *Service) TotalSent(peer swarm.Address) (totalSent *big.Int, err error) {
+ var lastTime lastPayment
+
+ err = s.store.Get(totalKey(peer, SettlementSentPrefix), &lastTime)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, settlement.ErrPeerNoSettlements
+ }
+ lastTime.Total = big.NewInt(0)
+ }
+
+ return lastTime.Total, nil
+}
+
+// TotalReceived returns the total amount received from a peer
+func (s *Service) TotalReceived(peer swarm.Address) (totalReceived *big.Int, err error) {
+ var lastTime lastPayment
+
+ err = s.store.Get(totalKey(peer, SettlementReceivedPrefix), &lastTime)
+ if err != nil {
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, settlement.ErrPeerNoSettlements
+ }
+ lastTime.Total = big.NewInt(0)
+ }
+
+ return lastTime.Total, nil
+}
+
+// SettlementsSent returns all stored sent settlement values for a given type of prefix
+func (s *Service) SettlementsSent() (map[string]*big.Int, error) {
+ sent := make(map[string]*big.Int)
+ err := s.store.Iterate(SettlementSentPrefix, func(key, val []byte) (stop bool, err error) {
+ addr, err := totalKeyPeer(key, SettlementSentPrefix)
+ if err != nil {
+ return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
+ }
+ if _, ok := sent[addr.String()]; !ok {
+ var storevalue lastPayment
+ err = s.store.Get(totalKey(addr, SettlementSentPrefix), &storevalue)
+ if err != nil {
+ return false, fmt.Errorf("get peer %s settlement balance: %w", addr.String(), err)
+ }
+
+ sent[addr.String()] = storevalue.Total
+ }
+ return false, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return sent, nil
+}
+
+// SettlementsReceived returns all stored received settlement values for a given type of prefix
+func (s *Service) SettlementsReceived() (map[string]*big.Int, error) {
+ received := make(map[string]*big.Int)
+ err := s.store.Iterate(SettlementReceivedPrefix, func(key, val []byte) (stop bool, err error) {
+ addr, err := totalKeyPeer(key, SettlementReceivedPrefix)
+ if err != nil {
+ return false, fmt.Errorf("parse address from key: %s: %w", string(key), err)
+ }
+ if _, ok := received[addr.String()]; !ok {
+ var storevalue lastPayment
+ err = s.store.Get(totalKey(addr, SettlementReceivedPrefix), &storevalue)
+ if err != nil {
+ return false, fmt.Errorf("get peer %s settlement balance: %w", addr.String(), err)
+ }
+
+ received[addr.String()] = storevalue.Total
+ }
+ return false, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return received, nil
+}
diff --git a/pkg/settlement/swap/chequebook/cashout_test.go b/pkg/settlement/swap/chequebook/cashout_test.go
index 563b6ce4aa2..dc195e6a465 100644
--- a/pkg/settlement/swap/chequebook/cashout_test.go
+++ b/pkg/settlement/swap/chequebook/cashout_test.go
@@ -21,7 +21,7 @@ import (
)
var (
- chequebookABI = abiutil.MustParseABI(sw3abi.ERC20SimpleSwapABIv0_6_9)
+ chequebookABI = abiutil.MustParseABI(sw3abi.ERC20SimpleSwapABIv0_6_5)
chequeCashedEventType = chequebookABI.Events["ChequeCashed"]
chequeBouncedEventType = chequebookABI.Events["ChequeBounced"]
)
diff --git a/pkg/settlement/swap/chequebook/chequebook.go b/pkg/settlement/swap/chequebook/chequebook.go
index 6a1b46417d2..35c58593ea9 100644
--- a/pkg/settlement/swap/chequebook/chequebook.go
+++ b/pkg/settlement/swap/chequebook/chequebook.go
@@ -38,7 +38,7 @@ var (
// ErrInsufficientFunds is the error when the chequebook has not enough free funds for a user action
ErrInsufficientFunds = errors.New("insufficient token balance")
- chequebookABI = abiutil.MustParseABI(sw3abi.ERC20SimpleSwapABIv0_6_9)
+ chequebookABI = abiutil.MustParseABI(sw3abi.ERC20SimpleSwapABIv0_6_5)
chequeCashedEventType = chequebookABI.Events["ChequeCashed"]
chequeBouncedEventType = chequebookABI.Events["ChequeBounced"]
)
diff --git a/pkg/settlement/swap/chequebook/factory.go b/pkg/settlement/swap/chequebook/factory.go
index 53457679d2e..27bd38db3b4 100644
--- a/pkg/settlement/swap/chequebook/factory.go
+++ b/pkg/settlement/swap/chequebook/factory.go
@@ -23,7 +23,7 @@ var (
ErrNotDeployedByFactory = errors.New("chequebook not deployed by factory")
errDecodeABI = errors.New("could not decode abi data")
- factoryABI = abiutil.MustParseABI(sw3abi.SimpleSwapFactoryABIv0_6_9)
+ factoryABI = abiutil.MustParseABI(sw3abi.SimpleSwapFactoryABIv0_6_5)
simpleSwapDeployedEventType = factoryABI.Events["SimpleSwapDeployed"]
)
diff --git a/pkg/settlement/swap/chequebook/factory_test.go b/pkg/settlement/swap/chequebook/factory_test.go
index e47daadbca2..16667ec303c 100644
--- a/pkg/settlement/swap/chequebook/factory_test.go
+++ b/pkg/settlement/swap/chequebook/factory_test.go
@@ -21,7 +21,7 @@ import (
)
var (
- factoryABI = abiutil.MustParseABI(sw3abi.SimpleSwapFactoryABIv0_6_9)
+ factoryABI = abiutil.MustParseABI(sw3abi.SimpleSwapFactoryABIv0_6_5)
simpleSwapDeployedEvent = factoryABI.Events["SimpleSwapDeployed"]
)
diff --git a/pkg/settlement/swap/erc20/erc20.go b/pkg/settlement/swap/erc20/erc20.go
index d5ff69bedf9..2733fa51887 100644
--- a/pkg/settlement/swap/erc20/erc20.go
+++ b/pkg/settlement/swap/erc20/erc20.go
@@ -18,7 +18,7 @@ import (
)
var (
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_9)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
errDecodeABI = errors.New("could not decode abi data")
)
diff --git a/pkg/settlement/swap/erc20/erc20_test.go b/pkg/settlement/swap/erc20/erc20_test.go
index 5322afb4a1d..e5752aca5df 100644
--- a/pkg/settlement/swap/erc20/erc20_test.go
+++ b/pkg/settlement/swap/erc20/erc20_test.go
@@ -17,7 +17,7 @@ import (
)
var (
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_9)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
)
func TestBalanceOf(t *testing.T) {
diff --git a/pkg/settlement/swap/metrics.go b/pkg/settlement/swap/metrics.go
index 2e6fb1a4bb6..c2ab14d3851 100644
--- a/pkg/settlement/swap/metrics.go
+++ b/pkg/settlement/swap/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/settlement/swap/priceoracle/priceoracle.go b/pkg/settlement/swap/priceoracle/priceoracle.go
index 1b60b7c89ec..0ba49e6eff2 100644
--- a/pkg/settlement/swap/priceoracle/priceoracle.go
+++ b/pkg/settlement/swap/priceoracle/priceoracle.go
@@ -47,7 +47,7 @@ type Service interface {
}
var (
- priceOracleABI = abiutil.MustParseABI(priceoracleabi.PriceOracleABIv0_6_9)
+ priceOracleABI = abiutil.MustParseABI(priceoracleabi.PriceOracleABIv0_2_0)
)
func New(logger log.Logger, priceOracleAddress common.Address, transactionService transaction.Service, timeDivisor int64) Service {
diff --git a/pkg/settlement/swap/priceoracle/priceoracle_test.go b/pkg/settlement/swap/priceoracle/priceoracle_test.go
index 53b1c4ef911..58fa344b675 100644
--- a/pkg/settlement/swap/priceoracle/priceoracle_test.go
+++ b/pkg/settlement/swap/priceoracle/priceoracle_test.go
@@ -18,7 +18,7 @@ import (
)
var (
- priceOracleABI = abiutil.MustParseABI(priceoracleabi.PriceOracleABIv0_6_9)
+ priceOracleABI = abiutil.MustParseABI(priceoracleabi.PriceOracleABIv0_2_0)
)
func TestExchangeGetPrice(t *testing.T) {
diff --git a/pkg/settlement/swap/swap.go b/pkg/settlement/swap/swap.go
index 4e7f99bd060..a99474af3e0 100644
--- a/pkg/settlement/swap/swap.go
+++ b/pkg/settlement/swap/swap.go
@@ -1,18 +1,15 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package swap
import (
"context"
- "errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/bee/v2/pkg/log"
- "github.com/ethersphere/bee/v2/pkg/postage/postagecontract"
"github.com/ethersphere/bee/v2/pkg/settlement"
"github.com/ethersphere/bee/v2/pkg/settlement/swap/chequebook"
"github.com/ethersphere/bee/v2/pkg/settlement/swap/swapprotocol"
@@ -20,35 +17,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/swarm"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "swap"
-
-var (
- // ErrWrongChequebook is the error if a peer uses a different chequebook from before.
- ErrWrongChequebook = errors.New("wrong chequebook")
- // ErrUnknownBeneficary is the error if a peer has never announced a beneficiary.
- ErrUnknownBeneficary = errors.New("unknown beneficiary for peer")
- // ErrChequeValueTooLow is the error a peer issued a cheque not covering 1 accounting credit
- ErrChequeValueTooLow = errors.New("cheque value too low")
- ErrNoChequebook = errors.New("no chequebook")
-)
-
-type Interface interface {
- settlement.Interface
- // LastSentCheque returns the last sent cheque for the peer
- LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error)
- // LastSentCheques returns the list of last sent cheques for all peers
- LastSentCheques() (map[string]*chequebook.SignedCheque, error)
- // LastReceivedCheque returns the last received cheque for the peer
- LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error)
- // LastReceivedCheques returns the list of last received cheques for all peers
- LastReceivedCheques() (map[string]*chequebook.SignedCheque, error)
- // CashCheque sends a cashing transaction for the last cheque of the peer
- CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error)
- // CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
- CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error)
-}
-
// Service is the implementation of the swap settlement layer.
type Service struct {
proto swapprotocol.Interface
@@ -156,284 +124,3 @@ func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount *big.Int)
s.metrics.TotalSent.Add(amountFloat)
s.metrics.ChequesSent.Inc()
}
-
-func (s *Service) SetAccounting(accounting settlement.Accounting) {
- s.accounting = accounting
-}
-
-// TotalSent returns the total amount sent to a peer
-func (s *Service) TotalSent(peer swarm.Address) (totalSent *big.Int, err error) {
- beneficiary, known, err := s.addressbook.Beneficiary(peer)
- if err != nil {
- return nil, err
- }
- if !known {
- return nil, settlement.ErrPeerNoSettlements
- }
- if s.chequebook == nil {
- return big.NewInt(0), nil
- }
- cheque, err := s.chequebook.LastCheque(beneficiary)
- if err != nil {
- if errors.Is(err, chequebook.ErrNoCheque) {
- return nil, settlement.ErrPeerNoSettlements
- }
- return nil, err
- }
- return cheque.CumulativePayout, nil
-}
-
-// TotalReceived returns the total amount received from a peer
-func (s *Service) TotalReceived(peer swarm.Address) (totalReceived *big.Int, err error) {
- chequebookAddress, known, err := s.addressbook.Chequebook(peer)
- if err != nil {
- return nil, err
- }
- if !known {
- return nil, settlement.ErrPeerNoSettlements
- }
-
- cheque, err := s.chequeStore.LastCheque(chequebookAddress)
- if err != nil {
- if errors.Is(err, chequebook.ErrNoCheque) {
- return nil, settlement.ErrPeerNoSettlements
- }
- return nil, err
- }
- return cheque.CumulativePayout, nil
-}
-
-// SettlementsSent returns sent settlements for each individual known peer
-func (s *Service) SettlementsSent() (map[string]*big.Int, error) {
- result := make(map[string]*big.Int)
- if s.chequebook == nil {
- return result, nil
- }
- cheques, err := s.chequebook.LastCheques()
- if err != nil {
- return nil, err
- }
-
- for beneficiary, cheque := range cheques {
- peer, known, err := s.addressbook.BeneficiaryPeer(beneficiary)
- if err != nil {
- return nil, err
- }
- if !known {
- continue
- }
- result[peer.String()] = cheque.CumulativePayout
- }
-
- return result, nil
-}
-
-// SettlementsReceived returns received settlements for each individual known peer.
-func (s *Service) SettlementsReceived() (map[string]*big.Int, error) {
- result := make(map[string]*big.Int)
- cheques, err := s.chequeStore.LastCheques()
- if err != nil {
- return nil, err
- }
-
- for chequebook, cheque := range cheques {
- peer, known, err := s.addressbook.ChequebookPeer(chequebook)
- if err != nil {
- return nil, err
- }
- if !known {
- continue
- }
- result[peer.String()] = cheque.CumulativePayout
- }
- return result, err
-}
-
-// Handshake is called by the swap protocol when a handshake is received.
-func (s *Service) Handshake(peer swarm.Address, beneficiary common.Address) error {
- loggerV1 := s.logger.V(1).Register()
-
- oldPeer, known, err := s.addressbook.BeneficiaryPeer(beneficiary)
- if err != nil {
- return err
- }
- if known && !peer.Equal(oldPeer) {
- s.logger.Debug("migrating swap addresses", "old_peer_address", oldPeer, "new_peer_address", peer)
- return s.addressbook.MigratePeer(oldPeer, peer)
- }
-
- _, known, err = s.addressbook.Beneficiary(peer)
- if err != nil {
- return err
- }
- if !known {
- loggerV1.Debug("initial swap handshake", "peer_address", peer, "beneficiary_address", beneficiary)
- return s.addressbook.PutBeneficiary(peer, beneficiary)
- }
-
- return nil
-}
-
-// LastSentCheque returns the last sent cheque for the peer
-func (s *Service) LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
-
- common, known, err := s.addressbook.Beneficiary(peer)
-
- if err != nil {
- return nil, err
- }
-
- if !known {
- return nil, chequebook.ErrNoCheque
- }
-
- if s.chequebook == nil {
- return nil, ErrNoChequebook
- }
-
- return s.chequebook.LastCheque(common)
-}
-
-// LastReceivedCheque returns the last received cheque for the peer
-func (s *Service) LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
-
- common, known, err := s.addressbook.Chequebook(peer)
-
- if err != nil {
- return nil, err
- }
-
- if !known {
- return nil, chequebook.ErrNoCheque
- }
-
- return s.chequeStore.LastCheque(common)
-}
-
-// LastSentCheques returns the list of last sent cheques for all peers
-func (s *Service) LastSentCheques() (map[string]*chequebook.SignedCheque, error) {
- if s.chequebook == nil {
- return nil, ErrNoChequebook
- }
- lastcheques, err := s.chequebook.LastCheques()
- if err != nil {
- return nil, err
- }
-
- resultmap := make(map[string]*chequebook.SignedCheque, len(lastcheques))
-
- for i, j := range lastcheques {
- addr, known, err := s.addressbook.BeneficiaryPeer(i)
- if err == nil && known {
- resultmap[addr.String()] = j
- }
- }
-
- return resultmap, nil
-}
-
-// LastReceivedCheques returns the list of last received cheques for all peers
-func (s *Service) LastReceivedCheques() (map[string]*chequebook.SignedCheque, error) {
- lastcheques, err := s.chequeStore.LastCheques()
- if err != nil {
- return nil, err
- }
-
- resultmap := make(map[string]*chequebook.SignedCheque, len(lastcheques))
-
- for i, j := range lastcheques {
- addr, known, err := s.addressbook.ChequebookPeer(i)
- if err == nil && known {
- resultmap[addr.String()] = j
- }
- }
-
- return resultmap, nil
-}
-
-// CashCheque sends a cashing transaction for the last cheque of the peer
-func (s *Service) CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error) {
- chequebookAddress, known, err := s.addressbook.Chequebook(peer)
- if err != nil {
- return common.Hash{}, err
- }
- if !known {
- return common.Hash{}, chequebook.ErrNoCheque
- }
- return s.cashout.CashCheque(ctx, chequebookAddress, s.cashoutAddress)
-}
-
-// CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
-func (s *Service) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error) {
- chequebookAddress, known, err := s.addressbook.Chequebook(peer)
- if err != nil {
- return nil, err
- }
- if !known {
- return nil, chequebook.ErrNoCheque
- }
- return s.cashout.CashoutStatus(ctx, chequebookAddress)
-}
-
-func (s *Service) GetDeductionForPeer(peer swarm.Address) (bool, error) {
- return s.addressbook.GetDeductionFor(peer)
-}
-
-func (s *Service) GetDeductionByPeer(peer swarm.Address) (bool, error) {
- return s.addressbook.GetDeductionBy(peer)
-}
-
-func (s *Service) AddDeductionByPeer(peer swarm.Address) error {
- return s.addressbook.AddDeductionBy(peer)
-}
-
-type NoOpSwap struct {
-}
-
-func (*NoOpSwap) TotalSent(peer swarm.Address) (totalSent *big.Int, err error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-// TotalReceived returns the total amount received from a peer
-func (*NoOpSwap) TotalReceived(peer swarm.Address) (totalSent *big.Int, err error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-// SettlementsSent returns sent settlements for each individual known peer
-func (*NoOpSwap) SettlementsSent() (map[string]*big.Int, error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-// SettlementsReceived returns received settlements for each individual known peer
-func (*NoOpSwap) SettlementsReceived() (map[string]*big.Int, error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-func (*NoOpSwap) LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-// LastSentCheques returns the list of last sent cheques for all peers
-func (*NoOpSwap) LastSentCheques() (map[string]*chequebook.SignedCheque, error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-// LastReceivedCheque returns the last received cheque for the peer
-func (*NoOpSwap) LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-// LastReceivedCheques returns the list of last received cheques for all peers
-func (*NoOpSwap) LastReceivedCheques() (map[string]*chequebook.SignedCheque, error) {
- return nil, postagecontract.ErrChainDisabled
-}
-
-// CashCheque sends a cashing transaction for the last cheque of the peer
-func (*NoOpSwap) CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error) {
- return common.Hash{}, postagecontract.ErrChainDisabled
-}
-
-// CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
-func (*NoOpSwap) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error) {
- return nil, postagecontract.ErrChainDisabled
-}
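The recurring pattern in this change: each affected package is split into a *_shared.go file plus js / !js variants selected by Go build constraints, so the wasm build compiles a browser-safe subset. A minimal, self-contained sketch of how the constraint pair selects one of two implementations (the greet package is illustrative, not part of the patch):

    // greet_native.go -- compiled for every target except GOOS=js
    //go:build !js
    // +build !js

    package greet

    func Platform() string { return "native" }

    // greet_js.go -- compiled only when GOOS=js (e.g. the wasm target built by `make wasm`)
    //go:build js
    // +build js

    package greet

    func Platform() string { return "js" }

Both constraint syntaxes appear throughout the patch because the older // +build form is kept alongside //go:build for pre-1.17 toolchains.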
diff --git a/pkg/settlement/swap/swap_js.go b/pkg/settlement/swap/swap_js.go
new file mode 100644
index 00000000000..6484a7be9e0
--- /dev/null
+++ b/pkg/settlement/swap/swap_js.go
@@ -0,0 +1,114 @@
+//go:build js
+// +build js
+
+package swap
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/settlement"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/chequebook"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/swapprotocol"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// Service is the implementation of the swap settlement layer.
+type Service struct {
+ proto swapprotocol.Interface
+ logger log.Logger
+ store storage.StateStorer
+ accounting settlement.Accounting
+ chequebook chequebook.Service
+ chequeStore chequebook.ChequeStore
+ cashout chequebook.CashoutService
+ addressbook Addressbook
+ networkID uint64
+ cashoutAddress common.Address
+}
+
+// New creates a new swap Service.
+func New(proto swapprotocol.Interface, logger log.Logger, store storage.StateStorer, chequebook chequebook.Service, chequeStore chequebook.ChequeStore, addressbook Addressbook, networkID uint64, cashout chequebook.CashoutService, accounting settlement.Accounting, cashoutAddress common.Address) *Service {
+ return &Service{
+ proto: proto,
+ logger: logger.WithName(loggerName).Register(),
+ store: store,
+ chequebook: chequebook,
+ chequeStore: chequeStore,
+ addressbook: addressbook,
+ networkID: networkID,
+ cashout: cashout,
+ accounting: accounting,
+ cashoutAddress: cashoutAddress,
+ }
+}
+
+// ReceiveCheque is called by the swap protocol if a cheque is received.
+func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque, exchangeRate, deduction *big.Int) (err error) {
+ // check this is the same chequebook for this peer as previously
+ expectedChequebook, known, err := s.addressbook.Chequebook(peer)
+ if err != nil {
+ return err
+ }
+ if known && expectedChequebook != cheque.Chequebook {
+ return ErrWrongChequebook
+ }
+
+ receivedAmount, err := s.chequeStore.ReceiveCheque(ctx, cheque, exchangeRate, deduction)
+ if err != nil {
+ return fmt.Errorf("rejecting cheque: %w", err)
+ }
+
+ if deduction.Cmp(big.NewInt(0)) > 0 {
+ err = s.addressbook.AddDeductionFor(peer)
+ if err != nil {
+ return err
+ }
+ }
+
+ decreasedAmount := new(big.Int).Sub(receivedAmount, deduction)
+ amount := new(big.Int).Div(decreasedAmount, exchangeRate)
+
+ if !known {
+ err = s.addressbook.PutChequebook(peer, cheque.Chequebook)
+ if err != nil {
+ return err
+ }
+ }
+
+ return s.accounting.NotifyPaymentReceived(peer, amount)
+}
+
+// Pay initiates a payment to the given peer
+func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount *big.Int) {
+ var err error
+ defer func() {
+ if err != nil {
+ s.accounting.NotifyPaymentSent(peer, amount, err)
+ }
+ }()
+ if s.chequebook == nil {
+ err = ErrNoChequebook
+ return
+ }
+ beneficiary, known, err := s.addressbook.Beneficiary(peer)
+ if err != nil {
+ return
+ }
+ if !known {
+ err = ErrUnknownBeneficary
+ return
+ }
+
+ _, err = s.proto.EmitCheque(ctx, peer, beneficiary, amount, s.chequebook.Issue)
+
+ if err != nil {
+ return
+ }
+
+ s.accounting.NotifyPaymentSent(peer, amount, nil)
+}
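The amount credited in ReceiveCheque is the increase of the cheque's cumulative payout minus the announced deduction, converted from wei into accounting credit by the exchange rate. A worked sketch with illustrative numbers (the values are ours, not from the patch):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        receivedAmount := big.NewInt(100_000) // increase of the cumulative payout, in wei
        deduction := big.NewInt(10_000)       // one-time deduction applied for this peer
        exchangeRate := big.NewInt(10)        // wei per unit of accounting credit

        decreased := new(big.Int).Sub(receivedAmount, deduction)
        credit := new(big.Int).Div(decreased, exchangeRate)
        fmt.Println(credit) // 9000, the amount passed to NotifyPaymentReceived
    }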
diff --git a/pkg/settlement/swap/swap_shared.go b/pkg/settlement/swap/swap_shared.go
new file mode 100644
index 00000000000..c9a7a0fea40
--- /dev/null
+++ b/pkg/settlement/swap/swap_shared.go
@@ -0,0 +1,327 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package swap
+
+import (
+ "context"
+ "errors"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethersphere/bee/v2/pkg/postage/postagecontract"
+ "github.com/ethersphere/bee/v2/pkg/settlement"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/chequebook"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "swap"
+
+var (
+ // ErrWrongChequebook is the error returned if a peer uses a different chequebook than before.
+ ErrWrongChequebook = errors.New("wrong chequebook")
+ // ErrUnknownBeneficary is the error returned if a peer has never announced a beneficiary.
+ ErrUnknownBeneficary = errors.New("unknown beneficiary for peer")
+ // ErrChequeValueTooLow is the error returned if a peer issues a cheque that does not cover at least 1 accounting credit.
+ ErrChequeValueTooLow = errors.New("cheque value too low")
+ // ErrNoChequebook is the error returned if no chequebook is configured.
+ ErrNoChequebook = errors.New("no chequebook")
+)
+
+type Interface interface {
+ settlement.Interface
+ // LastSentCheque returns the last sent cheque for the peer
+ LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error)
+ // LastSentCheques returns the list of last sent cheques for all peers
+ LastSentCheques() (map[string]*chequebook.SignedCheque, error)
+ // LastReceivedCheque returns the last received cheque for the peer
+ LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error)
+ // LastReceivedCheques returns the list of last received cheques for all peers
+ LastReceivedCheques() (map[string]*chequebook.SignedCheque, error)
+ // CashCheque sends a cashing transaction for the last cheque of the peer
+ CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error)
+ // CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
+ CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error)
+}
+
+func (s *Service) SetAccounting(accounting settlement.Accounting) {
+ s.accounting = accounting
+}
+
+// TotalSent returns the total amount sent to a peer
+func (s *Service) TotalSent(peer swarm.Address) (totalSent *big.Int, err error) {
+ beneficiary, known, err := s.addressbook.Beneficiary(peer)
+ if err != nil {
+ return nil, err
+ }
+ if !known {
+ return nil, settlement.ErrPeerNoSettlements
+ }
+ if s.chequebook == nil {
+ return big.NewInt(0), nil
+ }
+ cheque, err := s.chequebook.LastCheque(beneficiary)
+ if err != nil {
+ if errors.Is(err, chequebook.ErrNoCheque) {
+ return nil, settlement.ErrPeerNoSettlements
+ }
+ return nil, err
+ }
+ return cheque.CumulativePayout, nil
+}
+
+// TotalReceived returns the total amount received from a peer
+func (s *Service) TotalReceived(peer swarm.Address) (totalReceived *big.Int, err error) {
+ chequebookAddress, known, err := s.addressbook.Chequebook(peer)
+ if err != nil {
+ return nil, err
+ }
+ if !known {
+ return nil, settlement.ErrPeerNoSettlements
+ }
+
+ cheque, err := s.chequeStore.LastCheque(chequebookAddress)
+ if err != nil {
+ if errors.Is(err, chequebook.ErrNoCheque) {
+ return nil, settlement.ErrPeerNoSettlements
+ }
+ return nil, err
+ }
+ return cheque.CumulativePayout, nil
+}
+
+// SettlementsSent returns sent settlements for each individual known peer
+func (s *Service) SettlementsSent() (map[string]*big.Int, error) {
+ result := make(map[string]*big.Int)
+ if s.chequebook == nil {
+ return result, nil
+ }
+ cheques, err := s.chequebook.LastCheques()
+ if err != nil {
+ return nil, err
+ }
+
+ for beneficiary, cheque := range cheques {
+ peer, known, err := s.addressbook.BeneficiaryPeer(beneficiary)
+ if err != nil {
+ return nil, err
+ }
+ if !known {
+ continue
+ }
+ result[peer.String()] = cheque.CumulativePayout
+ }
+
+ return result, nil
+}
+
+// SettlementsReceived returns received settlements for each individual known peer.
+func (s *Service) SettlementsReceived() (map[string]*big.Int, error) {
+ result := make(map[string]*big.Int)
+ cheques, err := s.chequeStore.LastCheques()
+ if err != nil {
+ return nil, err
+ }
+
+ for chequebookAddr, cheque := range cheques {
+ peer, known, err := s.addressbook.ChequebookPeer(chequebookAddr)
+ if err != nil {
+ return nil, err
+ }
+ if !known {
+ continue
+ }
+ result[peer.String()] = cheque.CumulativePayout
+ }
+ return result, nil
+}
+
+// Handshake is called by the swap protocol when a handshake is received.
+func (s *Service) Handshake(peer swarm.Address, beneficiary common.Address) error {
+ loggerV1 := s.logger.V(1).Register()
+
+ oldPeer, known, err := s.addressbook.BeneficiaryPeer(beneficiary)
+ if err != nil {
+ return err
+ }
+ if known && !peer.Equal(oldPeer) {
+ s.logger.Debug("migrating swap addresses", "old_peer_address", oldPeer, "new_peer_address", peer)
+ return s.addressbook.MigratePeer(oldPeer, peer)
+ }
+
+ _, known, err = s.addressbook.Beneficiary(peer)
+ if err != nil {
+ return err
+ }
+ if !known {
+ loggerV1.Debug("initial swap handshake", "peer_address", peer, "beneficiary_address", beneficiary)
+ return s.addressbook.PutBeneficiary(peer, beneficiary)
+ }
+
+ return nil
+}
+
+// LastSentCheque returns the last sent cheque for the peer
+func (s *Service) LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
+ beneficiary, known, err := s.addressbook.Beneficiary(peer)
+ if err != nil {
+ return nil, err
+ }
+ if !known {
+ return nil, chequebook.ErrNoCheque
+ }
+ if s.chequebook == nil {
+ return nil, ErrNoChequebook
+ }
+ return s.chequebook.LastCheque(beneficiary)
+}
+
+// LastReceivedCheque returns the last received cheque for the peer
+func (s *Service) LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
+ chequebookAddress, known, err := s.addressbook.Chequebook(peer)
+ if err != nil {
+ return nil, err
+ }
+ if !known {
+ return nil, chequebook.ErrNoCheque
+ }
+ return s.chequeStore.LastCheque(chequebookAddress)
+}
+
+// LastSentCheques returns the list of last sent cheques for all peers
+func (s *Service) LastSentCheques() (map[string]*chequebook.SignedCheque, error) {
+ if s.chequebook == nil {
+ return nil, ErrNoChequebook
+ }
+ lastcheques, err := s.chequebook.LastCheques()
+ if err != nil {
+ return nil, err
+ }
+
+ resultmap := make(map[string]*chequebook.SignedCheque, len(lastcheques))
+
+ for beneficiary, cheque := range lastcheques {
+ addr, known, err := s.addressbook.BeneficiaryPeer(beneficiary)
+ if err == nil && known {
+ resultmap[addr.String()] = cheque
+ }
+ }
+
+ return resultmap, nil
+}
+
+// LastReceivedCheques returns the list of last received cheques for all peers
+func (s *Service) LastReceivedCheques() (map[string]*chequebook.SignedCheque, error) {
+ lastcheques, err := s.chequeStore.LastCheques()
+ if err != nil {
+ return nil, err
+ }
+
+ resultmap := make(map[string]*chequebook.SignedCheque, len(lastcheques))
+
+ for chequebookAddr, cheque := range lastcheques {
+ addr, known, err := s.addressbook.ChequebookPeer(chequebookAddr)
+ if err == nil && known {
+ resultmap[addr.String()] = cheque
+ }
+ }
+
+ return resultmap, nil
+}
+
+// CashCheque sends a cashing transaction for the last cheque of the peer
+func (s *Service) CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error) {
+ chequebookAddress, known, err := s.addressbook.Chequebook(peer)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ if !known {
+ return common.Hash{}, chequebook.ErrNoCheque
+ }
+ return s.cashout.CashCheque(ctx, chequebookAddress, s.cashoutAddress)
+}
+
+// CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
+func (s *Service) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error) {
+ chequebookAddress, known, err := s.addressbook.Chequebook(peer)
+ if err != nil {
+ return nil, err
+ }
+ if !known {
+ return nil, chequebook.ErrNoCheque
+ }
+ return s.cashout.CashoutStatus(ctx, chequebookAddress)
+}
+
+func (s *Service) GetDeductionForPeer(peer swarm.Address) (bool, error) {
+ return s.addressbook.GetDeductionFor(peer)
+}
+
+func (s *Service) GetDeductionByPeer(peer swarm.Address) (bool, error) {
+ return s.addressbook.GetDeductionBy(peer)
+}
+
+func (s *Service) AddDeductionByPeer(peer swarm.Address) error {
+ return s.addressbook.AddDeductionBy(peer)
+}
+
+// NoOpSwap is a no-op implementation used when the chain, and thus swap, is disabled.
+type NoOpSwap struct{}
+
+func (*NoOpSwap) TotalSent(peer swarm.Address) (totalSent *big.Int, err error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+// TotalReceived returns the total amount received from a peer
+func (*NoOpSwap) TotalReceived(peer swarm.Address) (totalSent *big.Int, err error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+// SettlementsSent returns sent settlements for each individual known peer
+func (*NoOpSwap) SettlementsSent() (map[string]*big.Int, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+// SettlementsReceived returns received settlements for each individual known peer
+func (*NoOpSwap) SettlementsReceived() (map[string]*big.Int, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+func (*NoOpSwap) LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+// LastSentCheques returns the list of last sent cheques for all peers
+func (*NoOpSwap) LastSentCheques() (map[string]*chequebook.SignedCheque, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+// LastReceivedCheque returns the last received cheque for the peer
+func (*NoOpSwap) LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+// LastReceivedCheques returns the list of last received cheques for all peers
+func (*NoOpSwap) LastReceivedCheques() (map[string]*chequebook.SignedCheque, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
+
+// CashCheque sends a cashing transaction for the last cheque of the peer
+func (*NoOpSwap) CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error) {
+ return common.Hash{}, postagecontract.ErrChainDisabled
+}
+
+// CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
+func (*NoOpSwap) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error) {
+ return nil, postagecontract.ErrChainDisabled
+}
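For orientation, a sketch of how a caller such as an API handler might consume the Interface above; this assumes the embedded settlement.Interface exposes SettlementsSent, as the Service method above suggests, and the helper itself is illustrative:

    package example

    import (
        "fmt"

        "github.com/ethersphere/bee/v2/pkg/settlement/swap"
    )

    // printSettlements lists the cumulative sent settlement per peer; with a
    // disabled chain a NoOpSwap would surface postagecontract.ErrChainDisabled here.
    func printSettlements(s swap.Interface) error {
        sent, err := s.SettlementsSent()
        if err != nil {
            return fmt.Errorf("settlements sent: %w", err)
        }
        for peer, amount := range sent {
            fmt.Printf("peer %s: sent %s\n", peer, amount)
        }
        return nil
    }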
diff --git a/pkg/sharky/metrics.go b/pkg/sharky/metrics.go
index 53db4675b06..0b31fc9ba11 100644
--- a/pkg/sharky/metrics.go
+++ b/pkg/sharky/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/sharky/recovery.go b/pkg/sharky/recovery.go
index 2b19ee93935..de7aead7eab 100644
--- a/pkg/sharky/recovery.go
+++ b/pkg/sharky/recovery.go
@@ -9,10 +9,12 @@ import (
"errors"
"fmt"
"io/fs"
- "os"
"path"
"sync"
+ "os"
+
+ sharedFs "github.com/ethersphere/bee/v2/pkg/fs"
"github.com/hashicorp/go-multierror"
)
@@ -20,7 +22,7 @@ import (
type Recovery struct {
mtx sync.Mutex
shards []*slots
- shardFiles []*os.File
+ shardFiles []sharedFs.OsFile
datasize int
}
@@ -28,10 +30,10 @@ var ErrShardNotFound = errors.New("shard not found")
func NewRecovery(dir string, shardCnt int, datasize int) (*Recovery, error) {
shards := make([]*slots, shardCnt)
- shardFiles := make([]*os.File, shardCnt)
+ shardFiles := make([]sharedFs.OsFile, shardCnt)
for i := 0; i < shardCnt; i++ {
- file, err := os.OpenFile(path.Join(dir, fmt.Sprintf("shard_%03d", i)), os.O_RDWR, 0666)
+ file, err := sharedFs.OpenFile(path.Join(dir, fmt.Sprintf("shard_%03d", i)), os.O_RDWR, 0666)
if errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("index %d: %w", i, ErrShardNotFound)
}
@@ -43,7 +45,7 @@ func NewRecovery(dir string, shardCnt int, datasize int) (*Recovery, error) {
return nil, err
}
size := uint32(fi.Size() / int64(datasize))
- ffile, err := os.OpenFile(path.Join(dir, fmt.Sprintf("free_%03d", i)), os.O_RDWR|os.O_CREATE, 0666)
+ ffile, err := sharedFs.OpenFile(path.Join(dir, fmt.Sprintf("free_%03d", i)), os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
return nil, err
}
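recovery.go now opens shard files through a pkg/fs indirection instead of *os.File, which is what lets a js build swap in a non-POSIX backend. That package is not shown in this diff; a plausible minimal shape, offered purely as an assumption about its contract:

    // Hypothetical sketch of pkg/fs as assumed by the recovery changes.
    package fs

    import "os"

    // OsFile captures the subset of *os.File behaviour that sharky needs.
    type OsFile interface {
        Stat() (os.FileInfo, error)
        ReadAt(p []byte, off int64) (n int, err error)
        WriteAt(p []byte, off int64) (n int, err error)
        Truncate(size int64) error
        Close() error
    }

    // OpenFile mirrors os.OpenFile on native builds; a js variant of this file
    // would return an implementation backed by browser storage instead.
    func OpenFile(name string, flag int, perm os.FileMode) (OsFile, error) {
        return os.OpenFile(name, flag, perm)
    }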
diff --git a/pkg/sharky/store.go b/pkg/sharky/store.go
index 59549ee4a4a..d793ed13f35 100644
--- a/pkg/sharky/store.go
+++ b/pkg/sharky/store.go
@@ -1,25 +1,13 @@
-// Copyright 2021 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package sharky
import (
"context"
- "errors"
- "fmt"
"io/fs"
"strconv"
"sync"
-
- "github.com/hashicorp/go-multierror"
-)
-
-var (
- // ErrTooLong returned by Write if the blob length exceeds the max blobsize.
- ErrTooLong = errors.New("data too long")
- // ErrQuitting returned by Write when the store is Closed before the write completes.
- ErrQuitting = errors.New("quitting")
)
// Store models the sharded fix-length blobstore
@@ -63,57 +51,6 @@ func New(basedir fs.FS, shardCnt int, maxDataSize int) (*Store, error) {
return store, nil
}
-// Close closes each shard and return incidental errors from each shard
-func (s *Store) Close() error {
- close(s.quit)
- err := new(multierror.Error)
- for _, sh := range s.shards {
- err = multierror.Append(err, sh.close())
- }
-
- return err.ErrorOrNil()
-}
-
-// create creates a new shard with index, max capacity limit, file within base directory
-func (s *Store) create(index uint8, maxDataSize int, basedir fs.FS) (*shard, error) {
- file, err := basedir.Open(fmt.Sprintf("shard_%03d", index))
- if err != nil {
- return nil, err
- }
- ffile, err := basedir.Open(fmt.Sprintf("free_%03d", index))
- if err != nil {
- return nil, err
- }
- sl := newSlots(ffile.(sharkyFile), s.wg)
- err = sl.load()
- if err != nil {
- return nil, err
- }
- sh := &shard{
- reads: make(chan read),
- errc: make(chan error),
- writes: s.writes,
- index: index,
- maxDataSize: maxDataSize,
- file: file.(sharkyFile),
- slots: sl,
- quit: s.quit,
- }
- terminated := make(chan struct{})
- sh.slots.wg.Add(1)
- go func() {
- defer sh.slots.wg.Done()
- sh.process()
- close(terminated)
- }()
- sh.slots.wg.Add(1)
- go func() {
- defer sh.slots.wg.Done()
- sl.process(terminated)
- }()
- return sh, nil
-}
-
// Read reads the content of the blob found at location into the byte buffer given
// The location is assumed to be obtained by an earlier Write call storing the blob
func (s *Store) Read(ctx context.Context, loc Location, buf []byte) (err error) {
@@ -144,6 +81,27 @@ func (s *Store) Read(ctx context.Context, loc Location, buf []byte) (err error)
}
}
+// Release gives back the slot to the shard
+// From here on the slot can be reused and overwritten
+// Release is meant to be called when an entry in the upstream db is removed
+// Note that releasing is not safe for obfuscating earlier content, since
+// even after reuse, the slot may be used by a very short blob, leaving the
+// rest of the old blob bytes untouched
+func (s *Store) Release(ctx context.Context, loc Location) error {
+ sh := s.shards[loc.Shard]
+ err := sh.release(ctx, loc.Slot)
+ s.metrics.TotalReleaseCalls.Inc()
+ if err == nil {
+ shard := strconv.Itoa(int(sh.index))
+ s.metrics.CurrentShardSize.WithLabelValues(shard).Dec()
+ s.metrics.ShardFragmentation.WithLabelValues(shard).Sub(float64(s.maxDataSize - int(loc.Length)))
+ s.metrics.LastReleasedShardSlot.WithLabelValues(shard).Set(float64(loc.Slot))
+ } else {
+ s.metrics.TotalReleaseCallsErr.Inc()
+ }
+ return err
+}
+
// Write stores a new blob and returns its location to be used as a reference
// It can be given to a Read call to return the stored blob.
func (s *Store) Write(ctx context.Context, data []byte) (loc Location, err error) {
@@ -181,24 +139,3 @@ func (s *Store) Write(ctx context.Context, data []byte) (loc Location, err error
return loc, ctx.Err()
}
}
-
-// Release gives back the slot to the shard
-// From here on the slot can be reused and overwritten
-// Release is meant to be called when an entry in the upstream db is removed
-// Note that releasing is not safe for obfuscating earlier content, since
-// even after reuse, the slot may be used by a very short blob and leaves the
-// rest of the old blob bytes untouched
-func (s *Store) Release(ctx context.Context, loc Location) error {
- sh := s.shards[loc.Shard]
- err := sh.release(ctx, loc.Slot)
- s.metrics.TotalReleaseCalls.Inc()
- if err == nil {
- shard := strconv.Itoa(int(sh.index))
- s.metrics.CurrentShardSize.WithLabelValues(shard).Dec()
- s.metrics.ShardFragmentation.WithLabelValues(shard).Sub(float64(s.maxDataSize - int(loc.Length)))
- s.metrics.LastReleasedShardSlot.WithLabelValues(shard).Set(float64(loc.Slot))
- } else {
- s.metrics.TotalReleaseCallsErr.Inc()
- }
- return err
-}
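A note on the fragmentation gauge that Release (moved above) maintains: every slot occupies maxDataSize bytes, so a blob of length L leaves maxDataSize - L bytes of slack; Release subtracts the slot's slack from the gauge (the Write path presumably adds it). A tiny illustrative sketch (names are ours):

    package main

    import "fmt"

    // slack is the per-slot waste tracked by the ShardFragmentation gauge.
    func slack(maxDataSize int, length uint32) int {
        return maxDataSize - int(length)
    }

    func main() {
        fmt.Println(slack(4096, 1000)) // a 1000-byte blob in a 4096-byte slot wastes 3096 bytes
    }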
diff --git a/pkg/sharky/store_js.go b/pkg/sharky/store_js.go
new file mode 100644
index 00000000000..84d2fd6b0ae
--- /dev/null
+++ b/pkg/sharky/store_js.go
@@ -0,0 +1,103 @@
+//go:build js
+// +build js
+
+package sharky
+
+import (
+ "context"
+ "io/fs"
+ "sync"
+)
+
+// Store models the sharded fix-length blobstore
+// Design provides lockless sharding:
+// - shard choice responding to backpressure by running operation
+// - read prioritisation over writing
+// - free slots allow write
+type Store struct {
+ maxDataSize int // max length of blobs
+ writes chan write // shared write operations channel
+ shards []*shard // shards
+ wg *sync.WaitGroup // count started operations
+ quit chan struct{} // quit channel
+}
+
+// New constructs a sharded blobstore
+// arguments:
+// - base directory as an fs.FS
+// - shard count - positive integer < 256 - cannot be zero or expect panic
+// - maxDataSize - positive integer representing the maximum blob size to be stored
+func New(basedir fs.FS, shardCnt int, maxDataSize int) (*Store, error) {
+ store := &Store{
+ maxDataSize: maxDataSize,
+ writes: make(chan write),
+ shards: make([]*shard, shardCnt),
+ wg: &sync.WaitGroup{},
+ quit: make(chan struct{}),
+ }
+ for i := range store.shards {
+ s, err := store.create(uint8(i), maxDataSize, basedir)
+ if err != nil {
+ return nil, err
+ }
+ store.shards[i] = s
+ }
+
+ return store, nil
+}
+
+// Read reads the content of the blob found at location into the byte buffer given
+// The location is assumed to be obtained by an earlier Write call storing the blob
+func (s *Store) Read(ctx context.Context, loc Location, buf []byte) (err error) {
+ sh := s.shards[loc.Shard]
+ // dispatch the read to the shard processor; the read struct layout is
+ // assumed to mirror the !js implementation
+ select {
+ case sh.reads <- read{buf[:loc.Length], loc.Slot}:
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-sh.quit:
+ return ErrQuitting
+ }
+
+ // wait for the shard to report the outcome of the read
+ select {
+ case err = <-sh.errc:
+ return err
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// Release gives back the slot to the shard
+// From here on the slot can be reused and overwritten
+// Release is meant to be called when an entry in the upstream db is removed
+// Note that releasing is not safe for obfuscating earlier content, since
+// even after reuse, the slot may be used by a very short blob, leaving the
+// rest of the old blob bytes untouched
+func (s *Store) Release(ctx context.Context, loc Location) error {
+ sh := s.shards[loc.Shard]
+ err := sh.release(ctx, loc.Slot)
+
+ return err
+}
+
+// Write stores a new blob and returns its location to be used as a reference
+// It can be given to a Read call to return the stored blob.
+func (s *Store) Write(ctx context.Context, data []byte) (loc Location, err error) {
+ if len(data) > s.maxDataSize {
+ return loc, ErrTooLong
+ }
+ s.wg.Add(1)
+ defer s.wg.Done()
+
+ c := make(chan entry, 1) // buffer the channel to avoid blocking in shard.process on quit or context done
+
+ // dispatch the write to a free shard; the write struct layout is
+ // assumed to mirror the !js implementation
+ select {
+ case s.writes <- write{data, c}:
+ case <-s.quit:
+ return loc, ErrQuitting
+ case <-ctx.Done():
+ return loc, ctx.Err()
+ }
+
+ select {
+ case e := <-c:
+ return e.loc, e.err
+ case <-s.quit:
+ return loc, ErrQuitting
+ case <-ctx.Done():
+ return loc, ctx.Err()
+ }
+}
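Write (with the shard dispatch restored above) follows a request/response-over-channels pattern: the request carries a per-call result channel, buffered with capacity 1 so the shard can always deliver its answer even if the caller has already returned on quit or context cancellation. A standalone sketch of the same pattern:

    package main

    import (
        "context"
        "errors"
        "fmt"
    )

    type result struct {
        slot int
        err  error
    }

    type request struct {
        data []byte
        res  chan result
    }

    func worker(reqs <-chan request, quit <-chan struct{}) {
        for {
            select {
            case r := <-reqs:
                r.res <- result{slot: len(r.data)} // never blocks: res has capacity 1
            case <-quit:
                return
            }
        }
    }

    func write(ctx context.Context, reqs chan<- request, quit <-chan struct{}, data []byte) (int, error) {
        c := make(chan result, 1) // buffered so the worker is never stuck on a caller that gave up
        select {
        case reqs <- request{data, c}:
        case <-quit:
            return 0, errors.New("quitting")
        case <-ctx.Done():
            return 0, ctx.Err()
        }
        select {
        case r := <-c:
            return r.slot, r.err
        case <-quit:
            return 0, errors.New("quitting")
        case <-ctx.Done():
            return 0, ctx.Err()
        }
    }

    func main() {
        reqs := make(chan request)
        quit := make(chan struct{})
        go worker(reqs, quit)

        n, err := write(context.Background(), reqs, quit, []byte("hello"))
        fmt.Println(n, err) // 5 <nil>
        close(quit)
    }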
diff --git a/pkg/sharky/store_shared.go b/pkg/sharky/store_shared.go
new file mode 100644
index 00000000000..58f99f1f356
--- /dev/null
+++ b/pkg/sharky/store_shared.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sharky
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+var (
+ // ErrTooLong returned by Write if the blob length exceeds the max blobsize.
+ ErrTooLong = errors.New("data too long")
+ // ErrQuitting returned by Write when the store is Closed before the write completes.
+ ErrQuitting = errors.New("quitting")
+)
+
+// Close closes each shard and returns incidental errors from each shard
+func (s *Store) Close() error {
+ close(s.quit)
+ err := new(multierror.Error)
+ for _, sh := range s.shards {
+ err = multierror.Append(err, sh.close())
+ }
+
+ return err.ErrorOrNil()
+}
+
+// create creates a new shard with index, max capacity limit, file within base directory
+func (s *Store) create(index uint8, maxDataSize int, basedir fs.FS) (*shard, error) {
+ file, err := basedir.Open(fmt.Sprintf("shard_%03d", index))
+ if err != nil {
+ return nil, err
+ }
+ ffile, err := basedir.Open(fmt.Sprintf("free_%03d", index))
+ if err != nil {
+ return nil, err
+ }
+ sl := newSlots(ffile.(sharkyFile), s.wg)
+ err = sl.load()
+ if err != nil {
+ return nil, err
+ }
+ sh := &shard{
+ reads: make(chan read),
+ errc: make(chan error),
+ writes: s.writes,
+ index: index,
+ maxDataSize: maxDataSize,
+ file: file.(sharkyFile),
+ slots: sl,
+ quit: s.quit,
+ }
+ terminated := make(chan struct{})
+ sh.slots.wg.Add(1)
+ go func() {
+ defer sh.slots.wg.Done()
+ sh.process()
+ close(terminated)
+ }()
+ sh.slots.wg.Add(1)
+ go func() {
+ defer sh.slots.wg.Done()
+ sl.process(terminated)
+ }()
+ return sh, nil
+}
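create pairs two goroutines per shard and orders their shutdown through the terminated channel: the shard processor closes it on exit, and only then does the slots bookkeeper finish. A stripped-down sketch of that ordering:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        terminated := make(chan struct{})
        work := make(chan int)

        // processor: mirrors shard.process, exits first and signals via terminated
        wg.Add(1)
        go func() {
            defer wg.Done()
            for n := range work {
                fmt.Println("processed", n)
            }
            close(terminated)
        }()

        // bookkeeper: mirrors slots.process, keeps running until the processor is done
        wg.Add(1)
        go func() {
            defer wg.Done()
            <-terminated
            fmt.Println("flushing free-slot state after processor exit")
        }()

        work <- 1
        close(work)
        wg.Wait()
    }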
diff --git a/pkg/shed/db.go b/pkg/shed/db.go
index 1106897d008..8917ccac1b6 100644
--- a/pkg/shed/db.go
+++ b/pkg/shed/db.go
@@ -1,25 +1,6 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build !js
+// +build !js
-// Package shed provides a simple abstraction components to compose
-// more complex operations on storage data organized in fields and indexes.
-//
-// Only type which holds logical information about swarm storage chunks data
-// and metadata is Item. This part is not generalized mostly for
-// performance reasons.
package shed
import (
@@ -27,25 +8,8 @@ import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
- "github.com/syndtr/goleveldb/leveldb/opt"
- "github.com/syndtr/goleveldb/leveldb/storage"
- "github.com/syndtr/goleveldb/leveldb/util"
)
-var (
- defaultOpenFilesLimit = uint64(256)
- defaultBlockCacheCapacity = uint64(1 * 1024 * 1024)
- defaultWriteBufferSize = uint64(1 * 1024 * 1024)
- defaultDisableSeeksCompaction = false
-)
-
-type Options struct {
- BlockCacheCapacity uint64
- WriteBufferSize uint64
- OpenFilesLimit uint64
- DisableSeeksCompaction bool
-}
-
// DB provides abstractions over LevelDB in order to
// implement complex structures using fields and ordered indexes.
// It provides a schema functionality to store fields and indexes
@@ -56,37 +20,6 @@ type DB struct {
quit chan struct{} // Quit channel to stop the metrics collection before closing the database
}
-// NewDB constructs a new DB and validates the schema
-// if it exists in database on the given path.
-// metricsPrefix is used for metrics collection for the given DB.
-func NewDB(path string, o *Options) (db *DB, err error) {
- if o == nil {
- o = &Options{
- OpenFilesLimit: defaultOpenFilesLimit,
- BlockCacheCapacity: defaultBlockCacheCapacity,
- WriteBufferSize: defaultWriteBufferSize,
- DisableSeeksCompaction: defaultDisableSeeksCompaction,
- }
- }
- var ldb *leveldb.DB
- if path == "" {
- ldb, err = leveldb.Open(storage.NewMemStorage(), nil)
- } else {
- ldb, err = leveldb.OpenFile(path, &opt.Options{
- OpenFilesCacheCapacity: int(o.OpenFilesLimit),
- BlockCacheCapacity: int(o.BlockCacheCapacity),
- WriteBuffer: int(o.WriteBufferSize),
- DisableSeeksCompaction: o.DisableSeeksCompaction,
- })
- }
-
- if err != nil {
- return nil, err
- }
-
- return NewDBWrap(ldb)
-}
-
// NewDBWrap returns new DB which uses the given ldb as its underlying storage.
// The function will panics if the given ldb is nil.
func NewDBWrap(ldb *leveldb.DB) (db *DB, err error) {
@@ -183,16 +116,3 @@ func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) {
db.metrics.WriteBatchCounter.Inc()
return nil
}
-
-// Compact triggers a full database compaction on the underlying
-// LevelDB instance. Use with care! This can be very expensive!
-func (db *DB) Compact(start, end []byte) error {
- r := util.Range{Start: start, Limit: end}
- return db.ldb.CompactRange(r)
-}
-
-// Close closes LevelDB database.
-func (db *DB) Close() (err error) {
- close(db.quit)
- return db.ldb.Close()
-}
diff --git a/pkg/shed/db_js.go b/pkg/shed/db_js.go
new file mode 100644
index 00000000000..f6ab52324a1
--- /dev/null
+++ b/pkg/shed/db_js.go
@@ -0,0 +1,101 @@
+//go:build js
+// +build js
+
+package shed
+
+import (
+ "errors"
+
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+)
+
+// DB provides abstractions over LevelDB in order to
+// implement complex structures using fields and ordered indexes.
+// It provides a schema functionality to store fields and indexes
+// information about naming and types.
+type DB struct {
+ ldb *leveldb.DB
+ quit chan struct{} // Quit channel closed when the database is closed
+}
+
+// NewDBWrap returns a new DB which uses the given ldb as its underlying storage.
+// The function panics if the given ldb is nil.
+func NewDBWrap(ldb *leveldb.DB) (db *DB, err error) {
+ if ldb == nil {
+ panic(errors.New("shed: NewDBWrap: nil ldb"))
+ }
+
+ db = &DB{
+ ldb: ldb,
+ }
+
+ if _, err = db.getSchema(); err != nil {
+ if errors.Is(err, leveldb.ErrNotFound) {
+ // Save schema with initialized default fields.
+ if err = db.putSchema(schema{
+ Fields: make(map[string]fieldSpec),
+ Indexes: make(map[byte]indexSpec),
+ }); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+
+ // Create a quit channel; the shared Close uses it to signal shutdown.
+ db.quit = make(chan struct{})
+
+ return db, nil
+}
+
+// Put wraps the LevelDB Put method.
+func (db *DB) Put(key, value []byte) (err error) {
+ err = db.ldb.Put(key, value, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Get wraps the LevelDB Get method.
+func (db *DB) Get(key []byte) (value []byte, err error) {
+ value, err = db.ldb.Get(key, nil)
+ if err != nil {
+ return nil, err
+ }
+ return value, nil
+}
+
+// Has wraps the LevelDB Has method.
+func (db *DB) Has(key []byte) (yes bool, err error) {
+ yes, err = db.ldb.Has(key, nil)
+ if err != nil {
+ return false, err
+ }
+ return yes, nil
+}
+
+// Delete wraps the LevelDB Delete method.
+func (db *DB) Delete(key []byte) (err error) {
+ err = db.ldb.Delete(key, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// NewIterator wraps the LevelDB NewIterator method.
+func (db *DB) NewIterator() iterator.Iterator {
+ return db.ldb.NewIterator(nil, nil)
+}
+
+// WriteBatch wraps the LevelDB Write method.
+func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) {
+ err = db.ldb.Write(batch, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
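With NewDB compiled out of js builds, a wasm caller would construct the *leveldb.DB itself and hand it to NewDBWrap; goleveldb's in-memory storage (already used by NewDB for the empty-path case) is a natural stand-in where no real filesystem exists. A sketch under that assumption:

    package main

    import (
        "fmt"

        "github.com/syndtr/goleveldb/leveldb"
        ldbstorage "github.com/syndtr/goleveldb/leveldb/storage"

        "github.com/ethersphere/bee/v2/pkg/shed"
    )

    func main() {
        // MemStorage keeps everything in process memory, a reasonable stand-in
        // where no real filesystem is available (GOOS=js).
        ldb, err := leveldb.Open(ldbstorage.NewMemStorage(), nil)
        if err != nil {
            panic(err)
        }
        db, err := shed.NewDBWrap(ldb)
        if err != nil {
            panic(err)
        }
        defer db.Close()
        fmt.Println("shed DB ready")
    }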
diff --git a/pkg/shed/db_shared.go b/pkg/shed/db_shared.go
new file mode 100644
index 00000000000..e4bdd65ef7d
--- /dev/null
+++ b/pkg/shed/db_shared.go
@@ -0,0 +1,88 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package shed provides simple abstraction components to compose
+// more complex operations on storage data organized in fields and indexes.
+//
+// The only type which holds logical information about swarm storage chunk data
+// and metadata is Item. This part is not generalized, mostly for
+// performance reasons.
+package shed
+
+import (
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ defaultOpenFilesLimit = uint64(256)
+ defaultBlockCacheCapacity = uint64(1 * 1024 * 1024)
+ defaultWriteBufferSize = uint64(1 * 1024 * 1024)
+ defaultDisableSeeksCompaction = false
+)
+
+type Options struct {
+ BlockCacheCapacity uint64
+ WriteBufferSize uint64
+ OpenFilesLimit uint64
+ DisableSeeksCompaction bool
+}
+
+// NewDB constructs a new DB and validates the schema
+// if it exists in the database on the given path.
+func NewDB(path string, o *Options) (db *DB, err error) {
+ if o == nil {
+ o = &Options{
+ OpenFilesLimit: defaultOpenFilesLimit,
+ BlockCacheCapacity: defaultBlockCacheCapacity,
+ WriteBufferSize: defaultWriteBufferSize,
+ DisableSeeksCompaction: defaultDisableSeeksCompaction,
+ }
+ }
+ var ldb *leveldb.DB
+ if path == "" {
+ ldb, err = leveldb.Open(storage.NewMemStorage(), nil)
+ } else {
+ ldb, err = leveldb.OpenFile(path, &opt.Options{
+ OpenFilesCacheCapacity: int(o.OpenFilesLimit),
+ BlockCacheCapacity: int(o.BlockCacheCapacity),
+ WriteBuffer: int(o.WriteBufferSize),
+ DisableSeeksCompaction: o.DisableSeeksCompaction,
+ })
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return NewDBWrap(ldb)
+}
+
+// Compact triggers a full database compaction on the underlying
+// LevelDB instance. Use with care! This can be very expensive!
+func (db *DB) Compact(start, end []byte) error {
+ r := util.Range{Start: start, Limit: end}
+ return db.ldb.CompactRange(r)
+}
+
+// Close closes the LevelDB database.
+func (db *DB) Close() (err error) {
+ close(db.quit)
+ return db.ldb.Close()
+}
diff --git a/pkg/shed/metrics.go b/pkg/shed/metrics.go
index 58820407c9d..1e4b5635926 100644
--- a/pkg/shed/metrics.go
+++ b/pkg/shed/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/storage/cache/cache.go b/pkg/storage/cache/cache.go
index c727b428288..81a6e39c7ab 100644
--- a/pkg/storage/cache/cache.go
+++ b/pkg/storage/cache/cache.go
@@ -1,22 +1,13 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package cache
import (
"github.com/ethersphere/bee/v2/pkg/storage"
- "github.com/ethersphere/bee/v2/pkg/storage/storageutil"
lru "github.com/hashicorp/golang-lru/v2"
)
-// key returns a string representation of the given key.
-func key(key storage.Key) string {
- return storageutil.JoinFields(key.Namespace(), key.ID())
-}
-
-var _ storage.IndexStore = (*Cache)(nil)
-
// Cache is a wrapper around a storage.Store that adds a layer
// of in-memory caching for the Get and Has operations.
type Cache struct {
@@ -38,15 +29,6 @@ func Wrap(store storage.IndexStore, capacity int) (*Cache, error) {
return &Cache{store, lru, newMetrics()}, nil
}
-// add caches given item.
-func (c *Cache) add(i storage.Item) {
- b, err := i.Marshal()
- if err != nil {
- return
- }
- c.lru.Add(key(i), b)
-}
-
// Get implements storage.Store interface.
// On a call it tries to first retrieve the item from cache.
// If the item does not exist in cache, it tries to retrieve
@@ -80,23 +62,3 @@ func (c *Cache) Has(k storage.Key) (bool, error) {
c.metrics.CacheMiss.Inc()
return c.IndexStore.Has(k)
}
-
-// Put implements storage.Store interface.
-// On a call it also inserts the item into the cache so that the next
-// call to Put and Has will be able to retrieve the item from cache.
-func (c *Cache) Put(i storage.Item) error {
- c.add(i)
- return c.IndexStore.Put(i)
-}
-
-// Delete implements storage.Store interface.
-// On a call it also removes the item from the cache.
-func (c *Cache) Delete(i storage.Item) error {
- _ = c.lru.Remove(key(i))
- return c.IndexStore.Delete(i)
-}
-
-func (c *Cache) Close() error {
- c.lru.Purge()
- return nil
-}
diff --git a/pkg/storage/cache/cache_js.go b/pkg/storage/cache/cache_js.go
new file mode 100644
index 00000000000..501a0294b1e
--- /dev/null
+++ b/pkg/storage/cache/cache_js.go
@@ -0,0 +1,60 @@
+//go:build js
+// +build js
+
+package cache
+
+import (
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ lru "github.com/hashicorp/golang-lru/v2"
+)
+
+// Cache is a wrapper around a storage.Store that adds a layer
+// of in-memory caching for the Get and Has operations.
+type Cache struct {
+ storage.IndexStore
+
+ lru *lru.Cache[string, []byte]
+}
+
+// Wrap adds a layer of in-memory caching to storage.Reader Get and Has operations.
+// It returns an error if the capacity is less than or equal to zero.
+func Wrap(store storage.IndexStore, capacity int) (*Cache, error) {
+ lru, err := lru.New[string, []byte](capacity)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Cache{store, lru}, nil
+}
+
+// Get implements storage.Store interface.
+// On a call it tries to first retrieve the item from cache.
+// If the item does not exist in cache, it tries to retrieve
+// it from the underlying store.
+func (c *Cache) Get(i storage.Item) error {
+ if val, ok := c.lru.Get(key(i)); ok {
+ return i.Unmarshal(val)
+ }
+
+ if err := c.IndexStore.Get(i); err != nil {
+ return err
+ }
+
+ c.add(i)
+
+ return nil
+}
+
+// Has implements storage.Store interface.
+// On a call it tries to first retrieve the item from cache.
+// If the item does not exist in cache, it tries to retrieve
+// it from the underlying store.
+func (c *Cache) Has(k storage.Key) (bool, error) {
+ if _, ok := c.lru.Get(key(k)); ok {
+ return true, nil
+ }
+
+ return c.IndexStore.Has(k)
+}
diff --git a/pkg/storage/cache/cache_shared.go b/pkg/storage/cache/cache_shared.go
new file mode 100644
index 00000000000..fd8e1052fd8
--- /dev/null
+++ b/pkg/storage/cache/cache_shared.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storage/storageutil"
+)
+
+// key returns a string representation of the given key.
+func key(key storage.Key) string {
+ return storageutil.JoinFields(key.Namespace(), key.ID())
+}
+
+var _ storage.IndexStore = (*Cache)(nil)
+
+// add caches given item.
+func (c *Cache) add(i storage.Item) {
+ b, err := i.Marshal()
+ if err != nil {
+ return
+ }
+ c.lru.Add(key(i), b)
+}
+
+// Put implements storage.Store interface.
+// On a call it also inserts the item into the cache so that the next
+// call to Put and Has will be able to retrieve the item from cache.
+func (c *Cache) Put(i storage.Item) error {
+ c.add(i)
+ return c.IndexStore.Put(i)
+}
+
+// Delete implements storage.Store interface.
+// On a call it also removes the item from the cache.
+func (c *Cache) Delete(i storage.Item) error {
+ _ = c.lru.Remove(key(i))
+ return c.IndexStore.Delete(i)
+}
+
+func (c *Cache) Close() error {
+ c.lru.Purge()
+ return nil
+}
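The split keeps Put, Delete, and the key helper shared while Get and Has differ only in metrics. The underlying read-through idea is the same on both targets; a standalone sketch of it over the same hashicorp LRU (the readThrough type is illustrative):

    package main

    import (
        "fmt"

        lru "github.com/hashicorp/golang-lru/v2"
    )

    // readThrough wraps a slow lookup with an LRU, mirroring how Cache.Get first
    // consults c.lru before falling back to the underlying IndexStore.
    type readThrough struct {
        lru  *lru.Cache[string, []byte]
        load func(key string) ([]byte, error) // stands in for the underlying store
    }

    func (r *readThrough) Get(key string) ([]byte, error) {
        if v, ok := r.lru.Get(key); ok {
            return v, nil // cache hit: no store access
        }
        v, err := r.load(key)
        if err != nil {
            return nil, err
        }
        r.lru.Add(key, v) // populate the cache for subsequent calls
        return v, nil
    }

    func main() {
        l, _ := lru.New[string, []byte](128)
        rt := &readThrough{lru: l, load: func(k string) ([]byte, error) {
            fmt.Println("store access for", k)
            return []byte("value-" + k), nil
        }}
        rt.Get("a") // prints "store access for a"
        rt.Get("a") // served from the LRU, no print
    }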
diff --git a/pkg/storage/cache/metrics.go b/pkg/storage/cache/metrics.go
index ec9a6051786..fe2c6dcfaa0 100644
--- a/pkg/storage/cache/metrics.go
+++ b/pkg/storage/cache/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/storageincentives/agent.go b/pkg/storageincentives/agent.go
index a87540d390d..cd440ab9f25 100644
--- a/pkg/storageincentives/agent.go
+++ b/pkg/storageincentives/agent.go
@@ -1,13 +1,11 @@
-// Copyright 2022 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package storageincentives
import (
"context"
"crypto/rand"
- "errors"
"fmt"
"io"
"math/big"
@@ -15,8 +13,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethersphere/bee/v2/pkg/crypto"
"github.com/ethersphere/bee/v2/pkg/log"
"github.com/ethersphere/bee/v2/pkg/postage"
"github.com/ethersphere/bee/v2/pkg/postage/postagecontract"
@@ -29,30 +25,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/transaction"
)
-const loggerName = "storageincentives"
-
-const (
- DefaultBlocksPerRound = 152
- DefaultBlocksPerPhase = DefaultBlocksPerRound / 4
-
- // min # of transactions our wallet should be able to cover
- minTxCountToCover = 15
-
- // average tx gas used by transactions issued from agent
- avgTxGas = 250_000
-)
-
-type ChainBackend interface {
- BlockNumber(context.Context) (uint64, error)
- HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
- BalanceAt(ctx context.Context, address common.Address, block *big.Int) (*big.Int, error)
- SuggestGasPrice(ctx context.Context) (*big.Int, error)
-}
-
-type Health interface {
- IsHealthy() bool
-}
-
type Agent struct {
logger log.Logger
metrics metrics
@@ -258,34 +230,6 @@ func (a *Agent) start(blockTime time.Duration, blocksPerRound, blocksPerPhase ui
}
}
-func (a *Agent) handleCommit(ctx context.Context, round uint64) error {
- // commit event handler has to be guarded with lock to avoid
- // race conditions when handler is triggered again from sample phase
- a.commitLock.Lock()
- defer a.commitLock.Unlock()
-
- if _, exists := a.state.CommitKey(round); exists {
- // already committed on this round, phase is skipped
- return nil
- }
-
- // the sample has to come from previous round to be able to commit it
- sample, exists := a.state.SampleData(round - 1)
- if !exists {
- // In absence of sample, phase is skipped
- return nil
- }
-
- err := a.commit(ctx, sample, round)
- if err != nil {
- return err
- }
-
- a.state.SetLastPlayedRound(round)
-
- return nil
-}
-
func (a *Agent) handleReveal(ctx context.Context, round uint64) error {
// reveal requires the commitKey from the same round
commitKey, exists := a.state.CommitKey(round)
@@ -445,46 +389,6 @@ func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) {
return true, nil
}
-func (a *Agent) makeSample(ctx context.Context, committedDepth uint8) (SampleData, error) {
- salt, err := a.contract.ReserveSalt(ctx)
- if err != nil {
- return SampleData{}, err
- }
-
- timeLimiter, err := a.getPreviousRoundTime(ctx)
- if err != nil {
- return SampleData{}, err
- }
-
- rSample, err := a.store.ReserveSample(ctx, salt, committedDepth, uint64(timeLimiter), a.minBatchBalance())
- if err != nil {
- return SampleData{}, err
- }
-
- sampleHash, err := sampleHash(rSample.Items)
- if err != nil {
- return SampleData{}, err
- }
-
- sample := SampleData{
- Anchor1: salt,
- ReserveSampleItems: rSample.Items,
- ReserveSampleHash: sampleHash,
- StorageRadius: committedDepth,
- }
-
- return sample, nil
-}
-
-func (a *Agent) minBatchBalance() *big.Int {
- cs := a.chainStateGetter.GetChainState()
- nextRoundBlockNumber := ((a.state.currentBlock() / a.blocksPerRound) + 2) * a.blocksPerRound
- difference := nextRoundBlockNumber - cs.Block
- minBalance := new(big.Int).Add(cs.TotalAmount, new(big.Int).Mul(cs.CurrentPrice, big.NewInt(int64(difference))))
-
- return minBalance
-}
-
func (a *Agent) getPreviousRoundTime(ctx context.Context) (time.Duration, error) {
previousRoundBlockNumber := ((a.state.currentBlock() / a.blocksPerRound) - 1) * a.blocksPerRound
@@ -523,94 +427,3 @@ func (a *Agent) commit(ctx context.Context, sample SampleData, round uint64) err
return nil
}
-
-func (a *Agent) Close() error {
- close(a.quit)
-
- stopped := make(chan struct{})
- go func() {
- a.wg.Wait()
- close(stopped)
- }()
-
- select {
- case <-stopped:
- return nil
- case <-time.After(5 * time.Second):
- return errors.New("stopping incentives with ongoing worker goroutine")
- }
-}
-
-func (a *Agent) wrapCommit(storageRadius uint8, sample []byte, key []byte) ([]byte, error) {
- storageRadiusByte := []byte{storageRadius}
-
- data := append(a.overlay.Bytes(), storageRadiusByte...)
- data = append(data, sample...)
- data = append(data, key...)
-
- return crypto.LegacyKeccak256(data)
-}
-
-// Status returns the node status
-func (a *Agent) Status() (*Status, error) {
- return a.state.Status()
-}
-
-type SampleWithProofs struct {
- Hash swarm.Address `json:"hash"`
- Proofs redistribution.ChunkInclusionProofs `json:"proofs"`
- Duration time.Duration `json:"duration"`
-}
-
-// SampleWithProofs is called only by rchash API
-func (a *Agent) SampleWithProofs(
- ctx context.Context,
- anchor1 []byte,
- anchor2 []byte,
- storageRadius uint8,
-) (SampleWithProofs, error) {
- sampleStartTime := time.Now()
-
- timeLimiter, err := a.getPreviousRoundTime(ctx)
- if err != nil {
- return SampleWithProofs{}, err
- }
-
- rSample, err := a.store.ReserveSample(ctx, anchor1, storageRadius, uint64(timeLimiter), a.minBatchBalance())
- if err != nil {
- return SampleWithProofs{}, err
- }
-
- hash, err := sampleHash(rSample.Items)
- if err != nil {
- return SampleWithProofs{}, fmt.Errorf("sample hash: %w", err)
- }
-
- proofs, err := makeInclusionProofs(rSample.Items, anchor1, anchor2)
- if err != nil {
- return SampleWithProofs{}, fmt.Errorf("make proofs: %w", err)
- }
-
- return SampleWithProofs{
- Hash: hash,
- Proofs: proofs,
- Duration: time.Since(sampleStartTime),
- }, nil
-}
-
-func (a *Agent) HasEnoughFundsToPlay(ctx context.Context) (*big.Int, bool, error) {
- balance, err := a.backend.BalanceAt(ctx, a.state.ethAddress, nil)
- if err != nil {
- return nil, false, err
- }
-
- price, err := a.backend.SuggestGasPrice(ctx)
- if err != nil {
- return nil, false, err
- }
-
- avgTxFee := new(big.Int).Mul(big.NewInt(avgTxGas), price)
- minBalance := new(big.Int).Mul(avgTxFee, big.NewInt(minTxCountToCover))
-
- return minBalance, balance.Cmp(minBalance) >= 1, nil
-}
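For reference, the funding check removed here requires the wallet to strictly exceed minTxCountToCover (15) transactions of avgTxGas (250,000) gas at the currently suggested price (note the Cmp(minBalance) >= 1 comparison). The same arithmetic, worked with an illustrative 2 gwei gas price:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        const (
            avgTxGas          = 250_000 // average gas per agent transaction
            minTxCountToCover = 15      // transactions the wallet should cover
        )
        gasPrice := big.NewInt(2_000_000_000) // 2 gwei, illustrative suggested price

        avgTxFee := new(big.Int).Mul(big.NewInt(avgTxGas), gasPrice)
        minBalance := new(big.Int).Mul(avgTxFee, big.NewInt(minTxCountToCover))
        fmt.Println(minBalance) // 7500000000000000 wei, i.e. 0.0075 of the native token
    }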
diff --git a/pkg/storageincentives/agent_js.go b/pkg/storageincentives/agent_js.go
new file mode 100644
index 00000000000..3b2a725081b
--- /dev/null
+++ b/pkg/storageincentives/agent_js.go
@@ -0,0 +1,407 @@
+//go:build js
+// +build js
+
+package storageincentives
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/postage/postagecontract"
+ "github.com/ethersphere/bee/v2/pkg/settlement/swap/erc20"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives/redistribution"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives/staking"
+ "github.com/ethersphere/bee/v2/pkg/storer"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/transaction"
+)
+
+type Agent struct {
+ logger log.Logger
+ backend ChainBackend
+ blocksPerRound uint64
+ contract redistribution.Contract
+ batchExpirer postagecontract.PostageBatchExpirer
+ redistributionStatuser staking.RedistributionStatuser
+ store storer.Reserve
+ fullSyncedFunc func() bool
+ overlay swarm.Address
+ quit chan struct{}
+ wg sync.WaitGroup
+ state *RedistributionState
+ chainStateGetter postage.ChainStateGetter
+ commitLock sync.Mutex
+ health Health
+}
+
+func New(overlay swarm.Address,
+ ethAddress common.Address,
+ backend ChainBackend,
+ contract redistribution.Contract,
+ batchExpirer postagecontract.PostageBatchExpirer,
+ redistributionStatuser staking.RedistributionStatuser,
+ store storer.Reserve,
+ fullSyncedFunc func() bool,
+ blockTime time.Duration,
+ blocksPerRound,
+ blocksPerPhase uint64,
+ stateStore storage.StateStorer,
+ chainStateGetter postage.ChainStateGetter,
+ erc20Service erc20.Service,
+ tranService transaction.Service,
+ health Health,
+ logger log.Logger,
+) (*Agent, error) {
+ a := &Agent{
+ overlay: overlay,
+ backend: backend,
+ logger: logger.WithName(loggerName).Register(),
+ contract: contract,
+ batchExpirer: batchExpirer,
+ store: store,
+ fullSyncedFunc: fullSyncedFunc,
+ blocksPerRound: blocksPerRound,
+ quit: make(chan struct{}),
+ redistributionStatuser: redistributionStatuser,
+ health: health,
+ chainStateGetter: chainStateGetter,
+ }
+
+ state, err := NewRedistributionState(logger, ethAddress, stateStore, erc20Service, tranService)
+ if err != nil {
+ return nil, err
+ }
+
+ a.state = state
+
+ a.wg.Add(1)
+ go a.start(blockTime, a.blocksPerRound, blocksPerPhase)
+
+ return a, nil
+}
+
+// start polls the current block number, calculates the current phase, and publishes each phase change exactly once.
+// Each round is blocksPerRound long; commit and reveal each last blocksPerPhase blocks, and claim spans the rest of the round.
+// The sample phase is triggered upon entering the claim phase and may run until the end of the commit phase.
+// If our neighborhood is selected to participate, a sample is created during the sample phase. In the commit phase,
+// the sample is submitted, and in the reveal phase, the obfuscation key from the commit phase is submitted.
+// Next, in the claim phase, we check if we've won, and the cycle repeats. The whole cycle must complete within one round.
+func (a *Agent) start(blockTime time.Duration, blocksPerRound, blocksPerPhase uint64) {
+ defer a.wg.Done()
+
+ phaseEvents := newEvents()
+ defer phaseEvents.Close()
+
+ logErr := func(phase PhaseType, round uint64, err error) {
+ if err != nil {
+ a.logger.Error(err, "phase failed", "phase", phase, "round", round)
+ }
+ }
+
+ phaseEvents.On(commit, func(ctx context.Context) {
+ phaseEvents.Cancel(claim)
+
+ round, _ := a.state.currentRoundAndPhase()
+ err := a.handleCommit(ctx, round)
+ logErr(commit, round, err)
+ })
+
+ phaseEvents.On(reveal, func(ctx context.Context) {
+ phaseEvents.Cancel(commit, sample)
+ round, _ := a.state.currentRoundAndPhase()
+ logErr(reveal, round, a.handleReveal(ctx, round))
+ })
+
+ phaseEvents.On(claim, func(ctx context.Context) {
+ phaseEvents.Cancel(reveal)
+ phaseEvents.Publish(sample)
+
+ round, _ := a.state.currentRoundAndPhase()
+ logErr(claim, round, a.handleClaim(ctx, round))
+ })
+
+ phaseEvents.On(sample, func(ctx context.Context) {
+ round, _ := a.state.currentRoundAndPhase()
+ isPhasePlayed, err := a.handleSample(ctx, round)
+ logErr(sample, round, err)
+
+ // Handling the sample can take a long time and may overlap with the commit
+ // phase of the next round. When that happens, the commit event needs to be
+ // triggered once more so the delayed commit phase is still handled.
+ currentRound, currentPhase := a.state.currentRoundAndPhase()
+ if isPhasePlayed &&
+ currentPhase == commit &&
+ currentRound-1 == round {
+ phaseEvents.Publish(commit)
+ }
+ })
+
+ var (
+ prevPhase PhaseType = -1
+ currentPhase PhaseType
+ )
+
+ phaseCheck := func(ctx context.Context) {
+ ctx, cancel := context.WithTimeout(ctx, blockTime*time.Duration(blocksPerRound))
+ defer cancel()
+
+ block, err := a.backend.BlockNumber(ctx)
+ if err != nil {
+ a.logger.Error(err, "getting block number")
+ return
+ }
+
+ a.state.SetCurrentBlock(block)
+
+ round := block / blocksPerRound
+
+ p := block % blocksPerRound
+ switch {
+ case p < blocksPerPhase:
+ currentPhase = commit // [0, 37]
+ case p < 2*blocksPerPhase:
+ currentPhase = reveal // [38, 75]
+ default:
+ currentPhase = claim // [76, 151]
+ }
+
+ // write the current phase only once
+ if currentPhase == prevPhase {
+ return
+ }
+
+ prevPhase = currentPhase
+
+ a.logger.Info("entered new phase", "phase", currentPhase.String(), "round", round, "block", block)
+
+ a.state.SetCurrentEvent(currentPhase, round)
+ a.state.SetFullySynced(a.fullSyncedFunc())
+ a.state.SetHealthy(a.health.IsHealthy())
+ go a.state.purgeStaleRoundData()
+
+ // check if node is frozen starting from the next block
+ isFrozen, err := a.redistributionStatuser.IsOverlayFrozen(ctx, block+1)
+ if err != nil {
+ a.logger.Error(err, "error checking if stake is frozen")
+ } else {
+ a.state.SetFrozen(isFrozen, round)
+ }
+
+ phaseEvents.Publish(currentPhase)
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ <-a.quit
+ cancel()
+ }()
+
+ // invoke phaseCheck once up front so the initial state is populated as soon as possible
+ phaseCheck(ctx)
+
+ phaseCheckInterval := blockTime
+ // optimization: we do not need to check for a phase change on every new block
+ if blocksPerPhase > 10 {
+ phaseCheckInterval = blockTime * 5
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-time.After(phaseCheckInterval):
+ phaseCheck(ctx)
+ }
+ }
+}
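For orientation, the round/phase derivation in phaseCheck above is plain integer arithmetic. The sketch below reproduces it standalone, using the default values defined in agent_shared.go (152 blocks per round, 38 per phase); it is illustrative only and not part of the patch.

```go
package main

import "fmt"

const (
	blocksPerRound uint64 = 152
	blocksPerPhase uint64 = blocksPerRound / 4 // 38
)

// phaseOf mirrors phaseCheck: a block's offset within its round decides the phase.
func phaseOf(block uint64) (round uint64, phase string) {
	round = block / blocksPerRound
	switch p := block % blocksPerRound; {
	case p < blocksPerPhase:
		phase = "commit" // offsets [0, 37]
	case p < 2*blocksPerPhase:
		phase = "reveal" // offsets [38, 75]
	default:
		phase = "claim" // offsets [76, 151]
	}
	return round, phase
}

func main() {
	for _, b := range []uint64{0, 37, 38, 75, 76, 151, 152} {
		r, p := phaseOf(b)
		fmt.Printf("block %d -> round %d, %s\n", b, r, p)
	}
}
```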
+
+func (a *Agent) handleReveal(ctx context.Context, round uint64) error {
+ // reveal requires the commitKey from the same round
+ commitKey, exists := a.state.CommitKey(round)
+ if !exists {
+ // In absence of commitKey, phase is skipped
+ return nil
+ }
+
+ // reveal requires sample from previous round
+ sample, exists := a.state.SampleData(round - 1)
+ if !exists {
+ // the sample should have been produced and saved by this point
+ return fmt.Errorf("sample not found in reveal phase")
+ }
+
+ rsh := sample.ReserveSampleHash.Bytes()
+ txHash, err := a.contract.Reveal(ctx, sample.StorageRadius, rsh, commitKey)
+ if err != nil {
+ return err
+ }
+ a.state.AddFee(ctx, txHash)
+
+ a.state.SetHasRevealed(round)
+
+ return nil
+}
+
+func (a *Agent) handleClaim(ctx context.Context, round uint64) error {
+ hasRevealed := a.state.HasRevealed(round)
+ if !hasRevealed {
+ // When there was no reveal in the same round, the phase is skipped
+ return nil
+ }
+
+ isWinner, err := a.contract.IsWinner(ctx)
+ if err != nil {
+ return err
+ }
+
+ if !isWinner {
+ a.logger.Info("not a winner")
+ // When there is nothing to claim (the node is not a winner), the phase still counts as played
+ return nil
+ }
+
+ a.state.SetLastWonRound(round)
+
+ // When there are too many expired batches, the Claim transaction could run out of gas.
+ // To prevent this, the node should expire batches before claiming the reward.
+ err = a.batchExpirer.ExpireBatches(ctx)
+ if err != nil {
+ a.logger.Info("expire batches failed", "err", err)
+ // even if expiry fails, proceed with the claim handler,
+ // because this should not prevent the node from claiming its reward
+ }
+
+ errBalance := a.state.SetBalance(ctx)
+ if errBalance != nil {
+ a.logger.Info("could not set balance", "err", err)
+ }
+
+ sampleData, exists := a.state.SampleData(round - 1)
+ if !exists {
+ return fmt.Errorf("sample not found")
+ }
+
+ anchor2, err := a.contract.ReserveSalt(ctx)
+ if err != nil {
+ a.logger.Info("failed getting anchor after second reveal", "err", err)
+ }
+
+ proofs, err := makeInclusionProofs(sampleData.ReserveSampleItems, sampleData.Anchor1, anchor2)
+ if err != nil {
+ return fmt.Errorf("making inclusion proofs: %w", err)
+ }
+
+ txHash, err := a.contract.Claim(ctx, proofs)
+ if err != nil {
+ return fmt.Errorf("claiming win: %w", err)
+ }
+
+ a.logger.Info("claimed win")
+
+ if errBalance == nil {
+ errReward := a.state.CalculateWinnerReward(ctx)
+ if errReward != nil {
+ a.logger.Info("calculate winner reward", "err", err)
+ }
+ }
+
+ a.state.AddFee(ctx, txHash)
+
+ return nil
+}
+
+func (a *Agent) handleSample(ctx context.Context, round uint64) (bool, error) {
+ // minimum proximity between the anchor and the stored chunks
+ committedDepth := a.store.CommittedDepth()
+
+ if a.state.IsFrozen() {
+ a.logger.Info("skipping round because node is frozen")
+ return false, nil
+ }
+
+ isPlaying, err := a.contract.IsPlaying(ctx, committedDepth)
+ if err != nil {
+ return false, err
+ }
+ if !isPlaying {
+ a.logger.Info("not playing in this round")
+ return false, nil
+ }
+ a.state.SetLastSelectedRound(round + 1)
+ a.logger.Info("neighbourhood chosen", "round", round)
+
+ if !a.state.IsFullySynced() {
+ a.logger.Info("skipping round because node is not fully synced")
+ return false, nil
+ }
+
+ if !a.state.IsHealthy() {
+ a.logger.Info("skipping round because node is unhealhy", "round", round)
+ return false, nil
+ }
+
+ _, hasFunds, err := a.HasEnoughFundsToPlay(ctx)
+ if err != nil {
+ return false, fmt.Errorf("has enough funds to play: %w", err)
+ } else if !hasFunds {
+ a.logger.Info("insufficient funds to play in next round", "round", round)
+
+ return false, nil
+ }
+
+ now := time.Now()
+ sample, err := a.makeSample(ctx, committedDepth)
+ if err != nil {
+ return false, err
+ }
+ dur := time.Since(now)
+
+ a.logger.Info("produced sample", "hash", sample.ReserveSampleHash, "radius", committedDepth, "round", round)
+
+ a.state.SetSampleData(round, sample, dur)
+
+ return true, nil
+}
+
+func (a *Agent) getPreviousRoundTime(ctx context.Context) (time.Duration, error) {
+ previousRoundBlockNumber := ((a.state.currentBlock() / a.blocksPerRound) - 1) * a.blocksPerRound
+
+ timeLimiterBlock, err := a.backend.HeaderByNumber(ctx, new(big.Int).SetUint64(previousRoundBlockNumber))
+ if err != nil {
+ return 0, err
+ }
+
+ return time.Duration(timeLimiterBlock.Time) * time.Second / time.Nanosecond, nil
+}
+
+func (a *Agent) commit(ctx context.Context, sample SampleData, round uint64) error {
+
+ key := make([]byte, swarm.HashSize)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ return err
+ }
+
+ rsh := sample.ReserveSampleHash.Bytes()
+ obfuscatedHash, err := a.wrapCommit(sample.StorageRadius, rsh, key)
+ if err != nil {
+ return err
+ }
+
+ txHash, err := a.contract.Commit(ctx, obfuscatedHash, round)
+ if err != nil {
+ return err
+ }
+ a.state.AddFee(ctx, txHash)
+
+ a.state.SetCommitKey(round, key)
+
+ return nil
+}
diff --git a/pkg/storageincentives/agent_shared.go b/pkg/storageincentives/agent_shared.go
new file mode 100644
index 00000000000..735154bbbf4
--- /dev/null
+++ b/pkg/storageincentives/agent_shared.go
@@ -0,0 +1,202 @@
+// Copyright 2022 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package storageincentives
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethersphere/bee/v2/pkg/crypto"
+ "github.com/ethersphere/bee/v2/pkg/storageincentives/redistribution"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+const loggerName = "storageincentives"
+
+const (
+ DefaultBlocksPerRound = 152
+ DefaultBlocksPerPhase = DefaultBlocksPerRound / 4
+
+ // min # of transactions our wallet should be able to cover
+ minTxCountToCover = 15
+
+ // average tx gas used by transactions issued from agent
+ avgTxGas = 250_000
+)
+
+type ChainBackend interface {
+ BlockNumber(context.Context) (uint64, error)
+ HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
+ BalanceAt(ctx context.Context, address common.Address, block *big.Int) (*big.Int, error)
+ SuggestGasPrice(ctx context.Context) (*big.Int, error)
+}
+
+type Health interface {
+ IsHealthy() bool
+}
+
+func (a *Agent) handleCommit(ctx context.Context, round uint64) error {
+ // the commit event handler has to be guarded with a lock to avoid
+ // race conditions when the handler is triggered again from the sample phase
+ a.commitLock.Lock()
+ defer a.commitLock.Unlock()
+
+ if _, exists := a.state.CommitKey(round); exists {
+ // already committed on this round, phase is skipped
+ return nil
+ }
+
+ // the sample has to come from the previous round to be able to commit it
+ sample, exists := a.state.SampleData(round - 1)
+ if !exists {
+ // In absence of sample, phase is skipped
+ return nil
+ }
+
+ err := a.commit(ctx, sample, round)
+ if err != nil {
+ return err
+ }
+
+ a.state.SetLastPlayedRound(round)
+
+ return nil
+}
+
+func (a *Agent) makeSample(ctx context.Context, committedDepth uint8) (SampleData, error) {
+ salt, err := a.contract.ReserveSalt(ctx)
+ if err != nil {
+ return SampleData{}, err
+ }
+
+ timeLimiter, err := a.getPreviousRoundTime(ctx)
+ if err != nil {
+ return SampleData{}, err
+ }
+
+ rSample, err := a.store.ReserveSample(ctx, salt, committedDepth, uint64(timeLimiter), a.minBatchBalance())
+ if err != nil {
+ return SampleData{}, err
+ }
+
+ sampleHash, err := sampleHash(rSample.Items)
+ if err != nil {
+ return SampleData{}, err
+ }
+
+ sample := SampleData{
+ Anchor1: salt,
+ ReserveSampleItems: rSample.Items,
+ ReserveSampleHash: sampleHash,
+ StorageRadius: committedDepth,
+ }
+
+ return sample, nil
+}
+
+func (a *Agent) minBatchBalance() *big.Int {
+ cs := a.chainStateGetter.GetChainState()
+ nextRoundBlockNumber := ((a.state.currentBlock() / a.blocksPerRound) + 2) * a.blocksPerRound
+ difference := nextRoundBlockNumber - cs.Block
+ minBalance := new(big.Int).Add(cs.TotalAmount, new(big.Int).Mul(cs.CurrentPrice, big.NewInt(int64(difference))))
+
+ return minBalance
+}
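To make the formula above concrete: a batch is only counted if its accumulated value covers the current per-block price until the start of the round after next. A self-contained sketch of the same arithmetic, with made-up numbers:

```go
package main

import (
	"fmt"
	"math/big"
)

const blocksPerRound = 152

// minBatchBalance mirrors the method above: total amount already due, plus
// the current price for every block until the round after next begins.
func minBatchBalance(currentBlock, chainBlock uint64, totalAmount, currentPrice *big.Int) *big.Int {
	nextRoundBlock := ((currentBlock / blocksPerRound) + 2) * blocksPerRound
	blocksLeft := nextRoundBlock - chainBlock
	due := new(big.Int).Mul(currentPrice, big.NewInt(int64(blocksLeft)))
	return new(big.Int).Add(totalAmount, due)
}

func main() {
	// illustrative values only
	fmt.Println(minBatchBalance(1000, 990, big.NewInt(5_000_000), big.NewInt(24_000)))
}
```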
+
+func (a *Agent) Close() error {
+ close(a.quit)
+
+ stopped := make(chan struct{})
+ go func() {
+ a.wg.Wait()
+ close(stopped)
+ }()
+
+ select {
+ case <-stopped:
+ return nil
+ case <-time.After(5 * time.Second):
+ return errors.New("stopping incentives with ongoing worker goroutine")
+ }
+}
+
+func (a *Agent) wrapCommit(storageRadius uint8, sample []byte, key []byte) ([]byte, error) {
+ storageRadiusByte := []byte{storageRadius}
+
+ data := append(a.overlay.Bytes(), storageRadiusByte...)
+ data = append(data, sample...)
+ data = append(data, key...)
+
+ return crypto.LegacyKeccak256(data)
+}
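wrapCommit builds the obfuscated commitment as keccak256(overlay || storageRadius || sampleHash || key). A minimal standalone sketch of the same digest, using golang.org/x/crypto/sha3 in place of bee's crypto package and placeholder inputs:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/sha3"
)

// obfuscatedCommit computes keccak256(overlay || storageRadius || sampleHash || key),
// the shape of the digest wrapCommit produces. Inputs here are placeholders.
func obfuscatedCommit(overlay []byte, storageRadius uint8, sampleHash, key []byte) ([]byte, error) {
	h := sha3.NewLegacyKeccak256()
	for _, b := range [][]byte{overlay, {storageRadius}, sampleHash, key} {
		if _, err := h.Write(b); err != nil {
			return nil, err
		}
	}
	return h.Sum(nil), nil
}

func main() {
	key := make([]byte, 32) // swarm.HashSize
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		panic(err)
	}
	overlay := make([]byte, 32)
	sample := make([]byte, 32)
	digest, _ := obfuscatedCommit(overlay, 10, sample, key)
	fmt.Printf("%x\n", digest)
}
```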
+
+// Status returns the node status
+func (a *Agent) Status() (*Status, error) {
+ return a.state.Status()
+}
+
+type SampleWithProofs struct {
+ Hash swarm.Address `json:"hash"`
+ Proofs redistribution.ChunkInclusionProofs `json:"proofs"`
+ Duration time.Duration `json:"duration"`
+}
+
+// SampleWithProofs is called only by the rchash API.
+func (a *Agent) SampleWithProofs(
+ ctx context.Context,
+ anchor1 []byte,
+ anchor2 []byte,
+ storageRadius uint8,
+) (SampleWithProofs, error) {
+ sampleStartTime := time.Now()
+
+ timeLimiter, err := a.getPreviousRoundTime(ctx)
+ if err != nil {
+ return SampleWithProofs{}, err
+ }
+
+ rSample, err := a.store.ReserveSample(ctx, anchor1, storageRadius, uint64(timeLimiter), a.minBatchBalance())
+ if err != nil {
+ return SampleWithProofs{}, err
+ }
+
+ hash, err := sampleHash(rSample.Items)
+ if err != nil {
+ return SampleWithProofs{}, fmt.Errorf("sample hash: %w", err)
+ }
+
+ proofs, err := makeInclusionProofs(rSample.Items, anchor1, anchor2)
+ if err != nil {
+ return SampleWithProofs{}, fmt.Errorf("make proofs: %w", err)
+ }
+
+ return SampleWithProofs{
+ Hash: hash,
+ Proofs: proofs,
+ Duration: time.Since(sampleStartTime),
+ }, nil
+}
+
+func (a *Agent) HasEnoughFundsToPlay(ctx context.Context) (*big.Int, bool, error) {
+ balance, err := a.backend.BalanceAt(ctx, a.state.ethAddress, nil)
+ if err != nil {
+ return nil, false, err
+ }
+
+ price, err := a.backend.SuggestGasPrice(ctx)
+ if err != nil {
+ return nil, false, err
+ }
+
+ avgTxFee := new(big.Int).Mul(big.NewInt(avgTxGas), price)
+ minBalance := new(big.Int).Mul(avgTxFee, big.NewInt(minTxCountToCover))
+
+ return minBalance, balance.Cmp(minBalance) >= 1, nil
+}
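The funds check boils down to balance > suggestedGasPrice * avgTxGas * minTxCountToCover. A small sketch with illustrative numbers:

```go
package main

import (
	"fmt"
	"math/big"
)

const (
	avgTxGas          = 250_000
	minTxCountToCover = 15
)

// enoughFunds mirrors HasEnoughFundsToPlay: the wallet must strictly exceed
// the cost of roughly minTxCountToCover transactions at the given gas price.
func enoughFunds(balance, gasPrice *big.Int) (*big.Int, bool) {
	avgTxFee := new(big.Int).Mul(big.NewInt(avgTxGas), gasPrice)
	minBalance := new(big.Int).Mul(avgTxFee, big.NewInt(minTxCountToCover))
	return minBalance, balance.Cmp(minBalance) >= 1
}

func main() {
	gasPrice := big.NewInt(2_000_000_000)           // 2 gwei, illustrative
	balance := big.NewInt(8_000_000_000_000_000)    // 0.008 ether, illustrative
	minBalance, ok := enoughFunds(balance, gasPrice)
	fmt.Println(minBalance, ok) // min is 7.5e15 wei, so this balance passes
}
```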
diff --git a/pkg/storageincentives/metrics.go b/pkg/storageincentives/metrics.go
index b376d9d20b2..460d688520c 100644
--- a/pkg/storageincentives/metrics.go
+++ b/pkg/storageincentives/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2022 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/storageincentives/staking/contract.go b/pkg/storageincentives/staking/contract.go
index d039f7ecb67..882721fd797 100644
--- a/pkg/storageincentives/staking/contract.go
+++ b/pkg/storageincentives/staking/contract.go
@@ -22,7 +22,7 @@ import (
var (
MinimumStakeAmount = big.NewInt(100000000000000000)
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_9)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
ErrInsufficientStakeAmount = errors.New("insufficient stake amount")
ErrInsufficientFunds = errors.New("insufficient token balance")
diff --git a/pkg/storer/cachestore.go b/pkg/storer/cachestore.go
index 6cc51e5550e..b5e65d0228f 100644
--- a/pkg/storer/cachestore.go
+++ b/pkg/storer/cachestore.go
@@ -1,6 +1,5 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package storer
@@ -10,15 +9,11 @@ import (
"fmt"
"time"
- "github.com/ethersphere/bee/v2/pkg/storage"
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
"github.com/ethersphere/bee/v2/pkg/swarm"
)
-const (
- cacheOverCapacity = "cacheOverCapacity"
-)
-
func (db *DB) cacheWorker(ctx context.Context) {
defer db.inFlight.Done()
diff --git a/pkg/storer/cachestore_js.go b/pkg/storer/cachestore_js.go
new file mode 100644
index 00000000000..9b0aaea0275
--- /dev/null
+++ b/pkg/storer/cachestore_js.go
@@ -0,0 +1,106 @@
+//go:build js
+// +build js
+
+package storer
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+func (db *DB) cacheWorker(ctx context.Context) {
+ defer db.inFlight.Done()
+
+ overCapTrigger, overCapUnsub := db.events.Subscribe(cacheOverCapacity)
+ defer overCapUnsub()
+
+ db.triggerCacheEviction()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-overCapTrigger:
+ size, capc := db.cacheObj.Size(), db.cacheObj.Capacity()
+ if size <= capc {
+ continue
+ }
+
+ evict := uint64(size - capc)
+ if evict < db.reserveOptions.cacheMinEvictCount { // evict at least a min count
+ evict = db.reserveOptions.cacheMinEvictCount
+ }
+
+ err := db.cacheObj.RemoveOldest(ctx, db.storage, evict)
+ if err != nil {
+ db.logger.Warning("cache eviction failure", "error", err)
+ } else {
+ db.logger.Debug("cache eviction finished", "evicted", evict)
+ }
+ db.triggerCacheEviction()
+ case <-db.quit:
+ return
+ }
+ }
+}
+
+// Lookup is the implementation of the CacheStore.Lookup method.
+func (db *DB) Lookup() storage.Getter {
+ return storage.GetterFunc(func(ctx context.Context, address swarm.Address) (swarm.Chunk, error) {
+ ch, err := db.cacheObj.Getter(db.storage).Get(ctx, address)
+ switch {
+ case err == nil:
+ return ch, nil
+ case errors.Is(err, storage.ErrNotFound):
+ // nothing more to do here; return the not-found error to the client
+ return nil, err
+ }
+ // any other error is unexpected; wrap it so the caller can tell
+ // where it came from
+ return nil, fmt.Errorf("cache.Get: %w", err)
+ })
+}
+
+// Cache is the implementation of the CacheStore.Cache method.
+func (db *DB) Cache() storage.Putter {
+ return storage.PutterFunc(func(ctx context.Context, ch swarm.Chunk) error {
+ defer db.triggerCacheEviction()
+ err := db.cacheObj.Putter(db.storage).Put(ctx, ch)
+ if err != nil {
+ return fmt.Errorf("cache.Put: %w", err)
+ }
+ return nil
+ })
+}
+
+// CacheShallowCopy creates cache entries with the expectation that the chunk already exists in the chunkstore.
+func (db *DB) CacheShallowCopy(ctx context.Context, store transaction.Storage, addrs ...swarm.Address) error {
+ defer db.triggerCacheEviction()
+ err := db.cacheObj.ShallowCopy(ctx, store, addrs...)
+ if err != nil {
+ err = fmt.Errorf("cache shallow copy: %w", err)
+ }
+ return err
+}
+
+func (db *DB) triggerCacheEviction() {
+ var (
+ size = db.cacheObj.Size()
+ capc = db.cacheObj.Capacity()
+ )
+
+ if size > capc {
+ db.events.Trigger(cacheOverCapacity)
+ }
+}
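The eviction sizing in cacheWorker above reduces to one small rule: evict the overflow past capacity, but never less than a configured minimum batch. In this sketch, minEvict stands in for db.reserveOptions.cacheMinEvictCount and the numbers are illustrative:

```go
package main

import "fmt"

// evictCount mirrors cacheWorker's arithmetic: overflow past capacity,
// floored at a minimum eviction batch size.
func evictCount(size, capacity int64, minEvict uint64) uint64 {
	if size <= capacity {
		return 0
	}
	evict := uint64(size - capacity)
	if evict < minEvict {
		evict = minEvict
	}
	return evict
}

func main() {
	fmt.Println(evictCount(1_050, 1_000, 100)) // overflow 50, floored to 100
	fmt.Println(evictCount(2_000, 1_000, 100)) // overflow 1000
}
```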
diff --git a/pkg/storer/cachestore_shared.go b/pkg/storer/cachestore_shared.go
new file mode 100644
index 00000000000..d519e132e28
--- /dev/null
+++ b/pkg/storer/cachestore_shared.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package storer
+
+const (
+ cacheOverCapacity = "cacheOverCapacity"
+)
diff --git a/pkg/storer/internal/cache/cache.go b/pkg/storer/internal/cache/cache.go
index bea87954ed8..2cfc56e394a 100644
--- a/pkg/storer/internal/cache/cache.go
+++ b/pkg/storer/internal/cache/cache.go
@@ -42,7 +42,7 @@ var (
type Cache struct {
size atomic.Int64
capacity int
- glock *multex.Multex // blocks Get and Put ops while shallow copy is running.
+ glock *multex.Multex[any] // blocks Get and Put ops while shallow copy is running.
}
// New creates a new Cache component with the specified capacity. The store is used
@@ -54,7 +54,7 @@ func New(ctx context.Context, store storage.Reader, capacity uint64) (*Cache, er
return nil, fmt.Errorf("failed counting cache entries: %w", err)
}
- c := &Cache{capacity: int(capacity), glock: multex.New()}
+ c := &Cache{capacity: int(capacity), glock: multex.New[any]()}
c.size.Store(int64(count))
return c, nil
diff --git a/pkg/storer/internal/reserve/reserve.go b/pkg/storer/internal/reserve/reserve.go
index 80e301e72a8..8bfd9df12a5 100644
--- a/pkg/storer/internal/reserve/reserve.go
+++ b/pkg/storer/internal/reserve/reserve.go
@@ -38,7 +38,7 @@ type Reserve struct {
size atomic.Int64
radius atomic.Uint32
- multx *multex.Multex
+ multx *multex.Multex[any]
st transaction.Storage
}
@@ -55,7 +55,7 @@ func New(
capacity: capacity,
radiusSetter: radiusSetter,
logger: logger.WithName(reserveScope).Register(),
- multx: multex.New(),
+ multx: multex.New[any](),
}
err := st.Run(context.Background(), func(s transaction.Store) error {
diff --git a/pkg/storer/internal/stampindex/stampindex.go b/pkg/storer/internal/stampindex/stampindex.go
index c8fff8698f9..b4d70839731 100644
--- a/pkg/storer/internal/stampindex/stampindex.go
+++ b/pkg/storer/internal/stampindex/stampindex.go
@@ -213,7 +213,7 @@ func Delete(s storage.Writer, scope string, stamp swarm.Stamp) error {
StampIndex: stamp.Index(),
}
if err := s.Delete(item); err != nil {
- return fmt.Errorf("failed to delete stampindex.Item %s: %w", item, err)
+ return fmt.Errorf("failed to delete stampindex.Item %s: %w", item.ID(), err)
}
return nil
}
diff --git a/pkg/storer/internal/transaction/metrics.go b/pkg/storer/internal/transaction/metrics.go
index 39434881d8d..ae1cb66e68d 100644
--- a/pkg/storer/internal/transaction/metrics.go
+++ b/pkg/storer/internal/transaction/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2024 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/storer/internal/transaction/transaction.go b/pkg/storer/internal/transaction/transaction.go
index ae97e06ce3b..d0db98ca681 100644
--- a/pkg/storer/internal/transaction/transaction.go
+++ b/pkg/storer/internal/transaction/transaction.go
@@ -1,22 +1,5 @@
-// Copyright 2024 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package transaction provides transaction support for localstore operations.
-All writes to the localstore (both indexstore and chunkstore) must be made using a transaction.
-The transaction must be committed for the writes to be stored on the disk.
-
-The rules of the transaction is as follows:
-
--sharky_write -> write to disk, keep sharky location in memory
--sharky_release -> keep location in memory, do not release from the disk
--indexstore write -> write to batch
--on commit -> if batch_commit succeeds, release sharky_release locations from the disk
- -> if batch_commit fails or is not called, release all sharky_write location from the disk, do nothing for sharky_release
-
-See the NewTransaction method for more details.
-*/
+//go:build !js
+// +build !js
package transaction
@@ -35,37 +18,15 @@ import (
"resenje.org/multex"
)
-type Transaction interface {
- Store
- Commit() error
-}
-
-type Store interface {
- ChunkStore() storage.ChunkStore
- IndexStore() storage.IndexStore
-}
-
-type ReadOnlyStore interface {
- IndexStore() storage.Reader
- ChunkStore() storage.ReadOnlyChunkStore
-}
-
-type Storage interface {
- ReadOnlyStore
- NewTransaction(context.Context) (Transaction, func())
- Run(context.Context, func(Store) error) error
- Close() error
-}
-
type store struct {
sharky *sharky.Store
bstore storage.BatchStore
metrics metrics
- chunkLocker *multex.Multex
+ chunkLocker *multex.Multex[any]
}
func NewStorage(sharky *sharky.Store, bstore storage.BatchStore) Storage {
- return &store{sharky, bstore, newMetrics(), multex.New()}
+ return &store{sharky, bstore, newMetrics(), multex.New[any]()}
}
type transaction struct {
@@ -77,6 +38,28 @@ type transaction struct {
metrics metrics
}
+type indexTrx struct {
+ store storage.Reader
+ batch storage.Batch
+ metrics metrics
+}
+
+type sharkyTrx struct {
+ sharky *sharky.Store
+ metrics metrics
+ writtenLocs []sharky.Location
+ releasedLocs []sharky.Location
+}
+
+type chunkStoreTrx struct {
+ indexStore storage.IndexStore
+ sharkyTrx *sharkyTrx
+ globalLocker *multex.Multex[any]
+ lockedAddrs map[string]struct{}
+ metrics metrics
+ readOnly bool
+}
+
// NewTransaction returns a new storage transaction.
// Commit must be called to persist data to the disk.
// The callback function must be the final call of the transaction whether or not any errors
@@ -125,36 +108,41 @@ func (s *store) ChunkStore() storage.ReadOnlyChunkStore {
return &chunkStoreTrx{indexStore, sharyTrx, s.chunkLocker, nil, s.metrics, true}
}
-// Run creates a new transaction and gives the caller access to the transaction
-// in the form of a callback function. After the callback returns, the transaction
-// is committed to the disk. See the NewTransaction method for more details on how transactions operate internally.
-// By design, it is best to not batch too many writes to a single transaction, including multiple chunks writes.
-// Calls made to the transaction are NOT thread-safe.
-func (s *store) Run(ctx context.Context, f func(Store) error) error {
- trx, done := s.NewTransaction(ctx)
- defer done()
-
- err := f(trx)
- if err != nil {
- return err
- }
- return trx.Commit()
+func (c *chunkStoreTrx) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, err error) {
+ defer handleMetric("chunkstore_get", c.metrics)(&err)
+ unlock := c.lock(addr)
+ defer unlock()
+ ch, err = chunkstore.Get(ctx, c.indexStore, c.sharkyTrx, addr)
+ return ch, err
}
-
-// Metrics returns set of prometheus collectors.
-func (s *store) Metrics() []prometheus.Collector {
- return m.PrometheusCollectorsFromFields(s.metrics)
+func (c *chunkStoreTrx) Has(ctx context.Context, addr swarm.Address) (_ bool, err error) {
+ defer handleMetric("chunkstore_has", c.metrics)(&err)
+ unlock := c.lock(addr)
+ defer unlock()
+ return chunkstore.Has(ctx, c.indexStore, addr)
}
-
-// StatusMetrics exposes metrics that are exposed on the status protocol.
-func (s *store) StatusMetrics() []prometheus.Collector {
- return []prometheus.Collector{
- s.metrics.MethodDuration,
- }
+func (c *chunkStoreTrx) Put(ctx context.Context, ch swarm.Chunk) (err error) {
+ defer handleMetric("chunkstore_put", c.metrics)(&err)
+ unlock := c.lock(ch.Address())
+ defer unlock()
+ return chunkstore.Put(ctx, c.indexStore, c.sharkyTrx, ch)
+}
+func (c *chunkStoreTrx) Delete(ctx context.Context, addr swarm.Address) (err error) {
+ defer handleMetric("chunkstore_delete", c.metrics)(&err)
+ unlock := c.lock(addr)
+ defer unlock()
+ return chunkstore.Delete(ctx, c.indexStore, c.sharkyTrx, addr)
+}
+func (c *chunkStoreTrx) Iterate(ctx context.Context, fn storage.IterateChunkFn) (err error) {
+ defer handleMetric("chunkstore_iterate", c.metrics)(&err)
+ return chunkstore.Iterate(ctx, c.indexStore, c.sharkyTrx, fn)
}
-func (s *store) Close() error {
- return errors.Join(s.bstore.Close(), s.sharky.Close())
+func (c *chunkStoreTrx) Replace(ctx context.Context, ch swarm.Chunk, emplace bool) (err error) {
+ defer handleMetric("chunkstore_replace", c.metrics)(&err)
+ unlock := c.lock(ch.Address())
+ defer unlock()
+ return chunkstore.Replace(ctx, c.indexStore, c.sharkyTrx, ch, emplace)
}
func (t *transaction) Commit() (err error) {
@@ -198,102 +186,16 @@ func (t *transaction) Commit() (err error) {
return err
}
-// IndexStore gives access to the index store of the transaction.
-// Note that no writes are persisted to the disk until the commit is called.
-func (t *transaction) IndexStore() storage.IndexStore {
- return t.indexstore
-}
-
-// ChunkStore gives access to the chunkstore of the transaction.
-// Note that no writes are persisted to the disk until the commit is called.
-func (t *transaction) ChunkStore() storage.ChunkStore {
- return t.chunkStore
-}
-
-type chunkStoreTrx struct {
- indexStore storage.IndexStore
- sharkyTrx *sharkyTrx
- globalLocker *multex.Multex
- lockedAddrs map[string]struct{}
- metrics metrics
- readOnly bool
-}
-
-func (c *chunkStoreTrx) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, err error) {
- defer handleMetric("chunkstore_get", c.metrics)(&err)
- unlock := c.lock(addr)
- defer unlock()
- ch, err = chunkstore.Get(ctx, c.indexStore, c.sharkyTrx, addr)
- return ch, err
-}
-func (c *chunkStoreTrx) Has(ctx context.Context, addr swarm.Address) (_ bool, err error) {
- defer handleMetric("chunkstore_has", c.metrics)(&err)
- unlock := c.lock(addr)
- defer unlock()
- return chunkstore.Has(ctx, c.indexStore, addr)
-}
-func (c *chunkStoreTrx) Put(ctx context.Context, ch swarm.Chunk) (err error) {
- defer handleMetric("chunkstore_put", c.metrics)(&err)
- unlock := c.lock(ch.Address())
- defer unlock()
- return chunkstore.Put(ctx, c.indexStore, c.sharkyTrx, ch)
-}
-func (c *chunkStoreTrx) Delete(ctx context.Context, addr swarm.Address) (err error) {
- defer handleMetric("chunkstore_delete", c.metrics)(&err)
- unlock := c.lock(addr)
- defer unlock()
- return chunkstore.Delete(ctx, c.indexStore, c.sharkyTrx, addr)
-}
-func (c *chunkStoreTrx) Iterate(ctx context.Context, fn storage.IterateChunkFn) (err error) {
- defer handleMetric("chunkstore_iterate", c.metrics)(&err)
- return chunkstore.Iterate(ctx, c.indexStore, c.sharkyTrx, fn)
-}
-
-func (c *chunkStoreTrx) Replace(ctx context.Context, ch swarm.Chunk, emplace bool) (err error) {
- defer handleMetric("chunkstore_replace", c.metrics)(&err)
- unlock := c.lock(ch.Address())
- defer unlock()
- return chunkstore.Replace(ctx, c.indexStore, c.sharkyTrx, ch, emplace)
+// Metrics returns set of prometheus collectors.
+func (s *store) Metrics() []prometheus.Collector {
+ return m.PrometheusCollectorsFromFields(s.metrics)
}
-func (c *chunkStoreTrx) lock(addr swarm.Address) func() {
- // directly lock
- if c.readOnly {
- c.globalLocker.Lock(addr.ByteString())
- return func() { c.globalLocker.Unlock(addr.ByteString()) }
- }
-
- // lock chunk only once in the same transaction
- if _, ok := c.lockedAddrs[addr.ByteString()]; !ok {
- c.globalLocker.Lock(addr.ByteString())
- c.lockedAddrs[addr.ByteString()] = struct{}{}
+// StatusMetrics exposes metrics that are exposed on the status protocol.
+func (s *store) StatusMetrics() []prometheus.Collector {
+ return []prometheus.Collector{
+ s.metrics.MethodDuration,
}
-
- return func() {} // unlocking the chunk will be done in the Commit()
-}
-
-type indexTrx struct {
- store storage.Reader
- batch storage.Batch
- metrics metrics
-}
-
-func (s *indexTrx) Get(i storage.Item) error { return s.store.Get(i) }
-func (s *indexTrx) Has(k storage.Key) (bool, error) { return s.store.Has(k) }
-func (s *indexTrx) GetSize(k storage.Key) (int, error) { return s.store.GetSize(k) }
-func (s *indexTrx) Iterate(q storage.Query, f storage.IterateFn) (err error) {
- defer handleMetric("iterate", s.metrics)(&err)
- return s.store.Iterate(q, f)
-}
-func (s *indexTrx) Count(k storage.Key) (int, error) { return s.store.Count(k) }
-func (s *indexTrx) Put(i storage.Item) error { return s.batch.Put(i) }
-func (s *indexTrx) Delete(i storage.Item) error { return s.batch.Delete(i) }
-
-type sharkyTrx struct {
- sharky *sharky.Store
- metrics metrics
- writtenLocs []sharky.Location
- releasedLocs []sharky.Location
}
func (s *sharkyTrx) Read(ctx context.Context, loc sharky.Location, buf []byte) (err error) {
@@ -312,11 +214,6 @@ func (s *sharkyTrx) Write(ctx context.Context, data []byte) (_ sharky.Location,
return loc, nil
}
-func (s *sharkyTrx) Release(ctx context.Context, loc sharky.Location) error {
- s.releasedLocs = append(s.releasedLocs, loc)
- return nil
-}
-
func handleMetric(key string, m metrics) func(*error) {
t := time.Now()
return func(err *error) {
@@ -329,3 +226,8 @@ func handleMetric(key string, m metrics) func(*error) {
}
}
}
+
+func (s *indexTrx) Iterate(q storage.Query, f storage.IterateFn) (err error) {
+ defer handleMetric("iterate", s.metrics)(&err)
+ return s.store.Iterate(q, f)
+}
diff --git a/pkg/storer/internal/transaction/transaction_js.go b/pkg/storer/internal/transaction/transaction_js.go
new file mode 100644
index 00000000000..42d48090e68
--- /dev/null
+++ b/pkg/storer/internal/transaction/transaction_js.go
@@ -0,0 +1,190 @@
+//go:build js
+// +build js
+
+package transaction
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/sharky"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstore"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "resenje.org/multex"
+)
+
+type store struct {
+ sharky *sharky.Store
+ bstore storage.BatchStore
+ chunkLocker *multex.Multex[any]
+}
+
+func NewStorage(sharky *sharky.Store, bstore storage.BatchStore) Storage {
+ return &store{sharky, bstore, multex.New[any]()}
+}
+
+type transaction struct {
+ start time.Time
+ batch storage.Batch
+ indexstore storage.IndexStore
+ chunkStore *chunkStoreTrx
+ sharkyTrx *sharkyTrx
+}
+
+type indexTrx struct {
+ store storage.Reader
+ batch storage.Batch
+}
+
+type sharkyTrx struct {
+ sharky *sharky.Store
+ writtenLocs []sharky.Location
+ releasedLocs []sharky.Location
+}
+
+type chunkStoreTrx struct {
+ indexStore storage.IndexStore
+ sharkyTrx *sharkyTrx
+ globalLocker *multex.Multex[any]
+ lockedAddrs map[string]struct{}
+ readOnly bool
+}
+
+// NewTransaction returns a new storage transaction.
+// Commit must be called to persist data to the disk.
+// The callback function must be the final call of the transaction whether or not any errors
+// were returned from the storage ops or commit. Safest option is to do a defer call immediately after
+// creating the transaction.
+// By design, it is best to not batch too many writes to a single transaction, including multiple chunks writes.
+// Calls made to the transaction are NOT thread-safe.
+func (s *store) NewTransaction(ctx context.Context) (Transaction, func()) {
+
+ b := s.bstore.Batch(ctx)
+
+ index := &indexTrx{store: s.bstore, batch: b}
+ sharky := &sharkyTrx{s.sharky, nil, nil}
+
+ t := &transaction{
+ start: time.Now(),
+ batch: b,
+ indexstore: index,
+ chunkStore: &chunkStoreTrx{index, sharky, s.chunkLocker, make(map[string]struct{}), false},
+ sharkyTrx: sharky,
+ }
+
+ return t, func() {
+ // for whatever reason, commit was not called
+ // release uncommitted but written sharky locations
+ // unlock the locked addresses
+ for _, l := range t.sharkyTrx.writtenLocs {
+ _ = t.sharkyTrx.sharky.Release(context.TODO(), l)
+ }
+ for addr := range t.chunkStore.lockedAddrs {
+ s.chunkLocker.Unlock(addr)
+ }
+ t.sharkyTrx.writtenLocs = nil
+ t.chunkStore.lockedAddrs = nil
+ }
+}
+
+func (s *store) IndexStore() storage.Reader {
+ return &indexTrx{s.bstore, nil}
+}
+
+func (s *store) ChunkStore() storage.ReadOnlyChunkStore {
+ indexStore := &indexTrx{s.bstore, nil}
+ sharyTrx := &sharkyTrx{s.sharky, nil, nil}
+ return &chunkStoreTrx{indexStore, sharyTrx, s.chunkLocker, nil, true}
+}
+
+func (c *chunkStoreTrx) Get(ctx context.Context, addr swarm.Address) (ch swarm.Chunk, err error) {
+ unlock := c.lock(addr)
+ defer unlock()
+ ch, err = chunkstore.Get(ctx, c.indexStore, c.sharkyTrx, addr)
+ return ch, err
+}
+func (c *chunkStoreTrx) Has(ctx context.Context, addr swarm.Address) (_ bool, err error) {
+ unlock := c.lock(addr)
+ defer unlock()
+ return chunkstore.Has(ctx, c.indexStore, addr)
+}
+func (c *chunkStoreTrx) Put(ctx context.Context, ch swarm.Chunk) (err error) {
+ unlock := c.lock(ch.Address())
+ defer unlock()
+ return chunkstore.Put(ctx, c.indexStore, c.sharkyTrx, ch)
+}
+func (c *chunkStoreTrx) Delete(ctx context.Context, addr swarm.Address) (err error) {
+ unlock := c.lock(addr)
+ defer unlock()
+ return chunkstore.Delete(ctx, c.indexStore, c.sharkyTrx, addr)
+}
+func (c *chunkStoreTrx) Iterate(ctx context.Context, fn storage.IterateChunkFn) (err error) {
+ return chunkstore.Iterate(ctx, c.indexStore, c.sharkyTrx, fn)
+}
+
+func (c *chunkStoreTrx) Replace(ctx context.Context, ch swarm.Chunk, emplace bool) (err error) {
+ unlock := c.lock(ch.Address())
+ defer unlock()
+ return chunkstore.Replace(ctx, c.indexStore, c.sharkyTrx, ch, emplace)
+}
+
+func (t *transaction) Commit() (err error) {
+ defer func() {
+ for addr := range t.chunkStore.lockedAddrs {
+ t.chunkStore.globalLocker.Unlock(addr)
+ }
+ t.chunkStore.lockedAddrs = nil
+ t.sharkyTrx.writtenLocs = nil
+ }()
+
+ err = t.batch.Commit()
+ if err != nil {
+ // since the batch commit has failed, we must release the written chunks from sharky.
+ for _, l := range t.sharkyTrx.writtenLocs {
+ if rerr := t.sharkyTrx.sharky.Release(context.TODO(), l); rerr != nil {
+ err = errors.Join(err, fmt.Errorf("failed releasing location during commit rollback %s: %w", l, rerr))
+ }
+ }
+ return err
+ }
+
+ // the batch commit was successful, we can now release the accumulated locations from sharky.
+ for _, l := range t.sharkyTrx.releasedLocs {
+ rerr := t.sharkyTrx.sharky.Release(context.TODO(), l)
+ if rerr != nil {
+ err = errors.Join(err, fmt.Errorf("failed releasing location after commit %s: %w", l, rerr))
+ }
+ }
+
+ return err
+}
+
+func (s *sharkyTrx) Read(ctx context.Context, loc sharky.Location, buf []byte) (err error) {
+ return s.sharky.Read(ctx, loc, buf)
+}
+
+func (s *sharkyTrx) Write(ctx context.Context, data []byte) (_ sharky.Location, err error) {
+ loc, err := s.sharky.Write(ctx, data)
+ if err != nil {
+ return sharky.Location{}, err
+ }
+
+ s.writtenLocs = append(s.writtenLocs, loc)
+ return loc, nil
+}
+
+func (s *indexTrx) Iterate(q storage.Query, f storage.IterateFn) (err error) {
+ return s.store.Iterate(q, f)
+}
diff --git a/pkg/storer/internal/transaction/transaction_shared.go b/pkg/storer/internal/transaction/transaction_shared.go
new file mode 100644
index 00000000000..36cfd4d878e
--- /dev/null
+++ b/pkg/storer/internal/transaction/transaction_shared.go
@@ -0,0 +1,113 @@
+// Copyright 2024 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package transaction provides transaction support for localstore operations.
+All writes to the localstore (both indexstore and chunkstore) must be made using a transaction.
+The transaction must be committed for the writes to be stored on the disk.
+
+The rules of the transaction are as follows:
+
+-sharky_write -> write to disk, keep sharky location in memory
+-sharky_release -> keep location in memory, do not release from the disk
+-indexstore write -> write to batch
+-on commit -> if batch_commit succeeds, release sharky_release locations from the disk
+ -> if batch_commit fails or is not called, release all sharky_write location from the disk, do nothing for sharky_release
+
+See the NewTransaction method for more details.
+*/
+
+package transaction
+
+import (
+ "context"
+ "errors"
+
+ "github.com/ethersphere/bee/v2/pkg/sharky"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+type Transaction interface {
+ Store
+ Commit() error
+}
+
+type Store interface {
+ ChunkStore() storage.ChunkStore
+ IndexStore() storage.IndexStore
+}
+
+type ReadOnlyStore interface {
+ IndexStore() storage.Reader
+ ChunkStore() storage.ReadOnlyChunkStore
+}
+
+type Storage interface {
+ ReadOnlyStore
+ NewTransaction(context.Context) (Transaction, func())
+ Run(context.Context, func(Store) error) error
+ Close() error
+}
+
+// Run creates a new transaction and gives the caller access to the transaction
+// in the form of a callback function. After the callback returns, the transaction
+// is committed to the disk. See the NewTransaction method for more details on how transactions operate internally.
+// By design, it is best to not batch too many writes to a single transaction, including multiple chunks writes.
+// Calls made to the transaction are NOT thread-safe.
+func (s *store) Run(ctx context.Context, f func(Store) error) error {
+ trx, done := s.NewTransaction(ctx)
+ defer done()
+
+ err := f(trx)
+ if err != nil {
+ return err
+ }
+ return trx.Commit()
+}
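As a usage sketch (not part of the patch): a typical caller never commits explicitly; it stages writes inside the Run callback and lets Run issue the commit. The snippet assumes this package's imports and an existing storage.Item value; saveItem is a hypothetical helper.

```go
// saveItem shows the intended call shape of Run: all writes go through the
// callback's Store, and Run commits (or discards) them as a unit.
func saveItem(ctx context.Context, st Storage, it storage.Item) error {
	return st.Run(ctx, func(s Store) error {
		// staged in the transaction batch; persisted when Run commits
		return s.IndexStore().Put(it)
	})
}
```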
+
+func (s *store) Close() error {
+ return errors.Join(s.bstore.Close(), s.sharky.Close())
+}
+
+// IndexStore gives access to the index store of the transaction.
+// Note that no writes are persisted to the disk until the commit is called.
+func (t *transaction) IndexStore() storage.IndexStore {
+ return t.indexstore
+}
+
+// ChunkStore gives access to the chunkstore of the transaction.
+// Note that no writes are persisted to the disk until the commit is called.
+func (t *transaction) ChunkStore() storage.ChunkStore {
+ return t.chunkStore
+}
+
+func (c *chunkStoreTrx) lock(addr swarm.Address) func() {
+ // directly lock
+ if c.readOnly {
+ c.globalLocker.Lock(addr.ByteString())
+ return func() { c.globalLocker.Unlock(addr.ByteString()) }
+ }
+
+ // lock chunk only once in the same transaction
+ if _, ok := c.lockedAddrs[addr.ByteString()]; !ok {
+ c.globalLocker.Lock(addr.ByteString())
+ c.lockedAddrs[addr.ByteString()] = struct{}{}
+ }
+
+ return func() {} // unlocking the chunk will be done in the Commit()
+}
+
+func (s *indexTrx) Get(i storage.Item) error { return s.store.Get(i) }
+func (s *indexTrx) Has(k storage.Key) (bool, error) { return s.store.Has(k) }
+func (s *indexTrx) GetSize(k storage.Key) (int, error) { return s.store.GetSize(k) }
+
+func (s *indexTrx) Count(k storage.Key) (int, error) { return s.store.Count(k) }
+func (s *indexTrx) Put(i storage.Item) error { return s.batch.Put(i) }
+func (s *indexTrx) Delete(i storage.Item) error { return s.batch.Delete(i) }
+
+func (s *sharkyTrx) Release(ctx context.Context, loc sharky.Location) error {
+ s.releasedLocs = append(s.releasedLocs, loc)
+ return nil
+}
diff --git a/pkg/storer/metrics.go b/pkg/storer/metrics.go
index 7b9295ee9f7..3cf2aca2d24 100644
--- a/pkg/storer/metrics.go
+++ b/pkg/storer/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/storer/netstore.go b/pkg/storer/netstore.go
index 1a192114920..ce39ce1c07a 100644
--- a/pkg/storer/netstore.go
+++ b/pkg/storer/netstore.go
@@ -1,6 +1,5 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package storer
@@ -10,12 +9,13 @@ import (
"github.com/ethersphere/bee/v2/pkg/pusher"
"github.com/ethersphere/bee/v2/pkg/pushsync"
- "github.com/ethersphere/bee/v2/pkg/storage"
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/swarm"
"github.com/ethersphere/bee/v2/pkg/topology"
"github.com/opentracing/opentracing-go/ext"
- olog "github.com/opentracing/opentracing-go/log"
"golang.org/x/sync/errgroup"
+
+ olog "github.com/opentracing/opentracing-go/log"
)
// DirectUpload is the implementation of the NetStore.DirectUpload method.
@@ -133,8 +133,3 @@ func (db *DB) Download(cache bool) storage.Getter {
"netstore",
}
}
-
-// PusherFeed is the implementation of the NetStore.PusherFeed method.
-func (db *DB) PusherFeed() <-chan *pusher.Op {
- return db.pusherFeed
-}
diff --git a/pkg/storer/netstore_js.go b/pkg/storer/netstore_js.go
new file mode 100644
index 00000000000..fc646751888
--- /dev/null
+++ b/pkg/storer/netstore_js.go
@@ -0,0 +1,127 @@
+//go:build js
+// +build js
+
+package storer
+
+import (
+ "context"
+ "errors"
+
+ "github.com/ethersphere/bee/v2/pkg/pusher"
+ "github.com/ethersphere/bee/v2/pkg/pushsync"
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/opentracing/opentracing-go/ext"
+ "golang.org/x/sync/errgroup"
+
+ olog "github.com/opentracing/opentracing-go/log"
+)
+
+// DirectUpload is the implementation of the NetStore.DirectUpload method.
+func (db *DB) DirectUpload() PutterSession {
+ // egCtx will allow early exit of Put operations if we have
+ // already encountered error.
+ eg, egCtx := errgroup.WithContext(context.Background())
+
+ return &putterSession{
+ Putter: storage.PutterFunc(func(ctx context.Context, ch swarm.Chunk) error {
+ db.directUploadLimiter <- struct{}{}
+ eg.Go(func() (err error) {
+ defer func() { <-db.directUploadLimiter }()
+
+ span, logger, ctx := db.tracer.FollowSpanFromContext(ctx, "put-direct-upload", db.logger)
+ defer func() {
+ if err != nil {
+ ext.LogError(span, err)
+ }
+ span.Finish()
+ }()
+
+ for {
+ op := &pusher.Op{Chunk: ch, Err: make(chan error, 1), Direct: true, Span: span}
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-egCtx.Done():
+ return egCtx.Err()
+ case <-db.quit:
+ return ErrDBQuit
+ case db.pusherFeed <- op:
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-egCtx.Done():
+ return egCtx.Err()
+ case <-db.quit:
+ return ErrDBQuit
+ case err := <-op.Err:
+ if errors.Is(err, pushsync.ErrShallowReceipt) {
+ logger.Debug("direct upload: shallow receipt received, retrying", "chunk", ch.Address())
+ } else if errors.Is(err, topology.ErrNotFound) {
+ logger.Debug("direct upload: no peers available, retrying", "chunk", ch.Address())
+ } else {
+ return err
+ }
+ }
+ }
+ }
+ })
+ return nil
+ }),
+ done: func(swarm.Address) error { return eg.Wait() },
+ cleanup: func() error { _ = eg.Wait(); return nil },
+ }
+}
+
+// Download is the implementation of the NetStore.Download method.
+func (db *DB) Download(cache bool) storage.Getter {
+ return storage.GetterFunc(func(ctx context.Context, address swarm.Address) (ch swarm.Chunk, err error) {
+ span, logger, ctx := db.tracer.StartSpanFromContext(ctx, "get-chunk", db.logger)
+ defer func() {
+ if err != nil {
+ ext.LogError(span, err)
+ } else {
+ span.LogFields(olog.Bool("success", true))
+ }
+ span.Finish()
+ }()
+
+ ch, err = db.Lookup().Get(ctx, address)
+ switch {
+ case err == nil:
+ span.LogFields(olog.String("step", "chunk found locally"))
+ return ch, nil
+ case errors.Is(err, storage.ErrNotFound):
+ span.LogFields(olog.String("step", "retrieve chunk from network"))
+ if db.retrieval != nil {
+ // if chunk is not found locally, retrieve it from the network
+ ch, err = db.retrieval.RetrieveChunk(ctx, address, swarm.ZeroAddress)
+ if err == nil && cache {
+ select {
+ case <-ctx.Done():
+ case <-db.quit:
+ case db.cacheLimiter.sem <- struct{}{}:
+ db.cacheLimiter.wg.Add(1)
+ go func() {
+ defer func() {
+ <-db.cacheLimiter.sem
+ db.cacheLimiter.wg.Done()
+ }()
+
+ err := db.Cache().Put(db.cacheLimiter.ctx, ch)
+ if err != nil {
+ logger.Debug("putting chunk to cache failed", "error", err, "chunk_address", ch.Address())
+ }
+ }()
+ }
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ return ch, nil
+ })
+}
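The shape of Download, a local lookup with a network fallback on ErrNotFound, is easiest to see stripped of tracing and caching. A self-contained sketch with hypothetical getter functions standing in for bee's concrete types:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type getter func(addr string) (string, error)

// withFallback mirrors Download's control flow: try the local store first,
// go to the network only on a not-found error, and pass through anything else.
func withFallback(local, network getter) getter {
	return func(addr string) (string, error) {
		v, err := local(addr)
		switch {
		case err == nil:
			return v, nil
		case errors.Is(err, errNotFound):
			return network(addr)
		default:
			return "", err
		}
	}
}

func main() {
	local := func(addr string) (string, error) { return "", errNotFound }
	network := func(addr string) (string, error) { return "chunk@" + addr, nil }
	get := withFallback(local, network)
	fmt.Println(get("deadbeef"))
}
```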
diff --git a/pkg/storer/netstore_shared.go b/pkg/storer/netstore_shared.go
new file mode 100644
index 00000000000..4cbf54c5f9e
--- /dev/null
+++ b/pkg/storer/netstore_shared.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package storer
+
+import (
+ "github.com/ethersphere/bee/v2/pkg/pusher"
+)
+
+// PusherFeed is the implementation of the NetStore.PusherFeed method.
+func (db *DB) PusherFeed() <-chan *pusher.Op {
+ return db.pusherFeed
+}
diff --git a/pkg/storer/netstore_test.go b/pkg/storer/netstore_test.go
index 9c073c09a20..c32f3f9252b 100644
--- a/pkg/storer/netstore_test.go
+++ b/pkg/storer/netstore_test.go
@@ -8,7 +8,6 @@ import (
"context"
"errors"
"fmt"
- "slices"
"testing"
"time"
@@ -53,7 +52,13 @@ func testNetStore(t *testing.T, newStorer func(r retrieval.Interface) (*storer.D
for {
select {
case op := <-lstore.PusherFeed():
- found := slices.ContainsFunc(chunks, op.Chunk.Equal)
+ found := false
+ for _, ch := range chunks {
+ if op.Chunk.Equal(ch) {
+ found = true
+ break
+ }
+ }
if !found {
op.Err <- fmt.Errorf("incorrect chunk for push: have %s", op.Chunk.Address())
continue
@@ -105,7 +110,13 @@ func testNetStore(t *testing.T, newStorer func(r retrieval.Interface) (*storer.D
for {
select {
case op := <-lstore.PusherFeed():
- found := slices.ContainsFunc(chunks, op.Chunk.Equal)
+ found := false
+ for _, ch := range chunks {
+ if op.Chunk.Equal(ch) {
+ found = true
+ break
+ }
+ }
if !found {
op.Err <- fmt.Errorf("incorrect chunk for push: have %s", op.Chunk.Address())
continue
diff --git a/pkg/storer/pinstore.go b/pkg/storer/pinstore.go
index c57580b71e6..101469c2bc9 100644
--- a/pkg/storer/pinstore.go
+++ b/pkg/storer/pinstore.go
@@ -1,6 +1,5 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package storer
@@ -9,7 +8,7 @@ import (
"fmt"
"time"
- storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/storer/internal"
pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
"github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
@@ -109,7 +108,3 @@ func (db *DB) HasPin(root swarm.Address) (has bool, err error) {
return pinstore.HasPin(db.storage.IndexStore(), root)
}
-
-func (db *DB) IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error {
- return pinstore.IterateCollection(db.storage.IndexStore(), root, iterateFn)
-}
diff --git a/pkg/storer/pinstore_js.go b/pkg/storer/pinstore_js.go
new file mode 100644
index 00000000000..cae48454088
--- /dev/null
+++ b/pkg/storer/pinstore_js.go
@@ -0,0 +1,79 @@
+//go:build js
+// +build js
+
+package storer
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal"
+ pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// NewCollection is the implementation of the PinStore.NewCollection method.
+func (db *DB) NewCollection(ctx context.Context) (PutterSession, error) {
+ var (
+ pinningPutter internal.PutterCloserWithReference
+ err error
+ )
+ err = db.storage.Run(ctx, func(store transaction.Store) error {
+ pinningPutter, err = pinstore.NewCollection(store.IndexStore())
+ if err != nil {
+ return fmt.Errorf("pinstore.NewCollection: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &putterSession{
+ Putter: storage.PutterFunc(
+ func(ctx context.Context, chunk swarm.Chunk) error {
+ unlock := db.Lock(uploadsLock)
+ defer unlock()
+ return db.storage.Run(ctx, func(s transaction.Store) error {
+ return pinningPutter.Put(ctx, s, chunk)
+ })
+ },
+ ),
+
+ done: func(address swarm.Address) error {
+ unlock := db.Lock(uploadsLock)
+ defer unlock()
+ return db.storage.Run(ctx, func(s transaction.Store) error {
+ return pinningPutter.Close(s.IndexStore(), address)
+ })
+ },
+ cleanup: func() error {
+ unlock := db.Lock(uploadsLock)
+ defer unlock()
+ return pinningPutter.Cleanup(db.storage)
+ },
+ }, nil
+}
+
+// DeletePin is the implementation of the PinStore.DeletePin method.
+func (db *DB) DeletePin(ctx context.Context, root swarm.Address) (err error) {
+ unlock := db.Lock(uploadsLock)
+ defer unlock()
+
+ return pinstore.DeletePin(ctx, db.storage, root)
+}
+
+// Pins is the implementation of the PinStore.Pins method.
+func (db *DB) Pins() (address []swarm.Address, err error) {
+ return pinstore.Pins(db.storage.IndexStore())
+}
+
+// HasPin is the implementation of the PinStore.HasPin method.
+func (db *DB) HasPin(root swarm.Address) (has bool, err error) {
+ return pinstore.HasPin(db.storage.IndexStore(), root)
+}
diff --git a/pkg/storer/pinstore_shared.go b/pkg/storer/pinstore_shared.go
new file mode 100644
index 00000000000..b91cf00be2a
--- /dev/null
+++ b/pkg/storer/pinstore_shared.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package storer
+
+import (
+ pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+func (db *DB) IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error {
+ return pinstore.IterateCollection(db.storage.IndexStore(), root, iterateFn)
+}
diff --git a/pkg/storer/recover.go b/pkg/storer/recover.go
index 07d300e5d46..55ee59619d4 100644
--- a/pkg/storer/recover.go
+++ b/pkg/storer/recover.go
@@ -8,7 +8,6 @@ import (
"context"
"errors"
"io/fs"
- "os"
"path/filepath"
"time"
@@ -16,6 +15,8 @@ import (
storage "github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstore"
"github.com/ethersphere/bee/v2/pkg/swarm"
+
+ sharedFs "github.com/ethersphere/bee/v2/pkg/fs"
)
const (
@@ -29,10 +30,10 @@ func sharkyRecovery(ctx context.Context, sharkyBasePath string, store storage.St
logger := opts.Logger.WithName(loggerName).Register()
dirtyFilePath := filepath.Join(sharkyBasePath, sharkyDirtyFileName)
- closer := func() error { return os.Remove(dirtyFilePath) }
+ closer := func() error { return sharedFs.Remove(dirtyFilePath) }
- if _, err := os.Stat(dirtyFilePath); errors.Is(err, fs.ErrNotExist) {
- return closer, os.WriteFile(dirtyFilePath, []byte{}, 0644)
+ if _, err := sharedFs.Stat(dirtyFilePath); errors.Is(err, fs.ErrNotExist) {
+ return closer, sharedFs.WriteFile(dirtyFilePath, []byte{}, 0644)
}
logger.Info("localstore sharky .DIRTY file exists: starting recovery due to previous dirty exit")
diff --git a/pkg/storer/reserve.go b/pkg/storer/reserve.go
index 329e9461f50..f79479186a9 100644
--- a/pkg/storer/reserve.go
+++ b/pkg/storer/reserve.go
@@ -1,6 +1,5 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package storer
@@ -9,217 +8,14 @@ import (
"encoding/hex"
"errors"
"fmt"
- "math"
- "math/bits"
- "slices"
- "sync"
- "sync/atomic"
"time"
"github.com/ethersphere/bee/v2/pkg/postage"
"github.com/ethersphere/bee/v2/pkg/storage"
- "github.com/ethersphere/bee/v2/pkg/storage/storageutil"
"github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
- "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
"github.com/ethersphere/bee/v2/pkg/swarm"
)
-const (
- reserveOverCapacity = "reserveOverCapacity"
- reserveUnreserved = "reserveUnreserved"
- batchExpiry = "batchExpiry"
- batchExpiryDone = "batchExpiryDone"
-)
-
-var (
- errMaxRadius = errors.New("max radius reached")
- reserveSizeWithinRadius atomic.Uint64
-)
-
-type Syncer interface {
- // Number of active historical syncing jobs.
- SyncRate() float64
- Start(context.Context)
-}
-
-func threshold(capacity int) int { return capacity * 5 / 10 }
-
-func (db *DB) startReserveWorkers(
- ctx context.Context,
- radius func() (uint8, error),
-) {
- ctx, cancel := context.WithCancel(ctx)
- go func() {
- <-db.quit
- cancel()
- }()
-
- db.inFlight.Add(1)
- go db.reserveWorker(ctx)
-
- sub, unsubscribe := db.reserveOptions.startupStabilizer.Subscribe()
- defer unsubscribe()
-
- select {
- case <-sub:
- db.logger.Debug("node warmup check completed")
- case <-db.quit:
- return
- }
-
- r, err := radius()
- if err != nil {
- db.logger.Error(err, "reserve worker initial radius")
- return // node shutdown
- }
-
- err = db.reserve.SetRadius(r)
- if err != nil {
- db.logger.Error(err, "reserve set radius")
- } else {
- db.metrics.StorageRadius.Set(float64(r))
- }
-
- // syncing can now begin now that the reserver worker is running
- db.syncer.Start(ctx)
-}
-
-func (db *DB) countWithinRadius(ctx context.Context) (int, error) {
- count := 0
- missing := 0
- radius := db.StorageRadius()
-
- evictBatches := make(map[string]bool)
- err := db.reserve.IterateChunksItems(0, func(ci *reserve.ChunkBinItem) (bool, error) {
- if ci.Bin >= radius {
- count++
- }
-
- if exists, err := db.batchstore.Exists(ci.BatchID); err == nil && !exists {
- missing++
- evictBatches[string(ci.BatchID)] = true
- }
- return false, nil
- })
- if err != nil {
- return 0, err
- }
-
- for batch := range evictBatches {
- db.logger.Debug("reserve: invalid batch", "batch_id", hex.EncodeToString([]byte(batch)))
- err = errors.Join(err, db.EvictBatch(ctx, []byte(batch)))
- }
-
- db.metrics.ReserveSizeWithinRadius.Set(float64(count))
- db.metrics.ReserveMissingBatch.Set(float64(missing))
- reserveSizeWithinRadius.Store(uint64(count))
-
- return count, err
-}
-
-func (db *DB) reserveWorker(ctx context.Context) {
- defer db.inFlight.Done()
-
- batchExpiryTrigger, batchExpiryUnsub := db.events.Subscribe(batchExpiry)
- defer batchExpiryUnsub()
-
- overCapTrigger, overCapUnsub := db.events.Subscribe(reserveOverCapacity)
- defer overCapUnsub()
-
- thresholdTicker := time.NewTicker(db.reserveOptions.wakeupDuration)
- defer thresholdTicker.Stop()
-
- _, _ = db.countWithinRadius(ctx)
-
- if !db.reserve.IsWithinCapacity() {
- db.events.Trigger(reserveOverCapacity)
- }
-
- for {
- select {
- case <-ctx.Done():
- return
- case <-batchExpiryTrigger:
-
- err := db.evictExpiredBatches(ctx)
- if err != nil {
- db.logger.Warning("reserve worker evict expired batches", "error", err)
- }
-
- db.events.Trigger(batchExpiryDone)
-
- if !db.reserve.IsWithinCapacity() {
- db.events.Trigger(reserveOverCapacity)
- }
-
- case <-overCapTrigger:
-
- db.metrics.OverCapTriggerCount.Inc()
- if err := db.unreserve(ctx); err != nil {
- db.logger.Warning("reserve worker unreserve", "error", err)
- }
-
- case <-thresholdTicker.C:
-
- radius := db.reserve.Radius()
- count, err := db.countWithinRadius(ctx)
- if err != nil {
- db.logger.Warning("reserve worker count within radius", "error", err)
- continue
- }
-
- if count < threshold(db.reserve.Capacity()) && db.syncer.SyncRate() == 0 && radius > db.reserveOptions.minimumRadius {
- radius--
- if err := db.reserve.SetRadius(radius); err != nil {
- db.logger.Error(err, "reserve set radius")
- }
- db.metrics.StorageRadius.Set(float64(radius))
- db.logger.Info("reserve radius decrease", "radius", radius)
- }
- }
- }
-}
-
-func (db *DB) evictExpiredBatches(ctx context.Context) error {
- batches, err := db.getExpiredBatches()
- if err != nil {
- return err
- }
-
- for _, batchID := range batches {
- evicted, err := db.evictBatch(ctx, batchID, math.MaxInt, swarm.MaxBins)
- if err != nil {
- return err
- }
- if evicted > 0 {
- db.logger.Debug("evicted expired batch", "batch_id", hex.EncodeToString(batchID), "total_evicted", evicted)
- }
- err = db.storage.Run(ctx, func(st transaction.Store) error {
- return st.IndexStore().Delete(&expiredBatchItem{BatchID: batchID})
- })
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (db *DB) getExpiredBatches() ([][]byte, error) {
- var batchesToEvict [][]byte
- err := db.storage.IndexStore().Iterate(storage.Query{
- Factory: func() storage.Item { return new(expiredBatchItem) },
- ItemProperty: storage.QueryItemID,
- }, func(result storage.Result) (bool, error) {
- batchesToEvict = append(batchesToEvict, []byte(result.ID))
- return false, nil
- })
- if err != nil {
- return nil, err
- }
- return batchesToEvict, nil
-}
-
func (db *DB) evictBatch(
ctx context.Context,
batchID []byte,
@@ -252,24 +48,6 @@ func (db *DB) evictBatch(
return db.reserve.EvictBatchBin(ctx, batchID, evictCount, upToBin)
}
-// EvictBatch evicts all chunks belonging to a batch from the reserve.
-func (db *DB) EvictBatch(ctx context.Context, batchID []byte) error {
- if db.reserve == nil {
- // if reserve is not configured, do nothing
- return nil
- }
-
- err := db.storage.Run(ctx, func(tx transaction.Store) error {
- return tx.IndexStore().Put(&expiredBatchItem{BatchID: batchID})
- })
- if err != nil {
- return fmt.Errorf("save expired batch: %w", err)
- }
-
- db.events.Trigger(batchExpiry)
- return nil
-}
-
func (db *DB) ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (ch swarm.Chunk, err error) {
dur := captureDuration(time.Now())
defer func() {
@@ -400,250 +178,138 @@ func (db *DB) unreserve(ctx context.Context) (err error) {
return errMaxRadius
}
-// ReserveLastBinIDs returns all of the highest binIDs from all the bins in the reserve and the epoch time of the reserve.
-func (db *DB) ReserveLastBinIDs() ([]uint64, uint64, error) {
- if db.reserve == nil {
- return nil, 0, nil
- }
-
- return db.reserve.LastBinIDs()
-}
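+// startReserveWorkers starts the reserve worker, waits for node warmup, then
+// sets the initial radius and starts historical syncing.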
+func (db *DB) startReserveWorkers(
+ ctx context.Context,
+ radius func() (uint8, error),
+) {
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ <-db.quit
+ cancel()
+ }()
-func (db *DB) ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error {
- return db.reserve.IterateChunks(0, cb)
-}
+ db.inFlight.Add(1)
+ go db.reserveWorker(ctx)
-func (db *DB) StorageRadius() uint8 {
- if db.reserve == nil {
- return 0
- }
- return db.reserve.Radius()
-}
+ sub, unsubscribe := db.reserveOptions.startupStabilizer.Subscribe()
+ defer unsubscribe()
-func (db *DB) CommittedDepth() uint8 {
- if db.reserve == nil {
- return 0
+ select {
+ case <-sub:
+ db.logger.Debug("node warmup check completed")
+ case <-db.quit:
+ return
}
- return uint8(db.reserveOptions.capacityDoubling) + db.reserve.Radius()
-}
-
-func (db *DB) ReserveSize() int {
- if db.reserve == nil {
- return 0
+ r, err := radius()
+ if err != nil {
+ db.logger.Error(err, "reserve worker initial radius")
+ return // node shutdown
}
- return db.reserve.Size()
-}
-
-func (db *DB) ReserveSizeWithinRadius() uint64 {
- return reserveSizeWithinRadius.Load()
-}
-func (db *DB) IsWithinStorageRadius(addr swarm.Address) bool {
- if db.reserve == nil {
- return false
+ err = db.reserve.SetRadius(r)
+ if err != nil {
+ db.logger.Error(err, "reserve set radius")
+ } else {
+ db.metrics.StorageRadius.Set(float64(r))
}
- return swarm.Proximity(addr.Bytes(), db.baseAddr.Bytes()) >= db.reserve.Radius()
-}
-
-// BinC is the result returned from the SubscribeBin channel that contains the chunk address and the binID
-type BinC struct {
- Address swarm.Address
- BinID uint64
- BatchID []byte
- StampHash []byte
-}
-
-// SubscribeBin returns a channel that feeds all the chunks in the reserve from a certain bin between a start and end binIDs.
-func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error) {
- out := make(chan *BinC)
- done := make(chan struct{})
- errC := make(chan error, 1)
-
- db.inFlight.Add(1)
- go func() {
- defer db.inFlight.Done()
-
- trigger, unsub := db.reserveBinEvents.Subscribe(string(bin))
- defer unsub()
- defer close(out)
-
- for {
-
- err := db.reserve.IterateBin(bin, start, func(a swarm.Address, binID uint64, batchID, stampHash []byte) (bool, error) {
- select {
- case out <- &BinC{Address: a, BinID: binID, BatchID: batchID, StampHash: stampHash}:
- start = binID + 1
- case <-done:
- return true, nil
- case <-db.quit:
- return false, ErrDBQuit
- case <-ctx.Done():
- return false, ctx.Err()
- }
-
- return false, nil
- })
- if err != nil {
- errC <- err
- return
- }
-
- select {
- case <-trigger:
- case <-done:
- return
- case <-db.quit:
- errC <- ErrDBQuit
- return
- case <-ctx.Done():
- errC <- err
- return
- }
- }
- }()
-
- var doneOnce sync.Once
- return out, func() {
- doneOnce.Do(func() { close(done) })
- }, errC
-}
-type NeighborhoodStat struct {
- Neighborhood swarm.Neighborhood
- ReserveSizeWithinRadius int
- Proximity uint8
+ // syncing can now begin, since the reserve worker is running
+ db.syncer.Start(ctx)
}
-func (db *DB) NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error) {
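+// countWithinRadius counts reserve chunks at or above the storage radius and
+// schedules eviction of chunks whose batches no longer exist.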
+func (db *DB) countWithinRadius(ctx context.Context) (int, error) {
+ count := 0
+ missing := 0
radius := db.StorageRadius()
- committedDepth := db.CommittedDepth()
-
- prefixes := neighborhoodPrefixes(db.baseAddr, int(radius), db.reserveOptions.capacityDoubling)
- neighs := make([]*NeighborhoodStat, len(prefixes))
- for i, n := range prefixes {
- neighs[i] = &NeighborhoodStat{
- Neighborhood: swarm.NewNeighborhood(n, committedDepth),
- ReserveSizeWithinRadius: 0,
- Proximity: min(committedDepth, swarm.Proximity(n.Bytes(), db.baseAddr.Bytes())),
+
+ evictBatches := make(map[string]bool)
+ err := db.reserve.IterateChunksItems(0, func(ci *reserve.ChunkBinItem) (bool, error) {
+ if ci.Bin >= radius {
+ count++
}
- }
- err := db.reserve.IterateChunksItems(0, func(ch *reserve.ChunkBinItem) (bool, error) {
- for _, n := range neighs {
- if swarm.Proximity(ch.Address.Bytes(), n.Neighborhood.Bytes()) >= committedDepth {
- n.ReserveSizeWithinRadius++
- break
- }
+ if exists, err := db.batchstore.Exists(ci.BatchID); err == nil && !exists {
+ missing++
+ evictBatches[string(ci.BatchID)] = true
}
return false, nil
})
if err != nil {
- return nil, err
+ return 0, err
}
- return neighs, err
-}
-
-func neighborhoodPrefixes(base swarm.Address, radius int, suffixLength int) []swarm.Address {
- bitCombinationsCount := int(math.Pow(2, float64(suffixLength)))
- bitSuffixes := make([]uint8, bitCombinationsCount)
-
- for i := 0; i < bitCombinationsCount; i++ {
- bitSuffixes[i] = uint8(i)
+ for batch := range evictBatches {
+ db.logger.Debug("reserve: invalid batch", "batch_id", hex.EncodeToString([]byte(batch)))
+ err = errors.Join(err, db.EvictBatch(ctx, []byte(batch)))
}
- binPrefixes := make([]swarm.Address, bitCombinationsCount)
+ db.metrics.ReserveSizeWithinRadius.Set(float64(count))
+ db.metrics.ReserveMissingBatch.Set(float64(missing))
+ reserveSizeWithinRadius.Store(uint64(count))
- // copy base address
- for i := range binPrefixes {
- binPrefixes[i] = base.Clone()
- }
+ return count, err
+}
- for j := range binPrefixes {
- pseudoAddrBytes := binPrefixes[j].Bytes()
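+// reserveWorker evicts expired batches, unreserves on over-capacity events,
+// and periodically lowers the radius when the reserve is underutilized.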
+func (db *DB) reserveWorker(ctx context.Context) {
+ defer db.inFlight.Done()
- // set pseudo suffix
- bitSuffixPos := suffixLength - 1
- for l := radius + 0; l < radius+suffixLength+1; l++ {
- index, pos := l/8, l%8
+ batchExpiryTrigger, batchExpiryUnsub := db.events.Subscribe(batchExpiry)
+ defer batchExpiryUnsub()
- if hasBit(bitSuffixes[j], uint8(bitSuffixPos)) {
- pseudoAddrBytes[index] = bits.Reverse8(setBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
- } else {
- pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
- }
+ overCapTrigger, overCapUnsub := db.events.Subscribe(reserveOverCapacity)
+ defer overCapUnsub()
- bitSuffixPos--
- }
+ thresholdTicker := time.NewTicker(db.reserveOptions.wakeupDuration)
+ defer thresholdTicker.Stop()
- // clear rest of the bits
- for l := radius + suffixLength + 1; l < len(pseudoAddrBytes)*8; l++ {
- index, pos := l/8, l%8
- pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
- }
- }
+ _, _ = db.countWithinRadius(ctx)
- return binPrefixes
-}
+ if !db.reserve.IsWithinCapacity() {
+ db.events.Trigger(reserveOverCapacity)
+ }
-// Clears the bit at pos in n.
-func clearBit(n, pos uint8) uint8 {
- mask := ^(uint8(1) << pos)
- return n & mask
-}
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-batchExpiryTrigger:
-// Sets the bit at pos in the integer n.
-func setBit(n, pos uint8) uint8 {
- return n | 1<<pos
-}
+ db.events.Trigger(batchExpiryDone)
-// expiredBatchItem is a storage.Item implementation for expired batches.
-type expiredBatchItem struct {
- BatchID []byte
-}
+ if !db.reserve.IsWithinCapacity() {
+ db.events.Trigger(reserveOverCapacity)
+ }
-// ID implements storage.Item.
-func (e *expiredBatchItem) ID() string {
- return string(e.BatchID)
-}
+ case <-overCapTrigger:
-// Namespace implements storage.Item.
-func (e *expiredBatchItem) Namespace() string {
- return "expiredBatchItem"
-}
+ db.metrics.OverCapTriggerCount.Inc()
+ if err := db.unreserve(ctx); err != nil {
+ db.logger.Warning("reserve worker unreserve", "error", err)
+ }
-// Marshal implements storage.Item.
-// It is a no-op as expiredBatchItem is not serialized.
-func (e *expiredBatchItem) Marshal() ([]byte, error) {
- return nil, nil
-}
+ case <-thresholdTicker.C:
-// Unmarshal implements storage.Item.
-// It is a no-op as expiredBatchItem is not serialized.
-func (e *expiredBatchItem) Unmarshal(_ []byte) error {
- return nil
-}
+ radius := db.reserve.Radius()
+ count, err := db.countWithinRadius(ctx)
+ if err != nil {
+ db.logger.Warning("reserve worker count within radius", "error", err)
+ continue
+ }
-// Clone implements storage.Item.
-func (e *expiredBatchItem) Clone() storage.Item {
- if e == nil {
- return nil
- }
- return &expiredBatchItem{
- BatchID: slices.Clone(e.BatchID),
+ if count < threshold(db.reserve.Capacity()) && db.syncer.SyncRate() == 0 && radius > db.reserveOptions.minimumRadius {
+ radius--
+ if err := db.reserve.SetRadius(radius); err != nil {
+ db.logger.Error(err, "reserve set radius")
+ }
+ db.metrics.StorageRadius.Set(float64(radius))
+ db.logger.Info("reserve radius decrease", "radius", radius)
+ }
+ }
}
}
-
-// String implements storage.Item.
-func (e *expiredBatchItem) String() string {
- return storageutil.JoinFields(e.Namespace(), e.ID())
-}
-
-func (db *DB) po(addr swarm.Address) uint8 {
- return swarm.Proximity(db.baseAddr.Bytes(), addr.Bytes())
-}
diff --git a/pkg/storer/reserve_js.go b/pkg/storer/reserve_js.go
new file mode 100644
index 00000000000..ca0f5783ab2
--- /dev/null
+++ b/pkg/storer/reserve_js.go
@@ -0,0 +1,273 @@
+//go:build js
+// +build js
+
+package storer
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
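+// evictBatch evicts up to evictCount chunks belonging to the given batch from
+// bins below upToBin and logs the eviction result.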
+func (db *DB) evictBatch(
+ ctx context.Context,
+ batchID []byte,
+ evictCount int,
+ upToBin uint8,
+) (evicted int, err error) {
+ defer func() {
+ db.logger.Debug(
+ "reserve eviction",
+ "uptoBin", upToBin,
+ "evicted", evicted,
+ "batchID", hex.EncodeToString(batchID),
+ "new_size", db.reserve.Size(),
+ )
+ }()
+
+ return db.reserve.EvictBatchBin(ctx, batchID, evictCount, upToBin)
+}
+
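+// ReserveGet returns the chunk with the given address, batch ID and stamp hash
+// from the reserve.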
+func (db *DB) ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (ch swarm.Chunk, err error) {
+
+ defer func() {
+ if err != nil && !errors.Is(err, storage.ErrNotFound) {
+ db.logger.Debug("reserve get error", "error", err)
+ }
+ }()
+
+ return db.reserve.Get(ctx, addr, batchID, stampHash)
+}
+
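+// ReserveHas reports whether the reserve holds the chunk with the given
+// address, batch ID and stamp hash.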
+func (db *DB) ReserveHas(addr swarm.Address, batchID []byte, stampHash []byte) (has bool, err error) {
+
+ defer func() {
+ if err != nil {
+ db.logger.Debug("reserve has error", "error", err)
+ }
+ }()
+
+ return db.reserve.Has(addr, batchID, stampHash)
+}
+
+// ReservePutter returns a Putter for inserting chunks into the reserve.
+func (db *DB) ReservePutter() storage.Putter {
+ return storage.PutterFunc(
+ func(ctx context.Context, chunk swarm.Chunk) error {
+ err := db.reserve.Put(ctx, chunk)
+ if err != nil {
+ db.logger.Debug("reserve put error", "error", err)
+ return fmt.Errorf("reserve putter.Put: %w", err)
+ }
+ db.reserveBinEvents.Trigger(string(db.po(chunk.Address())))
+ if !db.reserve.IsWithinCapacity() {
+ db.events.Trigger(reserveOverCapacity)
+ }
+ return nil
+ },
+ )
+}
+
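+// unreserve evicts chunks from all batches below the current radius until the
+// eviction target is met, increasing the radius after each full pass.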
+func (db *DB) unreserve(ctx context.Context) (err error) {
+ radius := db.reserve.Radius()
+ defer db.events.Trigger(reserveUnreserved)
+
+ target := db.reserve.EvictionTarget()
+ if target <= 0 {
+ return nil
+ }
+
+ db.logger.Info("unreserve start", "target", target, "radius", radius)
+
+ batchExpiry, unsub := db.events.Subscribe(batchExpiry)
+ defer unsub()
+
+ totalEvicted := 0
+
+ var batches [][]byte
+ err = db.batchstore.Iterate(func(b *postage.Batch) (bool, error) {
+ batches = append(batches, b.ID)
+ return false, nil
+ })
+ if err != nil {
+ return err
+ }
+
+ for radius < swarm.MaxBins {
+
+ for _, b := range batches {
+
+ select {
+ case <-batchExpiry:
+ db.logger.Debug("stopping unreserve, received batch expiration signal")
+ return nil
+ default:
+ }
+
+ evict := target - totalEvicted
+ if evict < int(db.reserveOptions.minEvictCount) { // evict at least a min count
+ evict = int(db.reserveOptions.minEvictCount)
+ }
+
+ binEvicted, err := db.evictBatch(ctx, b, evict, radius)
+ // eviction happens in batches, so we need to keep track of the total
+ // number of chunks evicted even if there was an error
+ totalEvicted += binEvicted
+
+ // we can only get error here for critical cases, for eg. batch commit
+ // error, which is not recoverable
+ if err != nil {
+ return err
+ }
+
+ if totalEvicted >= target {
+ db.logger.Info("unreserve finished", "evicted", totalEvicted, "radius", radius)
+ return nil
+ }
+ }
+
+ radius++
+ db.logger.Info("reserve radius increase", "radius", radius)
+ _ = db.reserve.SetRadius(radius)
+ }
+
+ return errMaxRadius
+}
+
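+// startReserveWorkers starts the reserve worker, waits for node warmup, then
+// sets the initial radius and starts historical syncing.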
+func (db *DB) startReserveWorkers(
+ ctx context.Context,
+ radius func() (uint8, error),
+) {
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ <-db.quit
+ cancel()
+ }()
+
+ db.inFlight.Add(1)
+ go db.reserveWorker(ctx)
+
+ sub, unsubscribe := db.reserveOptions.startupStabilizer.Subscribe()
+ defer unsubscribe()
+
+ select {
+ case <-sub:
+ db.logger.Debug("node warmup check completed")
+ case <-db.quit:
+ return
+ }
+
+ r, err := radius()
+ if err != nil {
+ db.logger.Error(err, "reserve worker initial radius")
+ return // node shutdown
+ }
+
+ err = db.reserve.SetRadius(r)
+ if err != nil {
+ db.logger.Error(err, "reserve set radius")
+ }
+
+ // syncing can now begin, since the reserve worker is running
+ db.syncer.Start(ctx)
+}
+
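+// countWithinRadius counts reserve chunks at or above the storage radius and
+// schedules eviction of chunks whose batches no longer exist.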
+func (db *DB) countWithinRadius(ctx context.Context) (int, error) {
+ count := 0
+ missing := 0
+ radius := db.StorageRadius()
+
+ evictBatches := make(map[string]bool)
+ err := db.reserve.IterateChunksItems(0, func(ci *reserve.ChunkBinItem) (bool, error) {
+ if ci.Bin >= radius {
+ count++
+ }
+
+ if exists, err := db.batchstore.Exists(ci.BatchID); err == nil && !exists {
+ missing++
+ evictBatches[string(ci.BatchID)] = true
+ }
+ return false, nil
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ for batch := range evictBatches {
+ db.logger.Debug("reserve: invalid batch", "batch_id", hex.EncodeToString([]byte(batch)))
+ err = errors.Join(err, db.EvictBatch(ctx, []byte(batch)))
+ }
+
+ reserveSizeWithinRadius.Store(uint64(count))
+
+ return count, err
+}
+
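+// reserveWorker evicts expired batches, unreserves on over-capacity events,
+// and periodically lowers the radius when the reserve is underutilized.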
+func (db *DB) reserveWorker(ctx context.Context) {
+ defer db.inFlight.Done()
+
+ batchExpiryTrigger, batchExpiryUnsub := db.events.Subscribe(batchExpiry)
+ defer batchExpiryUnsub()
+
+ overCapTrigger, overCapUnsub := db.events.Subscribe(reserveOverCapacity)
+ defer overCapUnsub()
+
+ thresholdTicker := time.NewTicker(db.reserveOptions.wakeupDuration)
+ defer thresholdTicker.Stop()
+
+ _, _ = db.countWithinRadius(ctx)
+
+ if !db.reserve.IsWithinCapacity() {
+ db.events.Trigger(reserveOverCapacity)
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-batchExpiryTrigger:
+
+ err := db.evictExpiredBatches(ctx)
+ if err != nil {
+ db.logger.Warning("reserve worker evict expired batches", "error", err)
+ }
+
+ db.events.Trigger(batchExpiryDone)
+
+ if !db.reserve.IsWithinCapacity() {
+ db.events.Trigger(reserveOverCapacity)
+ }
+
+ case <-overCapTrigger:
+
+ if err := db.unreserve(ctx); err != nil {
+ db.logger.Warning("reserve worker unreserve", "error", err)
+ }
+
+ case <-thresholdTicker.C:
+
+ radius := db.reserve.Radius()
+ count, err := db.countWithinRadius(ctx)
+ if err != nil {
+ db.logger.Warning("reserve worker count within radius", "error", err)
+ continue
+ }
+
+ if count < threshold(db.reserve.Capacity()) && db.syncer.SyncRate() == 0 && radius > db.reserveOptions.minimumRadius {
+ radius--
+ if err := db.reserve.SetRadius(radius); err != nil {
+ db.logger.Error(err, "reserve set radius")
+ }
+
+ db.logger.Info("reserve radius decrease", "radius", radius)
+ }
+ }
+ }
+}
diff --git a/pkg/storer/reserve_shared.go b/pkg/storer/reserve_shared.go
new file mode 100644
index 00000000000..6d5d32290c0
--- /dev/null
+++ b/pkg/storer/reserve_shared.go
@@ -0,0 +1,349 @@
+// Copyright 2023 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package storer
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+ "slices"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storage/storageutil"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+const (
+ reserveOverCapacity = "reserveOverCapacity"
+ reserveUnreserved = "reserveUnreserved"
+ batchExpiry = "batchExpiry"
+ batchExpiryDone = "batchExpiryDone"
+)
+
+var (
+ errMaxRadius = errors.New("max radius reached")
+ reserveSizeWithinRadius atomic.Uint64
+)
+
+type Syncer interface {
+ // SyncRate returns the rate of active historical syncing jobs.
+ SyncRate() float64
+ Start(context.Context)
+}
+
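+// threshold returns half of the reserve capacity; the reserve worker lowers
+// the radius when the chunk count within radius drops below it.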
+func threshold(capacity int) int { return capacity * 5 / 10 }
+
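+// evictExpiredBatches fully evicts every expired batch from the reserve and
+// removes its expiry marker from the index store.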
+func (db *DB) evictExpiredBatches(ctx context.Context) error {
+ batches, err := db.getExpiredBatches()
+ if err != nil {
+ return err
+ }
+
+ for _, batchID := range batches {
+ evicted, err := db.evictBatch(ctx, batchID, math.MaxInt, swarm.MaxBins)
+ if err != nil {
+ return err
+ }
+ if evicted > 0 {
+ db.logger.Debug("evicted expired batch", "batch_id", hex.EncodeToString(batchID), "total_evicted", evicted)
+ }
+ err = db.storage.Run(ctx, func(st transaction.Store) error {
+ return st.IndexStore().Delete(&expiredBatchItem{BatchID: batchID})
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
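+// getExpiredBatches returns the IDs of all batches marked as expired in the
+// index store.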
+func (db *DB) getExpiredBatches() ([][]byte, error) {
+ var batchesToEvict [][]byte
+ err := db.storage.IndexStore().Iterate(storage.Query{
+ Factory: func() storage.Item { return new(expiredBatchItem) },
+ ItemProperty: storage.QueryItemID,
+ }, func(result storage.Result) (bool, error) {
+ batchesToEvict = append(batchesToEvict, []byte(result.ID))
+ return false, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return batchesToEvict, nil
+}
+
+// EvictBatch evicts all chunks belonging to a batch from the reserve.
+func (db *DB) EvictBatch(ctx context.Context, batchID []byte) error {
+ if db.reserve == nil {
+ // if reserve is not configured, do nothing
+ return nil
+ }
+
+ err := db.storage.Run(ctx, func(tx transaction.Store) error {
+ return tx.IndexStore().Put(&expiredBatchItem{BatchID: batchID})
+ })
+ if err != nil {
+ return fmt.Errorf("save expired batch: %w", err)
+ }
+
+ db.events.Trigger(batchExpiry)
+ return nil
+}
+
+// ReserveLastBinIDs returns the highest bin ID of every bin in the reserve, together with the epoch time of the reserve.
+func (db *DB) ReserveLastBinIDs() ([]uint64, uint64, error) {
+ if db.reserve == nil {
+ return nil, 0, nil
+ }
+
+ return db.reserve.LastBinIDs()
+}
+
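+// ReserveIterateChunks iterates over all chunks in the reserve, starting at bin 0.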
+func (db *DB) ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error {
+ return db.reserve.IterateChunks(0, cb)
+}
+
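+// StorageRadius returns the current reserve radius, or 0 if the reserve is not configured.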
+func (db *DB) StorageRadius() uint8 {
+ if db.reserve == nil {
+ return 0
+ }
+ return db.reserve.Radius()
+}
+
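+// CommittedDepth returns the storage radius plus the capacity doubling, i.e.
+// the depth the node commits to store.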
+func (db *DB) CommittedDepth() uint8 {
+ if db.reserve == nil {
+ return 0
+ }
+
+ return uint8(db.reserveOptions.capacityDoubling) + db.reserve.Radius()
+}
+
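+// ReserveSize returns the number of chunks in the reserve, or 0 if the reserve is not configured.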
+func (db *DB) ReserveSize() int {
+ if db.reserve == nil {
+ return 0
+ }
+ return db.reserve.Size()
+}
+
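+// ReserveSizeWithinRadius returns the most recent count of reserve chunks at
+// or above the storage radius.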
+func (db *DB) ReserveSizeWithinRadius() uint64 {
+ return reserveSizeWithinRadius.Load()
+}
+
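+// IsWithinStorageRadius reports whether the address falls within the node's storage radius.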
+func (db *DB) IsWithinStorageRadius(addr swarm.Address) bool {
+ if db.reserve == nil {
+ return false
+ }
+ return swarm.Proximity(addr.Bytes(), db.baseAddr.Bytes()) >= db.reserve.Radius()
+}
+
+// BinC is the result returned from the SubscribeBin channel; it contains the chunk address, bin ID, batch ID and stamp hash.
+type BinC struct {
+ Address swarm.Address
+ BinID uint64
+ BatchID []byte
+ StampHash []byte
+}
+
+// SubscribeBin returns a channel that feeds all the chunks in the reserve from a certain bin, starting at the given bin ID.
+func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error) {
+ out := make(chan *BinC)
+ done := make(chan struct{})
+ errC := make(chan error, 1)
+
+ db.inFlight.Add(1)
+ go func() {
+ defer db.inFlight.Done()
+
+ trigger, unsub := db.reserveBinEvents.Subscribe(string(bin))
+ defer unsub()
+ defer close(out)
+
+ for {
+
+ err := db.reserve.IterateBin(bin, start, func(a swarm.Address, binID uint64, batchID, stampHash []byte) (bool, error) {
+ select {
+ case out <- &BinC{Address: a, BinID: binID, BatchID: batchID, StampHash: stampHash}:
+ start = binID + 1
+ case <-done:
+ return true, nil
+ case <-db.quit:
+ return false, ErrDBQuit
+ case <-ctx.Done():
+ return false, ctx.Err()
+ }
+
+ return false, nil
+ })
+ if err != nil {
+ errC <- err
+ return
+ }
+
+ select {
+ case <-trigger:
+ case <-done:
+ return
+ case <-db.quit:
+ errC <- ErrDBQuit
+ return
+ case <-ctx.Done():
+ errC <- err
+ return
+ }
+ }
+ }()
+
+ var doneOnce sync.Once
+ return out, func() {
+ doneOnce.Do(func() { close(done) })
+ }, errC
+}
+
+type NeighborhoodStat struct {
+ Neighborhood swarm.Neighborhood
+ ReserveSizeWithinRadius int
+ Proximity uint8
+}
+
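+// NeighborhoodsStat computes, for every neighborhood prefix at the committed
+// depth, the number of reserve chunks it holds and its proximity to the base address.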
+func (db *DB) NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error) {
+ radius := db.StorageRadius()
+ committedDepth := db.CommittedDepth()
+
+ prefixes := neighborhoodPrefixes(db.baseAddr, int(radius), db.reserveOptions.capacityDoubling)
+ neighs := make([]*NeighborhoodStat, len(prefixes))
+ for i, n := range prefixes {
+ neighs[i] = &NeighborhoodStat{
+ Neighborhood: swarm.NewNeighborhood(n, committedDepth),
+ ReserveSizeWithinRadius: 0,
+ Proximity: min(committedDepth, swarm.Proximity(n.Bytes(), db.baseAddr.Bytes())),
+ }
+ }
+
+ err := db.reserve.IterateChunksItems(0, func(ch *reserve.ChunkBinItem) (bool, error) {
+ for _, n := range neighs {
+ if swarm.Proximity(ch.Address.Bytes(), n.Neighborhood.Bytes()) >= committedDepth {
+ n.ReserveSizeWithinRadius++
+ break
+ }
+ }
+ return false, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return neighs, err
+}
+
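+// neighborhoodPrefixes returns one pseudo address per possible bit suffix of
+// length suffixLength, each sharing the base address prefix up to radius, with
+// all remaining bits cleared.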
+func neighborhoodPrefixes(base swarm.Address, radius int, suffixLength int) []swarm.Address {
+ bitCombinationsCount := int(math.Pow(2, float64(suffixLength)))
+ bitSuffixes := make([]uint8, bitCombinationsCount)
+
+ for i := 0; i < bitCombinationsCount; i++ {
+ bitSuffixes[i] = uint8(i)
+ }
+
+ binPrefixes := make([]swarm.Address, bitCombinationsCount)
+
+ // copy base address
+ for i := range binPrefixes {
+ binPrefixes[i] = base.Clone()
+ }
+
+ for j := range binPrefixes {
+ pseudoAddrBytes := binPrefixes[j].Bytes()
+
+ // set pseudo suffix
+ bitSuffixPos := suffixLength - 1
+ for l := radius; l < radius+suffixLength+1; l++ {
+ index, pos := l/8, l%8
+
+ if hasBit(bitSuffixes[j], uint8(bitSuffixPos)) {
+ pseudoAddrBytes[index] = bits.Reverse8(setBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
+ } else {
+ pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
+ }
+
+ bitSuffixPos--
+ }
+
+ // clear rest of the bits
+ for l := radius + suffixLength + 1; l < len(pseudoAddrBytes)*8; l++ {
+ index, pos := l/8, l%8
+ pseudoAddrBytes[index] = bits.Reverse8(clearBit(bits.Reverse8(pseudoAddrBytes[index]), uint8(pos)))
+ }
+ }
+
+ return binPrefixes
+}
+
+// Clears the bit at pos in n.
+func clearBit(n, pos uint8) uint8 {
+ mask := ^(uint8(1) << pos)
+ return n & mask
+}
+
+// Sets the bit at pos in the integer n.
+func setBit(n, pos uint8) uint8 {
+ return n | 1<<pos
+}
+
+// expiredBatchItem is a storage.Item implementation for expired batches.
+type expiredBatchItem struct {
+ BatchID []byte
+}
+
+// ID implements storage.Item.
+func (e *expiredBatchItem) ID() string {
+ return string(e.BatchID)
+}
+
+// Namespace implements storage.Item.
+func (e *expiredBatchItem) Namespace() string {
+ return "expiredBatchItem"
+}
+
+// Marshal implements storage.Item.
+// It is a no-op as expiredBatchItem is not serialized.
+func (e *expiredBatchItem) Marshal() ([]byte, error) {
+ return nil, nil
+}
+
+// Unmarshal implements storage.Item.
+// It is a no-op as expiredBatchItem is not serialized.
+func (e *expiredBatchItem) Unmarshal(_ []byte) error {
+ return nil
+}
+
+// Clone implements storage.Item.
+func (e *expiredBatchItem) Clone() storage.Item {
+ if e == nil {
+ return nil
+ }
+ return &expiredBatchItem{
+ BatchID: slices.Clone(e.BatchID),
+ }
+}
+
+// String implements storage.Item.
+func (e *expiredBatchItem) String() string {
+ return storageutil.JoinFields(e.Namespace(), e.ID())
+}
+
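+// po returns the proximity order of the address relative to the node's base address.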
+func (db *DB) po(addr swarm.Address) uint8 {
+ return swarm.Proximity(db.baseAddr.Bytes(), addr.Bytes())
+}
diff --git a/pkg/storer/storer.go b/pkg/storer/storer.go
index cafcb3c6a57..f1aea248b95 100644
--- a/pkg/storer/storer.go
+++ b/pkg/storer/storer.go
@@ -1,6 +1,5 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package storer
@@ -9,417 +8,30 @@ import (
"errors"
"fmt"
"io"
- "io/fs"
- "math/big"
- "os"
"path"
- "path/filepath"
"sync"
- "sync/atomic"
- "time"
"github.com/ethersphere/bee/v2/pkg/log"
- "github.com/ethersphere/bee/v2/pkg/stabilization"
- "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
-
- m "github.com/ethersphere/bee/v2/pkg/metrics"
"github.com/ethersphere/bee/v2/pkg/postage"
"github.com/ethersphere/bee/v2/pkg/pusher"
"github.com/ethersphere/bee/v2/pkg/retrieval"
- "github.com/ethersphere/bee/v2/pkg/sharky"
- "github.com/ethersphere/bee/v2/pkg/storage"
- "github.com/ethersphere/bee/v2/pkg/storage/leveldbstore"
"github.com/ethersphere/bee/v2/pkg/storage/migration"
"github.com/ethersphere/bee/v2/pkg/storer/internal/cache"
"github.com/ethersphere/bee/v2/pkg/storer/internal/events"
pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
"github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
"github.com/ethersphere/bee/v2/pkg/storer/internal/upload"
- localmigration "github.com/ethersphere/bee/v2/pkg/storer/migration"
"github.com/ethersphere/bee/v2/pkg/swarm"
- "github.com/ethersphere/bee/v2/pkg/topology"
"github.com/ethersphere/bee/v2/pkg/tracing"
- "github.com/ethersphere/bee/v2/pkg/util/syncutil"
"github.com/prometheus/client_golang/prometheus"
- "github.com/spf13/afero"
- "github.com/syndtr/goleveldb/leveldb"
- "github.com/syndtr/goleveldb/leveldb/filter"
- "github.com/syndtr/goleveldb/leveldb/opt"
"resenje.org/multex"
-)
-
-// PutterSession provides a session around the storage.Putter. The session on
-// successful completion commits all the operations or in case of error, rolls back
-// the state.
-type PutterSession interface {
- storage.Putter
- // Done is used to close the session and optionally assign a swarm.Address to
- // this session.
- Done(swarm.Address) error
- // Cleanup is used to cleanup any state related to this session in case of
- // any error.
- Cleanup() error
-}
-
-// SessionInfo is a type which exports the storer tag object. This object
-// stores all the relevant information about a particular session.
-type SessionInfo = upload.TagItem
-
-// UploadStore is a logical component of the storer which deals with the upload
-// of data to swarm.
-type UploadStore interface {
- // Upload provides a PutterSession which is tied to the tagID. Optionally if
- // users requests to pin the data, a new pinning collection is created.
- Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error)
- // NewSession can be used to obtain a tag ID to use for a new Upload session.
- NewSession() (SessionInfo, error)
- // Session will show the information about the session.
- Session(tagID uint64) (SessionInfo, error)
- // DeleteSession will delete the session info associated with the tag id.
- DeleteSession(tagID uint64) error
- // ListSessions will list all the Sessions currently being tracked.
- ListSessions(offset, limit int) ([]SessionInfo, error)
-}
-
-// PinStore is a logical component of the storer which deals with pinning
-// functionality.
-type PinStore interface {
- // NewCollection can be used to create a new PutterSession which writes a new
- // pinning collection. The address passed in during the Done of the session is
- // used as the root referencce.
- NewCollection(context.Context) (PutterSession, error)
- // DeletePin deletes all the chunks associated with the collection pointed to
- // by the swarm.Address passed in.
- DeletePin(context.Context, swarm.Address) error
- // Pins returns all the root references of pinning collections.
- Pins() ([]swarm.Address, error)
- // HasPin is a helper which checks if a collection exists with the root
- // reference passed in.
- HasPin(swarm.Address) (bool, error)
-}
-
-// PinIterator is a helper interface which can be used to iterate over all the
-// chunks in a pinning collection.
-type PinIterator interface {
- IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error
-}
-
-// CacheStore is a logical component of the storer that deals with cache
-// content.
-type CacheStore interface {
- // Lookup method provides a storage.Getter wrapped around the underlying
- // ChunkStore which will update cache related indexes if required on successful
- // lookups.
- Lookup() storage.Getter
- // Cache method provides a storage.Putter which will add the chunks to cache.
- // This will add the chunk to underlying store as well as new indexes which
- // will keep track of the chunk in the cache.
- Cache() storage.Putter
-}
-
-// NetStore is a logical component of the storer that deals with network. It will
-// push/retrieve chunks from the network.
-type NetStore interface {
- // DirectUpload provides a session which can be used to push chunks directly
- // to the network.
- DirectUpload() PutterSession
- // Download provides a getter which can be used to download data. If the data
- // is found locally, its returned immediately, otherwise it is retrieved from
- // the network.
- Download(cache bool) storage.Getter
- // PusherFeed is the feed for direct push chunks. This can be used by the
- // pusher component to push out the chunks.
- PusherFeed() <-chan *pusher.Op
-}
-
-var _ Reserve = (*DB)(nil)
-
-// Reserve is a logical component of the storer that deals with reserve
-// content. It will implement all the core functionality required for the protocols.
-type Reserve interface {
- ReserveStore
- EvictBatch(ctx context.Context, batchID []byte) error
- ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error)
- ReserveSize() int
-}
-
-// ReserveIterator is a helper interface which can be used to iterate over all
-// the chunks in the reserve.
-type ReserveIterator interface {
- ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error
-}
-
-// ReserveStore is a logical component of the storer that deals with reserve
-// content. It will implement all the core functionality required for the protocols.
-type ReserveStore interface {
- ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (swarm.Chunk, error)
- ReserveHas(addr swarm.Address, batchID []byte, stampHash []byte) (bool, error)
- ReservePutter() storage.Putter
- SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
- ReserveLastBinIDs() ([]uint64, uint64, error)
- RadiusChecker
-}
-
-// RadiusChecker provides the radius related functionality.
-type RadiusChecker interface {
- IsWithinStorageRadius(addr swarm.Address) bool
- StorageRadius() uint8
- CommittedDepth() uint8
-}
-
-// LocalStore is a read-only ChunkStore. It can be used to check if chunk is known
-// locally, but it cannot tell what is the context of the chunk (whether it is
-// pinned, uploaded, etc.).
-type LocalStore interface {
- ChunkStore() storage.ReadOnlyChunkStore
-}
-
-// Debugger is a helper interface which can be used to debug the storer.
-type Debugger interface {
- DebugInfo(context.Context) (Info, error)
-}
-
-type NeighborhoodStats interface {
- NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error)
-}
-
-type memFS struct {
- afero.Fs
-}
-
-func (m *memFS) Open(path string) (fs.File, error) {
- return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
-}
-
-type dirFS struct {
- basedir string
-}
-
-func (d *dirFS) Open(path string) (fs.File, error) {
- return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0o644)
-}
-
-var (
- sharkyNoOfShards = 32
- ErrDBQuit = errors.New("db quit")
-)
-type closerFn func() error
-
-func (c closerFn) Close() error { return c() }
-
-func closer(closers ...io.Closer) io.Closer {
- return closerFn(func() error {
- var err error
- for _, closer := range closers {
- err = errors.Join(err, closer.Close())
- }
- return err
- })
-}
-
-func initInmemRepository() (transaction.Storage, io.Closer, error) {
- store, err := leveldbstore.New("", nil)
- if err != nil {
- return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err)
- }
-
- sharky, err := sharky.New(
- &memFS{Fs: afero.NewMemMapFs()},
- sharkyNoOfShards,
- swarm.SocMaxChunkSize,
- )
- if err != nil {
- return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err)
- }
-
- return transaction.NewStorage(sharky, store), closer(store, sharky), nil
-}
-
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "storer"
-
-// Default options for levelDB.
-const (
- defaultOpenFilesLimit = uint64(256)
- defaultBlockCacheCapacity = uint64(32 * 1024 * 1024)
- defaultWriteBufferSize = uint64(32 * 1024 * 1024)
- defaultDisableSeeksCompaction = false
- defaultCacheCapacity = uint64(1_000_000)
- defaultBgCacheWorkers = 32
- DefaultReserveCapacity = 1 << 22 // 4194304 chunks
+ localmigration "github.com/ethersphere/bee/v2/pkg/storer/migration"
- indexPath = "indexstore"
- sharkyPath = "sharky"
+ m "github.com/ethersphere/bee/v2/pkg/metrics"
)
-func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) {
- ldbBasePath := path.Join(basePath, indexPath)
-
- if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) {
- err := os.MkdirAll(ldbBasePath, 0o777)
- if err != nil {
- return nil, err
- }
- }
- store, err := leveldbstore.New(path.Join(basePath, "indexstore"), &opt.Options{
- OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit),
- BlockCacheCapacity: int(opts.LdbBlockCacheCapacity),
- WriteBuffer: int(opts.LdbWriteBufferSize),
- DisableSeeksCompaction: opts.LdbDisableSeeksCompaction,
- CompactionL0Trigger: 8,
- Filter: filter.NewBloomFilter(64),
- })
- if err != nil {
- return nil, fmt.Errorf("failed creating levelDB index store: %w", err)
- }
-
- return store, nil
-}
-
-func initDiskRepository(
- ctx context.Context,
- basePath string,
- opts *Options,
-) (transaction.Storage, *PinIntegrity, io.Closer, error) {
- store, err := initStore(basePath, opts)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err)
- }
-
- err = migration.Migrate(store, "core-migration", localmigration.BeforeInitSteps(store, opts.Logger))
- if err != nil {
- return nil, nil, nil, errors.Join(store.Close(), fmt.Errorf("failed core migration: %w", err))
- }
-
- if opts.LdbStats.Load() != nil {
- go func() {
- ldbStats := opts.LdbStats.Load()
- logger := log.NewLogger(loggerName).Register()
- ticker := time.NewTicker(15 * time.Second)
- defer ticker.Stop()
-
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- stats := new(leveldb.DBStats)
- switch err := store.DB().Stats(stats); {
- case errors.Is(err, leveldb.ErrClosed):
- return
- case err != nil:
- logger.Error(err, "snapshot levelDB stats")
- default:
- ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount))
- ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds())
- ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots))
- ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators))
- ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite))
- ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead))
- ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize))
- ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount))
- ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp))
- ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp))
- ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp))
- ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp))
- for i := 0; i < len(stats.LevelSizes); i++ {
- ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i]))
- ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i]))
- ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i]))
- ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i]))
- ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds())
- }
- }
- }
- }
- }()
- }
-
- sharkyBasePath := path.Join(basePath, sharkyPath)
-
- if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) {
- err := os.Mkdir(sharkyBasePath, 0o777)
- if err != nil {
- return nil, nil, nil, err
- }
- }
-
- recoveryCloser, err := sharkyRecovery(ctx, sharkyBasePath, store, opts)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("failed to recover sharky: %w", err)
- }
-
- sharky, err := sharky.New(
- &dirFS{basedir: sharkyBasePath},
- sharkyNoOfShards,
- swarm.SocMaxChunkSize,
- )
- if err != nil {
- return nil, nil, nil, fmt.Errorf("failed creating sharky instance: %w", err)
- }
-
- pinIntegrity := &PinIntegrity{
- Store: store,
- Sharky: sharky,
- }
-
- return transaction.NewStorage(sharky, store), pinIntegrity, closer(store, sharky, recoveryCloser), nil
-}
-
-const lockKeyNewSession string = "new_session"
-
-// Options provides a container to configure different things in the storer.
-type Options struct {
- // These are options related to levelDB. Currently, the underlying storage used is levelDB.
- LdbStats atomic.Pointer[prometheus.HistogramVec]
- LdbOpenFilesLimit uint64
- LdbBlockCacheCapacity uint64
- LdbWriteBufferSize uint64
- LdbDisableSeeksCompaction bool
- Logger log.Logger
- Tracer *tracing.Tracer
-
- Address swarm.Address
- StartupStabilizer stabilization.Subscriber
- Batchstore postage.Storer
- ValidStamp postage.ValidStampFn
- RadiusSetter topology.SetStorageRadiuser
- StateStore storage.StateStorer
-
- ReserveCapacity int
- ReserveWakeUpDuration time.Duration
- ReserveMinEvictCount uint64
- ReserveCapacityDoubling int
-
- CacheCapacity uint64
- CacheMinEvictCount uint64
-
- MinimumStorageRadius uint
-}
-
-func defaultOptions() *Options {
- return &Options{
- LdbOpenFilesLimit: defaultOpenFilesLimit,
- LdbBlockCacheCapacity: defaultBlockCacheCapacity,
- LdbWriteBufferSize: defaultWriteBufferSize,
- LdbDisableSeeksCompaction: defaultDisableSeeksCompaction,
- CacheCapacity: defaultCacheCapacity,
- Logger: log.Noop,
- ReserveCapacity: DefaultReserveCapacity,
- ReserveWakeUpDuration: time.Minute * 30,
- }
-}
-
-// cacheLimiter is used to limit the number
-// of concurrent cache background workers.
-type cacheLimiter struct {
- wg sync.WaitGroup
- sem chan struct{}
- ctx context.Context
- cancel context.CancelFunc
-}
-
// DB implements all the component stores described above.
type DB struct {
logger log.Logger
@@ -427,7 +39,7 @@ type DB struct {
metrics metrics
storage transaction.Storage
- multex *multex.Multex
+ multex *multex.Multex[any]
cacheObj *cache.Cache
retrieval retrieval.Interface
pusherFeed chan *pusher.Op
@@ -451,15 +63,6 @@ type DB struct {
pinIntegrity *PinIntegrity
}
-type reserveOpts struct {
- startupStabilizer stabilization.Subscriber
- wakeupDuration time.Duration
- minEvictCount uint64
- cacheMinEvictCount uint64
- minimumRadius uint8
- capacityDoubling int
-}
-
// New returns a newly constructed DB object which implements all the above
// component stores.
func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
@@ -477,7 +80,7 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
opts.Logger = log.Noop
}
- lock := multex.New()
+ lock := multex.New[any]()
metrics := newMetrics()
opts.LdbStats.CompareAndSwap(nil, metrics.LevelDBStats)
@@ -594,20 +197,6 @@ func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
return db, nil
}
-// Reset removes all entries
-func (db *DB) ResetReserve(ctx context.Context) error {
- return db.reserve.Reset(ctx)
-}
-
-// Metrics returns set of prometheus collectors.
-func (db *DB) Metrics() []prometheus.Collector {
- collectors := m.PrometheusCollectorsFromFields(db.metrics)
- if v, ok := db.storage.(m.Collector); ok {
- collectors = append(collectors, v.Metrics()...)
- }
- return collectors
-}
-
// StatusMetrics exposes metrics that are exposed on the status protocol.
func (db *DB) StatusMetrics() []prometheus.Collector {
collectors := []prometheus.Collector{
@@ -625,96 +214,11 @@ func (db *DB) StatusMetrics() []prometheus.Collector {
return collectors
}
-func (db *DB) Close() error {
- close(db.quit)
-
- bgReserveWorkersClosed := make(chan struct{})
- go func() {
- defer close(bgReserveWorkersClosed)
- if !syncutil.WaitWithTimeout(&db.inFlight, 5*time.Second) {
- db.logger.Warning("db shutting down with running goroutines")
- }
- }()
-
- bgCacheWorkersClosed := make(chan struct{})
- go func() {
- defer close(bgCacheWorkersClosed)
- if !syncutil.WaitWithTimeout(&db.cacheLimiter.wg, 5*time.Second) {
- db.logger.Warning("cache goroutines still running after the wait timeout; force closing")
- db.cacheLimiter.cancel()
- }
- }()
-
- var err error
- closerDone := make(chan struct{})
- go func() {
- defer close(closerDone)
- err = db.dbCloser.Close()
- }()
-
- done := make(chan struct{})
- go func() {
- defer close(done)
- <-closerDone
- <-bgCacheWorkersClosed
- <-bgReserveWorkersClosed
- }()
-
- select {
- case <-done:
- case <-time.After(3 * time.Second):
- return errors.New("storer closed with bg goroutines running")
- }
-
- return err
-}
-
-func (db *DB) SetRetrievalService(r retrieval.Interface) {
- db.retrieval = r
-}
-
-func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error)) {
- db.setSyncerOnce.Do(func() {
- db.syncer = s
- go db.startReserveWorkers(ctx, radius)
- })
-}
-
-type noopRetrieval struct{}
-
-func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.Address) (swarm.Chunk, error) {
- return nil, storage.ErrNotFound
-}
-
-func (db *DB) ChunkStore() storage.ReadOnlyChunkStore {
- return db.storage.ChunkStore()
-}
-
-func (db *DB) PinIntegrity() *PinIntegrity {
- return db.pinIntegrity
-}
-
-func (db *DB) Lock(strs ...string) func() {
- for _, s := range strs {
- db.multex.Lock(s)
- }
- return func() {
- for _, s := range strs {
- db.multex.Unlock(s)
- }
+// Metrics returns set of prometheus collectors.
+func (db *DB) Metrics() []prometheus.Collector {
+ collectors := m.PrometheusCollectorsFromFields(db.metrics)
+ if v, ok := db.storage.(m.Collector); ok {
+ collectors = append(collectors, v.Metrics()...)
}
+ return collectors
}
-
-func (db *DB) Storage() transaction.Storage {
- return db.storage
-}
-
-type putterSession struct {
- storage.Putter
- done func(swarm.Address) error
- cleanup func() error
-}
-
-func (p *putterSession) Done(addr swarm.Address) error { return p.done(addr) }
-
-func (p *putterSession) Cleanup() error { return p.cleanup() }
diff --git a/pkg/storer/storer_js.go b/pkg/storer/storer_js.go
new file mode 100644
index 00000000000..84db2172c35
--- /dev/null
+++ b/pkg/storer/storer_js.go
@@ -0,0 +1,187 @@
+//go:build js
+// +build js
+
+package storer
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "path"
+ "sync"
+
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/pusher"
+ "github.com/ethersphere/bee/v2/pkg/retrieval"
+ "github.com/ethersphere/bee/v2/pkg/storage/migration"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/cache"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/events"
+ pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/upload"
+ localmigration "github.com/ethersphere/bee/v2/pkg/storer/migration"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "resenje.org/multex"
+)
+
+// DB implements all the component stores described above.
+type DB struct {
+ logger log.Logger
+ tracer *tracing.Tracer
+
+ storage transaction.Storage
+ multex *multex.Multex[any]
+ cacheObj *cache.Cache
+ retrieval retrieval.Interface
+ pusherFeed chan *pusher.Op
+ quit chan struct{}
+ cacheLimiter cacheLimiter
+ dbCloser io.Closer
+ subscriptionsWG sync.WaitGroup
+ events *events.Subscriber
+ directUploadLimiter chan struct{}
+
+ reserve *reserve.Reserve
+ inFlight sync.WaitGroup
+ reserveBinEvents *events.Subscriber
+ baseAddr swarm.Address
+ batchstore postage.Storer
+ validStamp postage.ValidStampFn
+ setSyncerOnce sync.Once
+ syncer Syncer
+ reserveOptions reserveOpts
+
+ pinIntegrity *PinIntegrity
+}
+
+// New returns a newly constructed DB object which implements all the above
+// component stores.
+func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
+ var (
+ err error
+ pinIntegrity *PinIntegrity
+ st transaction.Storage
+ dbCloser io.Closer
+ )
+ if opts == nil {
+ opts = defaultOptions()
+ }
+
+ if opts.Logger == nil {
+ opts.Logger = log.Noop
+ }
+
+ lock := multex.New[any]()
+
+ if dirPath == "" {
+ st, dbCloser, err = initInmemRepository()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ st, pinIntegrity, dbCloser, err = initDiskRepository(ctx, dirPath, opts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err != nil && dbCloser != nil {
+ err = errors.Join(err, dbCloser.Close())
+ }
+ }()
+
+ sharkyBasePath := ""
+ if dirPath != "" {
+ sharkyBasePath = path.Join(dirPath, sharkyPath)
+ }
+
+ err = st.Run(ctx, func(s transaction.Store) error {
+ return migration.Migrate(
+ s.IndexStore(),
+ "migration",
+ localmigration.AfterInitSteps(sharkyBasePath, sharkyNoOfShards, st, opts.Logger),
+ )
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed regular migration: %w", err)
+ }
+
+ cacheObj, err := cache.New(ctx, st.IndexStore(), opts.CacheCapacity)
+ if err != nil {
+ return nil, err
+ }
+
+ logger := opts.Logger.WithName(loggerName).Register()
+
+ clCtx, clCancel := context.WithCancel(ctx)
+ db := &DB{
+ storage: st,
+ logger: logger,
+ tracer: opts.Tracer,
+ baseAddr: opts.Address,
+ multex: lock,
+ cacheObj: cacheObj,
+ retrieval: noopRetrieval{},
+ pusherFeed: make(chan *pusher.Op),
+ quit: make(chan struct{}),
+ cacheLimiter: cacheLimiter{
+ sem: make(chan struct{}, defaultBgCacheWorkers),
+ ctx: clCtx,
+ cancel: clCancel,
+ },
+ dbCloser: dbCloser,
+ batchstore: opts.Batchstore,
+ validStamp: opts.ValidStamp,
+ events: events.NewSubscriber(),
+ reserveBinEvents: events.NewSubscriber(),
+ reserveOptions: reserveOpts{
+ startupStabilizer: opts.StartupStabilizer,
+ wakeupDuration: opts.ReserveWakeUpDuration,
+ minEvictCount: opts.ReserveMinEvictCount,
+ cacheMinEvictCount: opts.CacheMinEvictCount,
+ minimumRadius: uint8(opts.MinimumStorageRadius),
+ capacityDoubling: opts.ReserveCapacityDoubling,
+ },
+ directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes),
+ pinIntegrity: pinIntegrity,
+ }
+
+ if db.validStamp == nil {
+ db.validStamp = postage.ValidStamp(db.batchstore)
+ }
+
+ if opts.ReserveCapacity > 0 {
+ rs, err := reserve.New(
+ opts.Address,
+ st,
+ opts.ReserveCapacity,
+ opts.RadiusSetter,
+ logger,
+ )
+ if err != nil {
+ return nil, err
+ }
+ db.reserve = rs
+
+ }
+
+ // Cleanup any dirty state in upload and pinning stores; this could happen
+ // in case of dirty shutdowns.
+ err = errors.Join(
+ upload.CleanupDirty(db.storage),
+ pinstore.CleanupDirty(db.storage),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ db.inFlight.Add(1)
+ go db.cacheWorker(ctx)
+
+ return db, nil
+}
diff --git a/pkg/storer/storer_shared.go b/pkg/storer/storer_shared.go
new file mode 100644
index 00000000000..881fb74d0c4
--- /dev/null
+++ b/pkg/storer/storer_shared.go
@@ -0,0 +1,524 @@
+// Copyright 2023 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package storer
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "math/big"
+ "os"
+ "path"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ sharedFs "github.com/ethersphere/bee/v2/pkg/fs"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/stabilization"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
+
+ "github.com/ethersphere/bee/v2/pkg/postage"
+ "github.com/ethersphere/bee/v2/pkg/pusher"
+ "github.com/ethersphere/bee/v2/pkg/retrieval"
+ "github.com/ethersphere/bee/v2/pkg/sharky"
+ "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storage/leveldbstore"
+ "github.com/ethersphere/bee/v2/pkg/storage/migration"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/upload"
+ localmigration "github.com/ethersphere/bee/v2/pkg/storer/migration"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/tracing"
+ "github.com/ethersphere/bee/v2/pkg/util/syncutil"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/spf13/afero"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+// PutterSession provides a session around the storage.Putter. On successful
+// completion the session commits all the operations; in case of error, it
+// rolls back the state.
+type PutterSession interface {
+ storage.Putter
+ // Done is used to close the session and optionally assign a swarm.Address to
+ // this session.
+ Done(swarm.Address) error
+ // Cleanup is used to cleanup any state related to this session in case of
+ // any error.
+ Cleanup() error
+}
+
+// SessionInfo is a type which exports the storer tag object. This object
+// stores all the relevant information about a particular session.
+type SessionInfo = upload.TagItem
+
+// UploadStore is a logical component of the storer which deals with the upload
+// of data to swarm.
+type UploadStore interface {
+ // Upload provides a PutterSession which is tied to the tagID. Optionally, if
+ // the user requests to pin the data, a new pinning collection is created.
+ Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error)
+ // NewSession can be used to obtain a tag ID to use for a new Upload session.
+ NewSession() (SessionInfo, error)
+ // Session will show the information about the session.
+ Session(tagID uint64) (SessionInfo, error)
+ // DeleteSession will delete the session info associated with the tag id.
+ DeleteSession(tagID uint64) error
+ // ListSessions will list all the Sessions currently being tracked.
+ ListSessions(offset, limit int) ([]SessionInfo, error)
+}
+
+// PinStore is a logical component of the storer which deals with pinning
+// functionality.
+type PinStore interface {
+ // NewCollection can be used to create a new PutterSession which writes a new
+ // pinning collection. The address passed in during the Done of the session is
+ // used as the root reference.
+ NewCollection(context.Context) (PutterSession, error)
+ // DeletePin deletes all the chunks associated with the collection pointed to
+ // by the swarm.Address passed in.
+ DeletePin(context.Context, swarm.Address) error
+ // Pins returns all the root references of pinning collections.
+ Pins() ([]swarm.Address, error)
+ // HasPin is a helper which checks if a collection exists with the root
+ // reference passed in.
+ HasPin(swarm.Address) (bool, error)
+}
+
+// PinIterator is a helper interface which can be used to iterate over all the
+// chunks in a pinning collection.
+type PinIterator interface {
+ IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error
+}
+
+// CacheStore is a logical component of the storer that deals with cache
+// content.
+type CacheStore interface {
+ // Lookup method provides a storage.Getter wrapped around the underlying
+ // ChunkStore which will update cache related indexes if required on successful
+ // lookups.
+ Lookup() storage.Getter
+ // Cache method provides a storage.Putter which will add the chunks to cache.
+ // This will add the chunk to the underlying store as well as new indexes which
+ // will keep track of the chunk in the cache.
+ Cache() storage.Putter
+}
+
+// NetStore is a logical component of the storer that deals with network. It will
+// push/retrieve chunks from the network.
+type NetStore interface {
+ // DirectUpload provides a session which can be used to push chunks directly
+ // to the network.
+ DirectUpload() PutterSession
+ // Download provides a getter which can be used to download data. If the data
+ // is found locally, it is returned immediately; otherwise it is retrieved from
+ // the network.
+ Download(cache bool) storage.Getter
+ // PusherFeed is the feed for direct push chunks. This can be used by the
+ // pusher component to push out the chunks.
+ PusherFeed() <-chan *pusher.Op
+}
+
+var _ Reserve = (*DB)(nil)
+
+// Reserve is a logical component of the storer that deals with reserve
+// content. It will implement all the core functionality required for the protocols.
+type Reserve interface {
+ ReserveStore
+ EvictBatch(ctx context.Context, batchID []byte) error
+ ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error)
+ ReserveSize() int
+}
+
+// ReserveIterator is a helper interface which can be used to iterate over all
+// the chunks in the reserve.
+type ReserveIterator interface {
+ ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error
+}
+
+// ReserveStore is a logical component of the storer that deals with reserve
+// content. It will implement all the core functionality required for the protocols.
+type ReserveStore interface {
+ ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (swarm.Chunk, error)
+ ReserveHas(addr swarm.Address, batchID []byte, stampHash []byte) (bool, error)
+ ReservePutter() storage.Putter
+ SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
+ ReserveLastBinIDs() ([]uint64, uint64, error)
+ RadiusChecker
+}
+
+// RadiusChecker provides the radius related functionality.
+type RadiusChecker interface {
+ IsWithinStorageRadius(addr swarm.Address) bool
+ StorageRadius() uint8
+ CommittedDepth() uint8
+}
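
`SubscribeBin` hands back a descriptor channel, an unsubscribe func, and an error channel; the consumer must multiplex all three. A hypothetical drain loop (the exact fields of `*BinC` are defined elsewhere in the package):

```go
// drainBin consumes reserve chunk descriptors for one bin, starting from
// the given bin ID, until the context ends or the subscription errors.
func drainBin(ctx context.Context, rs ReserveStore, bin uint8, start uint64) error {
	binC, unsubscribe, errC := rs.SubscribeBin(ctx, bin, start)
	defer unsubscribe() // always release the subscription
	for {
		select {
		case c, ok := <-binC:
			if !ok {
				return nil
			}
			_ = c // *BinC describes one reserve chunk
		case err := <-errC:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
```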
+
+// LocalStore is a read-only ChunkStore. It can be used to check whether a chunk
+// is known locally, but it cannot tell the context of the chunk (whether it is
+// pinned, uploaded, etc.).
+type LocalStore interface {
+ ChunkStore() storage.ReadOnlyChunkStore
+}
+
+// Debugger is a helper interface which can be used to debug the storer.
+type Debugger interface {
+ DebugInfo(context.Context) (Info, error)
+}
+
+type NeighborhoodStats interface {
+ NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error)
+}
+
+type memFS struct {
+ afero.Fs
+}
+
+func (m *memFS) Open(path string) (fs.File, error) {
+ return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
+}
+
+type dirFS struct {
+ basedir string
+}
+
+func (d *dirFS) Open(path string) (fs.File, error) {
+ return sharedFs.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0o644)
+}
+
+var (
+ sharkyNoOfShards = 32
+ ErrDBQuit = errors.New("db quit")
+)
+
+type closerFn func() error
+
+func (c closerFn) Close() error { return c() }
+
+func closer(closers ...io.Closer) io.Closer {
+ return closerFn(func() error {
+ var err error
+ for _, closer := range closers {
+ err = errors.Join(err, closer.Close())
+ }
+ return err
+ })
+}
+
+func initInmemRepository() (transaction.Storage, io.Closer, error) {
+ store, err := leveldbstore.New("", nil)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err)
+ }
+
+ sharky, err := sharky.New(
+ &memFS{Fs: afero.NewMemMapFs()},
+ sharkyNoOfShards,
+ swarm.SocMaxChunkSize,
+ )
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err)
+ }
+
+ return transaction.NewStorage(sharky, store), closer(store, sharky), nil
+}
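
The in-memory repository is convenient for tests, since the returned `io.Closer` tears down both the index store and sharky. A test-style wrapper sketch (`withInmemStorage` is hypothetical and assumes the standard `testing` import):

```go
// withInmemStorage builds an in-memory transaction.Storage for a test and
// registers its teardown via t.Cleanup.
func withInmemStorage(t *testing.T) transaction.Storage {
	st, c, err := initInmemRepository()
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { _ = c.Close() })
	return st
}
```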
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "storer"
+
+// Default options for levelDB.
+const (
+ defaultOpenFilesLimit = uint64(256)
+ defaultBlockCacheCapacity = uint64(32 * 1024 * 1024)
+ defaultWriteBufferSize = uint64(32 * 1024 * 1024)
+ defaultDisableSeeksCompaction = false
+ defaultCacheCapacity = uint64(1_000_000)
+ defaultBgCacheWorkers = 32
+ DefaultReserveCapacity = 1 << 22 // 4194304 chunks
+
+ indexPath = "indexstore"
+ sharkyPath = "sharky"
+)
+
+func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) {
+ ldbBasePath := path.Join(basePath, indexPath)
+
+ if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) {
+ err := os.MkdirAll(ldbBasePath, 0o777)
+ if err != nil {
+ return nil, err
+ }
+ }
+ store, err := leveldbstore.New(ldbBasePath, &opt.Options{
+ OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit),
+ BlockCacheCapacity: int(opts.LdbBlockCacheCapacity),
+ WriteBuffer: int(opts.LdbWriteBufferSize),
+ DisableSeeksCompaction: opts.LdbDisableSeeksCompaction,
+ CompactionL0Trigger: 8,
+ Filter: filter.NewBloomFilter(64),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed creating levelDB index store: %w", err)
+ }
+
+ return store, nil
+}
+
+func initDiskRepository(
+ ctx context.Context,
+ basePath string,
+ opts *Options,
+) (transaction.Storage, *PinIntegrity, io.Closer, error) {
+ store, err := initStore(basePath, opts)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err)
+ }
+
+ err = migration.Migrate(store, "core-migration", localmigration.BeforeInitSteps(store, opts.Logger))
+ if err != nil {
+ return nil, nil, nil, errors.Join(store.Close(), fmt.Errorf("failed core migration: %w", err))
+ }
+
+ if opts.LdbStats.Load() != nil {
+ go func() {
+ ldbStats := opts.LdbStats.Load()
+ logger := log.NewLogger(loggerName).Register()
+ ticker := time.NewTicker(15 * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ stats := new(leveldb.DBStats)
+ switch err := store.DB().Stats(stats); {
+ case errors.Is(err, leveldb.ErrClosed):
+ return
+ case err != nil:
+ logger.Error(err, "snapshot levelDB stats")
+ default:
+ ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount))
+ ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds())
+ ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots))
+ ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators))
+ ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite))
+ ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead))
+ ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize))
+ ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount))
+ ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp))
+ ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp))
+ ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp))
+ ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp))
+ for i := 0; i < len(stats.LevelSizes); i++ {
+ ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i]))
+ ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i]))
+ ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i]))
+ ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i]))
+ ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds())
+ }
+ }
+ }
+ }
+ }()
+ }
+
+ sharkyBasePath := path.Join(basePath, sharkyPath)
+
+ if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) {
+ err := os.Mkdir(sharkyBasePath, 0o777)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ }
+
+ recoveryCloser, err := sharkyRecovery(ctx, sharkyBasePath, store, opts)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to recover sharky: %w", err)
+ }
+
+ sharky, err := sharky.New(
+ &dirFS{basedir: sharkyBasePath},
+ sharkyNoOfShards,
+ swarm.SocMaxChunkSize,
+ )
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed creating sharky instance: %w", err)
+ }
+
+ pinIntegrity := &PinIntegrity{
+ Store: store,
+ Sharky: sharky,
+ }
+
+ return transaction.NewStorage(sharky, store), pinIntegrity, closer(store, sharky, recoveryCloser), nil
+}
+
+const lockKeyNewSession string = "new_session"
+
+// Options provides a container to configure different things in the storer.
+type Options struct {
+ // These options configure levelDB, which currently backs the index store.
+ LdbStats atomic.Pointer[prometheus.HistogramVec]
+ LdbOpenFilesLimit uint64
+ LdbBlockCacheCapacity uint64
+ LdbWriteBufferSize uint64
+ LdbDisableSeeksCompaction bool
+ Logger log.Logger
+ Tracer *tracing.Tracer
+
+ Address swarm.Address
+ StartupStabilizer stabilization.Subscriber
+ Batchstore postage.Storer
+ ValidStamp postage.ValidStampFn
+ RadiusSetter topology.SetStorageRadiuser
+ StateStore storage.StateStorer
+
+ ReserveCapacity int
+ ReserveWakeUpDuration time.Duration
+ ReserveMinEvictCount uint64
+ ReserveCapacityDoubling int
+
+ CacheCapacity uint64
+ CacheMinEvictCount uint64
+
+ MinimumStorageRadius uint
+}
+
+func defaultOptions() *Options {
+ return &Options{
+ LdbOpenFilesLimit: defaultOpenFilesLimit,
+ LdbBlockCacheCapacity: defaultBlockCacheCapacity,
+ LdbWriteBufferSize: defaultWriteBufferSize,
+ LdbDisableSeeksCompaction: defaultDisableSeeksCompaction,
+ CacheCapacity: defaultCacheCapacity,
+ Logger: log.Noop,
+ ReserveCapacity: DefaultReserveCapacity,
+ ReserveWakeUpDuration: time.Minute * 30,
+ }
+}
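
Callers typically start from these defaults and override individual knobs. A sketch with illustrative values, not recommendations:

```go
// tunedOptions starts from the package defaults and overrides a few knobs.
func tunedOptions() *Options {
	opts := defaultOptions()
	opts.LdbOpenFilesLimit = 512                  // allow more open SST files
	opts.LdbBlockCacheCapacity = 64 * 1024 * 1024 // 64 MiB block cache
	opts.ReserveWakeUpDuration = 15 * time.Minute // wake the reserve worker sooner
	return opts
}
```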
+
+// cacheLimiter is used to limit the number
+// of concurrent cache background workers.
+type cacheLimiter struct {
+ wg sync.WaitGroup
+ sem chan struct{}
+ ctx context.Context
+ cancel context.CancelFunc
+}
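
The struct carries only the synchronization primitives; the scheduling code lives elsewhere in the package. A hypothetical helper showing the acquire/release pattern the fields suggest:

```go
// schedule runs fn on a background worker, bounded by the semaphore and
// tracked by the wait group; a sketch assumed from the struct's fields.
func (c *cacheLimiter) schedule(fn func(context.Context)) {
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		select {
		case c.sem <- struct{}{}: // acquire a worker slot
		case <-c.ctx.Done(): // shutdown requested before a slot freed up
			return
		}
		defer func() { <-c.sem }() // release the slot
		fn(c.ctx)
	}()
}
```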
+
+type reserveOpts struct {
+ startupStabilizer stabilization.Subscriber
+ wakeupDuration time.Duration
+ minEvictCount uint64
+ cacheMinEvictCount uint64
+ minimumRadius uint8
+ capacityDoubling int
+}
+
+// ResetReserve removes all reserve entries.
+func (db *DB) ResetReserve(ctx context.Context) error {
+ return db.reserve.Reset(ctx)
+}
+
+func (db *DB) Close() error {
+ close(db.quit)
+
+ bgReserveWorkersClosed := make(chan struct{})
+ go func() {
+ defer close(bgReserveWorkersClosed)
+ if !syncutil.WaitWithTimeout(&db.inFlight, 5*time.Second) {
+ db.logger.Warning("db shutting down with running goroutines")
+ }
+ }()
+
+ bgCacheWorkersClosed := make(chan struct{})
+ go func() {
+ defer close(bgCacheWorkersClosed)
+ if !syncutil.WaitWithTimeout(&db.cacheLimiter.wg, 5*time.Second) {
+ db.logger.Warning("cache goroutines still running after the wait timeout; force closing")
+ db.cacheLimiter.cancel()
+ }
+ }()
+
+ var err error
+ closerDone := make(chan struct{})
+ go func() {
+ defer close(closerDone)
+ err = db.dbCloser.Close()
+ }()
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ <-closerDone
+ <-bgCacheWorkersClosed
+ <-bgReserveWorkersClosed
+ }()
+
+ select {
+ case <-done:
+ case <-time.After(3 * time.Second):
+ return errors.New("storer closed with bg goroutines running")
+ }
+
+ return err
+}
+
+func (db *DB) SetRetrievalService(r retrieval.Interface) {
+ db.retrieval = r
+}
+
+func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error)) {
+ db.setSyncerOnce.Do(func() {
+ db.syncer = s
+ go db.startReserveWorkers(ctx, radius)
+ })
+}
+
+type noopRetrieval struct{}
+
+func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.Address) (swarm.Chunk, error) {
+ return nil, storage.ErrNotFound
+}
+
+func (db *DB) ChunkStore() storage.ReadOnlyChunkStore {
+ return db.storage.ChunkStore()
+}
+
+func (db *DB) PinIntegrity() *PinIntegrity {
+ return db.pinIntegrity
+}
+
+func (db *DB) Lock(strs ...string) func() {
+ for _, s := range strs {
+ db.multex.Lock(s)
+ }
+ return func() {
+ for _, s := range strs {
+ db.multex.Unlock(s)
+ }
+ }
+}
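
`Lock` acquires the named mutexes in order and returns a single release func; callers should pass names in a consistent order to avoid lock-order inversions. A hypothetical wrapper showing the intended pattern (`uploadsLock` is defined in uploadstore_shared.go below):

```go
// withUploadLock runs fn while holding the upload lock.
func withUploadLock(db *DB, fn func()) {
	unlock := db.Lock(uploadsLock)
	defer unlock()
	fn()
}
```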
+
+func (db *DB) Storage() transaction.Storage {
+ return db.storage
+}
+
+type putterSession struct {
+ storage.Putter
+ done func(swarm.Address) error
+ cleanup func() error
+}
+
+func (p *putterSession) Done(addr swarm.Address) error { return p.done(addr) }
+
+func (p *putterSession) Cleanup() error { return p.cleanup() }
diff --git a/pkg/storer/uploadstore.go b/pkg/storer/uploadstore.go
index f4f21fc59ba..81e592cb52a 100644
--- a/pkg/storer/uploadstore.go
+++ b/pkg/storer/uploadstore.go
@@ -1,6 +1,5 @@
-// Copyright 2023 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package storer
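
The `!js` constraint replacing the license header pairs with the `js` and unconstrained files added later in this diff. The resulting layout, summarized as a comment block:

```go
// Build-constraint layout introduced by this diff (one package, three files):
//
//	uploadstore.go         //go:build !js  → every target except js
//	uploadstore_js.go      //go:build js   → GOOS=js (WebAssembly) builds
//	uploadstore_shared.go  no constraint   → compiled for both
//
// The session and reporting methods move to the shared file so the two
// constrained variants only carry the code that actually differs.
```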
@@ -8,7 +7,6 @@ import (
"context"
"errors"
"fmt"
- "sort"
storage "github.com/ethersphere/bee/v2/pkg/storage"
"github.com/ethersphere/bee/v2/pkg/storer/internal"
@@ -18,25 +16,6 @@ import (
"github.com/ethersphere/bee/v2/pkg/swarm"
)
-const uploadsLock = "pin-upload-store"
-
-// Report implements the storage.PushReporter by wrapping the internal reporter
-// with a transaction.
-func (db *DB) Report(ctx context.Context, chunk swarm.Chunk, state storage.ChunkState) error {
-
- unlock := db.Lock(uploadsLock)
- defer unlock()
-
- err := db.storage.Run(ctx, func(s transaction.Store) error {
- return upload.Report(ctx, s, chunk, state)
- })
- if err != nil {
- return fmt.Errorf("reporter.Report: %w", err)
- }
-
- return nil
-}
-
// Upload is the implementation of UploadStore.Upload method.
func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error) {
if tagID == 0 {
@@ -129,48 +108,3 @@ func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession
},
}, nil
}
-
-// NewSession is the implementation of UploadStore.NewSession method.
-func (db *DB) NewSession() (SessionInfo, error) {
- unlock := db.Lock(lockKeyNewSession)
- defer unlock()
-
- trx, done := db.storage.NewTransaction(context.Background())
- defer done()
-
- info, err := upload.NextTag(trx.IndexStore())
- if err != nil {
- return SessionInfo{}, err
- }
- return info, trx.Commit()
-}
-
-// Session is the implementation of the UploadStore.Session method.
-func (db *DB) Session(tagID uint64) (SessionInfo, error) {
- return upload.TagInfo(db.storage.IndexStore(), tagID)
-}
-
-// DeleteSession is the implementation of the UploadStore.DeleteSession method.
-func (db *DB) DeleteSession(tagID uint64) error {
- return db.storage.Run(context.Background(), func(s transaction.Store) error {
- return upload.DeleteTag(s.IndexStore(), tagID)
- })
-}
-
-// ListSessions is the implementation of the UploadStore.ListSessions method.
-func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error) {
- const maxPageSize = 1000
-
- limit = min(limit, maxPageSize)
-
- tags, err := upload.ListAllTags(db.storage.IndexStore())
- if err != nil {
- return nil, err
- }
-
- sort.Slice(tags, func(i, j int) bool {
- return tags[i].TagID < tags[j].TagID
- })
-
- return tags[min(offset, len(tags)):min(offset+limit, len(tags))], nil
-}
diff --git a/pkg/storer/uploadstore_js.go b/pkg/storer/uploadstore_js.go
new file mode 100644
index 00000000000..74fdd3a3919
--- /dev/null
+++ b/pkg/storer/uploadstore_js.go
@@ -0,0 +1,106 @@
+//go:build js
+// +build js
+
+package storer
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal"
+ pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/upload"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+// Upload is the implementation of UploadStore.Upload method.
+func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error) {
+ if tagID == 0 {
+ return nil, fmt.Errorf("storer: tagID required")
+ }
+
+ var (
+ uploadPutter internal.PutterCloserWithReference
+ pinningPutter internal.PutterCloserWithReference
+ err error
+ )
+
+ err = db.storage.Run(ctx, func(s transaction.Store) error {
+ uploadPutter, err = upload.NewPutter(s.IndexStore(), tagID)
+ if err != nil {
+ return fmt.Errorf("upload.NewPutter: %w", err)
+ }
+
+ if pin {
+ pinningPutter, err = pinstore.NewCollection(s.IndexStore())
+ if err != nil {
+ return fmt.Errorf("pinstore.NewCollection: %w", err)
+ }
+ }
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &putterSession{
+ Putter: storage.PutterFunc(func(ctx context.Context, chunk swarm.Chunk) error {
+ unlock := db.Lock(uploadsLock)
+ defer unlock()
+ return errors.Join(
+ db.storage.Run(ctx, func(s transaction.Store) error {
+ return uploadPutter.Put(ctx, s, chunk)
+ }),
+ func() error {
+ if pinningPutter != nil {
+ return db.storage.Run(ctx, func(s transaction.Store) error {
+ return pinningPutter.Put(ctx, s, chunk)
+ })
+ }
+ return nil
+ }(),
+ )
+ }),
+ done: func(address swarm.Address) error {
+ defer db.events.Trigger(subscribePushEventKey)
+ unlock := db.Lock(uploadsLock)
+ defer unlock()
+
+ return errors.Join(
+ db.storage.Run(ctx, func(s transaction.Store) error {
+ return uploadPutter.Close(s.IndexStore(), address)
+ }),
+ func() error {
+ if pinningPutter != nil {
+ return db.storage.Run(ctx, func(s transaction.Store) error {
+ pinErr := pinningPutter.Close(s.IndexStore(), address)
+ if errors.Is(pinErr, pinstore.ErrDuplicatePinCollection) {
+ pinErr = pinningPutter.Cleanup(db.storage)
+ }
+ return pinErr
+ })
+ }
+ return nil
+ }(),
+ )
+ },
+ cleanup: func() error {
+ defer db.events.Trigger(subscribePushEventKey)
+ unlock := db.Lock(uploadsLock)
+ defer unlock()
+ return errors.Join(
+ uploadPutter.Cleanup(db.storage),
+ func() error {
+ if pinningPutter != nil {
+ return pinningPutter.Cleanup(db.storage)
+ }
+ return nil
+ }(),
+ )
+ },
+ }, nil
+}
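
End to end, the wasm Upload path mirrors the native one: open a putter session against an existing tag, `Put` chunks, then seal with the root (or `Cleanup` on failure). A sketch with abbreviated error handling:

```go
// uploadChunks drives one Upload putter session to completion.
func uploadChunks(ctx context.Context, db *DB, tagID uint64, pin bool, root swarm.Address, chunks ...swarm.Chunk) error {
	session, err := db.Upload(ctx, pin, tagID)
	if err != nil {
		return err
	}
	for _, ch := range chunks {
		if err := session.Put(ctx, ch); err != nil {
			return errors.Join(err, session.Cleanup())
		}
	}
	return session.Done(root)
}
```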
diff --git a/pkg/storer/uploadstore_shared.go b/pkg/storer/uploadstore_shared.go
new file mode 100644
index 00000000000..b9ed9ae6767
--- /dev/null
+++ b/pkg/storer/uploadstore_shared.go
@@ -0,0 +1,80 @@
+// Copyright 2023 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package storer
+
+import (
+ "context"
+ "fmt"
+ "sort"
+
+ storage "github.com/ethersphere/bee/v2/pkg/storage"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
+ "github.com/ethersphere/bee/v2/pkg/storer/internal/upload"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+const uploadsLock = "pin-upload-store"
+
+// Report implements the storage.PushReporter by wrapping the internal reporter
+// with a transaction.
+func (db *DB) Report(ctx context.Context, chunk swarm.Chunk, state storage.ChunkState) error {
+ unlock := db.Lock(uploadsLock)
+ defer unlock()
+
+ err := db.storage.Run(ctx, func(s transaction.Store) error {
+ return upload.Report(ctx, s, chunk, state)
+ })
+ if err != nil {
+ return fmt.Errorf("reporter.Report: %w", err)
+ }
+
+ return nil
+}
+
+// NewSession is the implementation of UploadStore.NewSession method.
+func (db *DB) NewSession() (SessionInfo, error) {
+ unlock := db.Lock(lockKeyNewSession)
+ defer unlock()
+
+ trx, done := db.storage.NewTransaction(context.Background())
+ defer done()
+
+ info, err := upload.NextTag(trx.IndexStore())
+ if err != nil {
+ return SessionInfo{}, err
+ }
+ return info, trx.Commit()
+}
+
+// Session is the implementation of the UploadStore.Session method.
+func (db *DB) Session(tagID uint64) (SessionInfo, error) {
+ return upload.TagInfo(db.storage.IndexStore(), tagID)
+}
+
+// DeleteSession is the implementation of the UploadStore.DeleteSession method.
+func (db *DB) DeleteSession(tagID uint64) error {
+ return db.storage.Run(context.Background(), func(s transaction.Store) error {
+ return upload.DeleteTag(s.IndexStore(), tagID)
+ })
+}
+
+// ListSessions is the implementation of the UploadStore.ListSessions method.
+func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error) {
+ const maxPageSize = 1000
+
+ limit = min(limit, maxPageSize)
+
+ tags, err := upload.ListAllTags(db.storage.IndexStore())
+ if err != nil {
+ return nil, err
+ }
+
+ sort.Slice(tags, func(i, j int) bool {
+ return tags[i].TagID < tags[j].TagID
+ })
+
+ return tags[min(offset, len(tags)):min(offset+limit, len(tags))], nil
+}
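
Since `limit` is capped at `maxPageSize` per call, listing every session means paging. A hypothetical exhaustive reader:

```go
// allSessions pages through ListSessions until a short batch signals the end.
func allSessions(db *DB) ([]SessionInfo, error) {
	const page = 1000 // matches maxPageSize above
	var out []SessionInfo
	for offset := 0; ; offset += page {
		batch, err := db.ListSessions(offset, page)
		if err != nil {
			return nil, err
		}
		out = append(out, batch...)
		if len(batch) < page {
			return out, nil
		}
	}
}
```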
diff --git a/pkg/storer/validate.go b/pkg/storer/validate.go
index 0220c4fb899..8ea45338cff 100644
--- a/pkg/storer/validate.go
+++ b/pkg/storer/validate.go
@@ -22,6 +22,8 @@ import (
"github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstore"
pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
"github.com/ethersphere/bee/v2/pkg/swarm"
+
+ universalFs "github.com/ethersphere/bee/v2/pkg/fs"
)
// Validate ensures that all retrievalIndex chunks are correctly stored in sharky.
@@ -228,7 +230,7 @@ func ValidatePinCollectionChunks(ctx context.Context, basePath, pin, location st
location = path.Join(fileLoc, fileName)
- f, err := os.OpenFile(location, os.O_CREATE|os.O_WRONLY, 0644)
+ f, err := universalFs.OpenFile(location, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmt.Errorf("open output file for writing: %w", err)
}
diff --git a/pkg/topology/kademlia/kademlia.go b/pkg/topology/kademlia/kademlia.go
index 78d5feb0c40..3a1ee8d8b42 100644
--- a/pkg/topology/kademlia/kademlia.go
+++ b/pkg/topology/kademlia/kademlia.go
@@ -1,17 +1,12 @@
-// Copyright 2020 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js
package kademlia
import (
"context"
- random "crypto/rand"
- "encoding/json"
"errors"
"fmt"
- "math/big"
- "math/rand"
"path/filepath"
"sync"
"time"
@@ -29,150 +24,8 @@ import (
"github.com/ethersphere/bee/v2/pkg/topology/pslice"
"github.com/ethersphere/bee/v2/pkg/util/ioutil"
ma "github.com/multiformats/go-multiaddr"
- "golang.org/x/sync/errgroup"
)
-// loggerName is the tree path name of the logger for this package.
-const loggerName = "kademlia"
-
-const (
- maxConnAttempts = 1 // when there is maxConnAttempts failed connect calls for a given peer it is considered non-connectable
- maxBootNodeAttempts = 3 // how many attempts to dial to boot-nodes before giving up
- maxNeighborAttempts = 3 // how many attempts to dial to boot-nodes before giving up
-
- addPeerBatchSize = 500
-
- // To avoid context.Timeout errors during network failure, the value of
- // the peerConnectionAttemptTimeout constant must be equal to or greater
- // than 5 seconds (empirically verified).
- peerConnectionAttemptTimeout = 15 * time.Second // timeout for establishing a new connection with peer.
-)
-
-// Default option values
-const (
- defaultBitSuffixLength = 4 // the number of bits used to create pseudo addresses for balancing, 2^4, 16 addresses
- defaultLowWaterMark = 3 // the number of peers in consecutive deepest bins that constitute as nearest neighbours
- defaultSaturationPeers = 8
- defaultOverSaturationPeers = 18
- defaultBootNodeOverSaturationPeers = 20
- defaultShortRetry = 30 * time.Second
- defaultTimeToRetry = 2 * defaultShortRetry
- defaultPruneWakeup = 5 * time.Minute
- defaultBroadcastBinSize = 2
-)
-
-var (
- errOverlayMismatch = errors.New("overlay mismatch")
- errPruneEntry = errors.New("prune entry")
- errEmptyBin = errors.New("empty bin")
- errAnnounceLightNode = errors.New("announcing light node")
-)
-
-type (
- binSaturationFunc func(bin uint8, connected *pslice.PSlice, exclude peerExcludeFunc) bool
- sanctionedPeerFunc func(peer swarm.Address) bool
- pruneFunc func(depth uint8)
- pruneCountFunc func(bin uint8, connected *pslice.PSlice, exclude peerExcludeFunc) (int, int)
- staticPeerFunc func(peer swarm.Address) bool
- peerExcludeFunc func(peer swarm.Address) bool
- excludeFunc func(...im.ExcludeOp) peerExcludeFunc
-)
-
-var noopSanctionedPeerFn = func(_ swarm.Address) bool { return false }
-
-// Options for injecting services to Kademlia.
-type Options struct {
- SaturationFunc binSaturationFunc
- PruneCountFunc pruneCountFunc
- Bootnodes []ma.Multiaddr
- BootnodeMode bool
- PruneFunc pruneFunc
- StaticNodes []swarm.Address
- ExcludeFunc excludeFunc
- DataDir string
-
- BitSuffixLength *int
- TimeToRetry *time.Duration
- ShortRetry *time.Duration
- PruneWakeup *time.Duration
- SaturationPeers *int
- OverSaturationPeers *int
- BootnodeOverSaturationPeers *int
- BroadcastBinSize *int
- LowWaterMark *int
-}
-
-// kadOptions are made from Options with default values set
-type kadOptions struct {
- SaturationFunc binSaturationFunc
- Bootnodes []ma.Multiaddr
- BootnodeMode bool
- PruneCountFunc pruneCountFunc
- PruneFunc pruneFunc
- StaticNodes []swarm.Address
- ExcludeFunc excludeFunc
-
- TimeToRetry time.Duration
- ShortRetry time.Duration
- PruneWakeup time.Duration
- BitSuffixLength int // additional depth of common prefix for bin
- SaturationPeers int
- OverSaturationPeers int
- BootnodeOverSaturationPeers int
- BroadcastBinSize int
- LowWaterMark int
-}
-
-func newKadOptions(o Options) kadOptions {
- ko := kadOptions{
- // copy values
- SaturationFunc: o.SaturationFunc,
- Bootnodes: o.Bootnodes,
- BootnodeMode: o.BootnodeMode,
- PruneFunc: o.PruneFunc,
- StaticNodes: o.StaticNodes,
- ExcludeFunc: o.ExcludeFunc,
- // copy or use default
- TimeToRetry: defaultValDuration(o.TimeToRetry, defaultTimeToRetry),
- ShortRetry: defaultValDuration(o.ShortRetry, defaultShortRetry),
- PruneWakeup: defaultValDuration(o.PruneWakeup, defaultPruneWakeup),
- BitSuffixLength: defaultValInt(o.BitSuffixLength, defaultBitSuffixLength),
- SaturationPeers: defaultValInt(o.SaturationPeers, defaultSaturationPeers),
- OverSaturationPeers: defaultValInt(o.OverSaturationPeers, defaultOverSaturationPeers),
- BootnodeOverSaturationPeers: defaultValInt(o.BootnodeOverSaturationPeers, defaultBootNodeOverSaturationPeers),
- BroadcastBinSize: defaultValInt(o.BroadcastBinSize, defaultBroadcastBinSize),
- LowWaterMark: defaultValInt(o.LowWaterMark, defaultLowWaterMark),
- }
-
- if ko.SaturationFunc == nil {
- ko.SaturationFunc = makeSaturationFunc(ko)
- }
-
- return ko
-}
-
-func defaultValInt(v *int, d int) int {
- if v == nil {
- return d
- }
- return *v
-}
-
-func defaultValDuration(v *time.Duration, d time.Duration) time.Duration {
- if v == nil {
- return d
- }
- return *v
-}
-
-func makeSaturationFunc(o kadOptions) binSaturationFunc {
- os := o.OverSaturationPeers
- if o.BootnodeMode {
- os = o.BootnodeOverSaturationPeers
- }
- return binSaturated(os, isStaticPeer(o.StaticNodes))
-}
-
// Kad is the Swarm forwarding kademlia implementation.
type Kad struct {
opt kadOptions
@@ -285,11 +138,6 @@ func New(
return k, nil
}
-type peerConnInfo struct {
- po uint8
- addr swarm.Address
-}
-
// connectBalanced attempts to connect to the balanced peers first.
func (k *Kad) connectBalanced(wg *sync.WaitGroup, peerConnChan chan<- *peerConnInfo) {
skipPeers := func(peer swarm.Address) bool {
@@ -503,14 +351,6 @@ func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup,
}
}
-// notifyManageLoop notifies kademlia manage loop.
-func (k *Kad) notifyManageLoop() {
- select {
- case k.manageC <- struct{}{}:
- default:
- }
-}
-
// manage is a forever loop that manages the connection to new peers
// once they get added or once others leave.
func (k *Kad) manage() {
@@ -669,81 +509,6 @@ func (k *Kad) manage() {
}
}
-// pruneOversaturatedBins disconnects out of depth peers from oversaturated bins
-// while maintaining the balance of the bin and favoring healthy and reachable peers.
-func (k *Kad) pruneOversaturatedBins(depth uint8) {
- for i := range k.commonBinPrefixes {
-
- if i >= int(depth) {
- return
- }
-
- // skip to next bin if prune count is zero or fewer
- oldCount, pruneCount := k.opt.PruneCountFunc(uint8(i), k.connectedPeers, k.opt.ExcludeFunc(im.Reachability(false)))
- if pruneCount <= 0 {
- continue
- }
-
- for j := 0; j < len(k.commonBinPrefixes[i]); j++ {
-
- // skip to next bin if prune count is zero or fewer
- _, pruneCount := k.opt.PruneCountFunc(uint8(i), k.connectedPeers, k.opt.ExcludeFunc(im.Reachability(false)))
- if pruneCount <= 0 {
- break
- }
-
- binPeers := k.connectedPeers.BinPeers(uint8(i))
- peers := k.balancedSlotPeers(k.commonBinPrefixes[i][j], binPeers, i)
- if len(peers) <= 1 {
- continue
- }
-
- disconnectPeer := swarm.ZeroAddress
- unreachablePeer := swarm.ZeroAddress
- for _, peer := range peers {
- if ss := k.collector.Inspect(peer); ss != nil {
- if !ss.Healthy {
- disconnectPeer = peer
- break
- }
- if ss.Reachability != p2p.ReachabilityStatusPublic {
- unreachablePeer = peer
- }
- }
- }
-
- if disconnectPeer.IsZero() {
- if unreachablePeer.IsZero() {
- disconnectPeer = peers[rand.Intn(len(peers))]
- } else {
- disconnectPeer = unreachablePeer // pick unreachable peer
- }
- }
-
- err := k.p2p.Disconnect(disconnectPeer, "pruned from oversaturated bin")
- if err != nil {
- k.logger.Debug("prune disconnect failed", "error", err)
- }
- }
-
- newCount, _ := k.opt.PruneCountFunc(uint8(i), k.connectedPeers, k.opt.ExcludeFunc(im.Reachability(false)))
-
- k.logger.Debug("pruning", "bin", i, "oldBinSize", oldCount, "newBinSize", newCount)
- }
-}
-
-func (k *Kad) balancedSlotPeers(pseudoAddr swarm.Address, peers []swarm.Address, po int) []swarm.Address {
- var ret []swarm.Address
-
- for _, peer := range peers {
- if int(swarm.ExtendedProximity(peer.Bytes(), pseudoAddr.Bytes())) >= po+k.opt.BitSuffixLength+1 {
- ret = append(ret, peer)
- }
- }
-
- return ret
-}
-
func (k *Kad) Start(ctx context.Context) error {
// always discover bootnodes on startup to exclude them from protocol requests
k.connectBootNodes(ctx)
@@ -789,24 +554,6 @@ func (k *Kad) Start(ctx context.Context) error {
return nil
}
-func (k *Kad) previouslyConnected() []swarm.Address {
- loggerV1 := k.logger.V(1).Register()
-
- now := time.Now()
- ss := k.collector.Snapshot(now)
- loggerV1.Debug("metrics snapshot taken", "elapsed", time.Since(now))
-
- var peers []swarm.Address
-
- for addr, p := range ss {
- if p.ConnectionTotalDuration > 0 {
- peers = append(peers, swarm.NewAddress([]byte(addr)))
- }
- }
-
- return peers
-}
-
func (k *Kad) connectBootNodes(ctx context.Context) {
loggerV1 := k.logger.V(1).Register()
@@ -860,108 +607,6 @@ func (k *Kad) connectBootNodes(ctx context.Context) {
}
}
-// binSaturated indicates whether a certain bin is saturated or not.
-// when a bin is not saturated it means we would like to proactively
-// initiate connections to other peers in the bin.
-func binSaturated(oversaturationAmount int, staticNode staticPeerFunc) binSaturationFunc {
- return func(bin uint8, connected *pslice.PSlice, exclude peerExcludeFunc) bool {
- size := 0
- _ = connected.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
- if po == bin && !exclude(addr) && !staticNode(addr) {
- size++
- }
- return false, false, nil
- })
-
- return size >= oversaturationAmount
- }
-}
-
-// binPruneCount counts how many peers should be pruned from a bin.
-func binPruneCount(oversaturationAmount int, staticNode staticPeerFunc) pruneCountFunc {
- return func(bin uint8, connected *pslice.PSlice, exclude peerExcludeFunc) (int, int) {
- size := 0
- _ = connected.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
- if po == bin && !exclude(addr) && !staticNode(addr) {
- size++
- }
- return false, false, nil
- })
-
- return size, size - oversaturationAmount
- }
-}
-
-// recalcDepth calculates, assigns the new depth, and returns if depth has changed
-func (k *Kad) recalcDepth() {
- k.depthMu.Lock()
- defer k.depthMu.Unlock()
-
- var (
- peers = k.connectedPeers
- exclude = k.opt.ExcludeFunc(im.Reachability(false))
- binCount = 0
- shallowestUnsaturated = uint8(0)
- depth uint8
- )
-
- // handle edge case separately
- if peers.Length() <= k.opt.LowWaterMark {
- k.depth = 0
- return
- }
-
- _ = peers.EachBinRev(func(addr swarm.Address, bin uint8) (bool, bool, error) {
- if exclude(addr) {
- return false, false, nil
- }
- if bin == shallowestUnsaturated {
- binCount++
- return false, false, nil
- }
- if bin > shallowestUnsaturated && binCount < k.opt.SaturationPeers {
- // this means we have less than quickSaturationPeers in the previous bin
- // therefore we can return assuming that bin is the unsaturated one.
- return true, false, nil
- }
- shallowestUnsaturated = bin
- binCount = 1
-
- return false, false, nil
- })
- depth = shallowestUnsaturated
-
- shallowestEmpty, noEmptyBins := peers.ShallowestEmpty()
- // if there are some empty bins and the shallowestEmpty is
- // smaller than the shallowestUnsaturated then set shallowest
- // unsaturated to the empty bin.
- if !noEmptyBins && shallowestEmpty < depth {
- depth = shallowestEmpty
- }
-
- var (
- peersCtr = uint(0)
- candidate = uint8(0)
- )
- _ = peers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
- if exclude(addr) {
- return false, false, nil
- }
- peersCtr++
- if peersCtr >= uint(k.opt.LowWaterMark) {
- candidate = po
- return true, false, nil
- }
- return false, false, nil
- })
-
- if depth > candidate {
- depth = candidate
- }
-
- k.depth = depth
-}
-
// connect connects to a peer and gossips its address to our connected peers,
// as well as sends the peers we are connected to the newly connected peer
func (k *Kad) connect(ctx context.Context, peer swarm.Address, ma ma.Multiaddr) error {
@@ -1032,99 +677,6 @@ func (k *Kad) connect(ctx context.Context, peer swarm.Address, ma ma.Multiaddr)
return k.Announce(ctx, peer, true)
}
-// Announce a newly connected peer to our connected peers, but also
-// notify the peer about our already connected peers
-func (k *Kad) Announce(ctx context.Context, peer swarm.Address, fullnode bool) error {
- var addrs []swarm.Address
-
- depth := k.neighborhoodDepth()
- isNeighbor := swarm.Proximity(peer.Bytes(), k.base.Bytes()) >= depth
-
-outer:
- for bin := uint8(0); bin < swarm.MaxBins; bin++ {
-
- var (
- connectedPeers []swarm.Address
- err error
- )
-
- if bin >= depth && isNeighbor {
- connectedPeers = k.binPeers(bin, false) // broadcast all neighborhood peers
- } else {
- connectedPeers, err = randomSubset(k.binPeers(bin, true), k.opt.BroadcastBinSize)
- if err != nil {
- return err
- }
- }
-
- for _, connectedPeer := range connectedPeers {
- if connectedPeer.Equal(peer) {
- continue
- }
-
- addrs = append(addrs, connectedPeer)
-
- if !fullnode {
- // dont gossip about lightnodes to others.
- continue
- }
- // if kademlia is closing, dont enqueue anymore broadcast requests
- select {
- case <-k.bgBroadcastCtx.Done():
- // we will not interfere with the announce operation by returning here
- continue
- case <-k.halt:
- break outer
- default:
- }
- go func(connectedPeer swarm.Address) {
- // Create a new deadline ctx to prevent goroutine pile up
- cCtx, cCancel := context.WithTimeout(k.bgBroadcastCtx, time.Minute)
- defer cCancel()
-
- if err := k.discovery.BroadcastPeers(cCtx, connectedPeer, peer); err != nil {
- k.logger.Debug("peer gossip failed", "new_peer_address", peer, "connected_peer_address", connectedPeer, "error", err)
- }
- }(connectedPeer)
- }
- }
-
- if len(addrs) == 0 {
- return nil
- }
-
- select {
- case <-k.halt:
- return nil
- default:
- }
-
- err := k.discovery.BroadcastPeers(ctx, peer, addrs...)
- if err != nil {
- k.logger.Error(err, "could not broadcast to peer", "peer_address", peer)
- _ = k.p2p.Disconnect(peer, "failed broadcasting to peer")
- }
-
- return err
-}
-
-// AnnounceTo announces a selected peer to another.
-func (k *Kad) AnnounceTo(ctx context.Context, addressee, peer swarm.Address, fullnode bool) error {
- if !fullnode {
- return errAnnounceLightNode
- }
-
- return k.discovery.BroadcastPeers(ctx, addressee, peer)
-}
-
-// AddPeers adds peers to the knownPeers list.
-// This does not guarantee that a connection will immediately
-// be made to the peer.
-func (k *Kad) AddPeers(addrs ...swarm.Address) {
- k.knownPeers.Add(addrs...)
- k.notifyManageLoop()
-}
-
func (k *Kad) Pick(peer p2p.Peer) bool {
k.metrics.PickCalls.Inc()
if k.bootnode || !peer.FullNode {
@@ -1142,29 +694,6 @@ func (k *Kad) Pick(peer p2p.Peer) bool {
return false
}
-func (k *Kad) binPeers(bin uint8, reachable bool) (peers []swarm.Address) {
- _ = k.EachConnectedPeerRev(func(p swarm.Address, po uint8) (bool, bool, error) {
- if po == bin {
- peers = append(peers, p)
- return false, false, nil
- }
-
- if po > bin {
- return true, false, nil
- }
-
- return false, true, nil
- }, topology.Select{Reachable: reachable})
-
- return
-}
-
-func isStaticPeer(staticNodes []swarm.Address) func(overlay swarm.Address) bool {
- return func(overlay swarm.Address) bool {
- return swarm.ContainsAddress(staticNodes, overlay)
- }
-}
-
// Connected is called when a peer has dialed in.
// If forceConnection is true `overSaturated` is ignored for non-bootnodes.
func (k *Kad) Connected(ctx context.Context, peer p2p.Peer, forceConnection bool) (err error) {
@@ -1195,22 +724,6 @@ func (k *Kad) Connected(ctx context.Context, peer p2p.Peer, forceConnection bool
return k.onConnected(ctx, address)
}
-func (k *Kad) onConnected(ctx context.Context, addr swarm.Address) error {
- if err := k.Announce(ctx, addr, true); err != nil {
- return err
- }
-
- k.knownPeers.Add(addr)
- k.connectedPeers.Add(addr)
- k.waitNext.Remove(addr)
- k.recalcDepth()
- k.notifyManageLoop()
- k.notifyPeerSig()
- k.detector.Record()
-
- return nil
-}
-
// Disconnected is called when peer disconnects.
func (k *Kad) Disconnected(peer p2p.Peer) {
k.logger.Debug("disconnected peer", "peer_address", peer.Address)
@@ -1228,125 +741,6 @@ func (k *Kad) Disconnected(peer p2p.Peer) {
k.notifyPeerSig()
}
-func (k *Kad) notifyPeerSig() {
- k.peerSigMtx.Lock()
- defer k.peerSigMtx.Unlock()
-
- for _, c := range k.peerSig {
- // Every peerSig channel has a buffer capacity of 1,
- // so every receiver will get the signal even if the
- // select statement has the default case to avoid blocking.
- select {
- case c <- struct{}{}:
- default:
- }
- }
-}
-
-func nClosePeerInSlice(peers []swarm.Address, addr swarm.Address, spf sanctionedPeerFunc, minPO uint8) (swarm.Address, bool) {
- for _, peer := range peers {
- if spf(peer) {
- continue
- }
-
- if swarm.ExtendedProximity(peer.Bytes(), addr.Bytes()) >= minPO {
- return peer, true
- }
- }
-
- return swarm.ZeroAddress, false
-}
-
-func (k *Kad) IsReachable() bool {
- return k.reachability == p2p.ReachabilityStatusPublic
-}
-
-// ClosestPeer returns the closest peer to a given address.
-func (k *Kad) ClosestPeer(addr swarm.Address, includeSelf bool, filter topology.Select, skipPeers ...swarm.Address) (swarm.Address, error) {
- if k.connectedPeers.Length() == 0 {
- return swarm.Address{}, topology.ErrNotFound
- }
-
- closest := swarm.ZeroAddress
-
- if includeSelf && k.reachability == p2p.ReachabilityStatusPublic {
- closest = k.base
- }
-
- prox := swarm.Proximity(k.base.Bytes(), addr.Bytes())
-
- // iterate starting from bin 0 to the maximum bin
- err := k.EachConnectedPeerRev(func(peer swarm.Address, bin uint8) (bool, bool, error) {
- if swarm.ContainsAddress(skipPeers, peer) {
- return false, false, nil
- }
-
- if bin > prox && !closest.IsZero() {
- return true, false, nil
- }
-
- if closest.IsZero() {
- closest = peer
- return false, false, nil
- }
-
- closer, err := peer.Closer(addr, closest)
- if closer {
- closest = peer
- }
- if err != nil {
- k.logger.Debug("closest peer", "peer", peer, "addr", addr, "error", err)
- }
- return false, false, nil
- }, filter)
- if err != nil {
- return swarm.Address{}, err
- }
-
- if closest.IsZero() { // no peers
- return swarm.Address{}, topology.ErrNotFound // only for light nodes
- }
-
- // check if self
- if closest.Equal(k.base) {
- return swarm.Address{}, topology.ErrWantSelf
- }
-
- return closest, nil
-}
-
-// EachConnectedPeer implements topology.PeerIterator interface.
-func (k *Kad) EachConnectedPeer(f topology.EachPeerFunc, filter topology.Select) error {
- excludeFunc := k.opt.ExcludeFunc(excludeFromIterator(filter)...)
- return k.connectedPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
- if excludeFunc(addr) {
- return false, false, nil
- }
- return f(addr, po)
- })
-}
-
-// EachConnectedPeerRev implements topology.PeerIterator interface.
-func (k *Kad) EachConnectedPeerRev(f topology.EachPeerFunc, filter topology.Select) error {
- excludeFunc := k.opt.ExcludeFunc(excludeFromIterator(filter)...)
- return k.connectedPeers.EachBinRev(func(addr swarm.Address, po uint8) (bool, bool, error) {
- if excludeFunc(addr) {
- return false, false, nil
- }
- return f(addr, po)
- })
-}
-
-// Reachable sets the peer reachability status.
-func (k *Kad) Reachable(addr swarm.Address, status p2p.ReachabilityStatus) {
- k.collector.Record(addr, im.PeerReachability(status))
- k.logger.Debug("reachability of peer updated", "peer_address", addr, "reachability", status)
- if status == p2p.ReachabilityStatusPublic {
- k.recalcDepth()
- k.notifyManageLoop()
- }
-}
-
// UpdateReachability updates node reachability status.
// The status will be updated only once. Updates to status
// p2p.ReachabilityStatusUnknown are ignored.
@@ -1359,63 +753,6 @@ func (k *Kad) UpdateReachability(status p2p.ReachabilityStatus) {
k.metrics.ReachabilityStatus.WithLabelValues(status.String()).Set(0)
}
-// UpdateReachability updates node reachability status.
-// The status will be updated only once. Updates to status
-// p2p.ReachabilityStatusUnknown are ignored.
-func (k *Kad) UpdatePeerHealth(peer swarm.Address, health bool, dur time.Duration) {
- k.collector.Record(peer, im.PeerHealth(health), im.PeerLatency(dur))
-}
-
-// SubscribeTopologyChange returns the channel that signals when the connected peers
-// set and depth changes. Returned function is safe to be called multiple times.
-func (k *Kad) SubscribeTopologyChange() (c <-chan struct{}, unsubscribe func()) {
- channel := make(chan struct{}, 1)
- var closeOnce sync.Once
-
- k.peerSigMtx.Lock()
- defer k.peerSigMtx.Unlock()
-
- k.peerSig = append(k.peerSig, channel)
-
- unsubscribe = func() {
- k.peerSigMtx.Lock()
- defer k.peerSigMtx.Unlock()
-
- for i, c := range k.peerSig {
- if c == channel {
- k.peerSig = append(k.peerSig[:i], k.peerSig[i+1:]...)
- break
- }
- }
-
- closeOnce.Do(func() { close(channel) })
- }
-
- return channel, unsubscribe
-}
-
-func excludeFromIterator(filter topology.Select) []im.ExcludeOp {
- ops := make([]im.ExcludeOp, 0, 3)
- ops = append(ops, im.Bootnode())
-
- if filter.Reachable {
- ops = append(ops, im.Reachability(false))
- }
- if filter.Healthy {
- ops = append(ops, im.Health(false))
- }
-
- return ops
-}
-
-// NeighborhoodDepth returns the current Kademlia depth.
-func (k *Kad) neighborhoodDepth() uint8 {
- k.depthMu.RLock()
- defer k.depthMu.RUnlock()
-
- return k.storageRadius
-}
-
func (k *Kad) SetStorageRadius(d uint8) {
k.depthMu.Lock()
defer k.depthMu.Unlock()
@@ -1431,215 +768,3 @@ func (k *Kad) SetStorageRadius(d uint8) {
k.notifyManageLoop()
k.notifyPeerSig()
}
-
-func (k *Kad) Snapshot() *topology.KadParams {
- var infos []topology.BinInfo
- for i := int(swarm.MaxPO); i >= 0; i-- {
- infos = append(infos, topology.BinInfo{})
- }
-
- ss := k.collector.Snapshot(time.Now())
-
- _ = k.connectedPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
- infos[po].BinConnected++
- infos[po].ConnectedPeers = append(
- infos[po].ConnectedPeers,
- &topology.PeerInfo{
- Address: addr,
- Metrics: createMetricsSnapshotView(ss[addr.ByteString()]),
- },
- )
- return false, false, nil
- })
-
- // output (k.knownPeers ¬ k.connectedPeers) here to not repeat the peers we already have in the connected peers list
- _ = k.knownPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
- infos[po].BinPopulation++
-
- for _, v := range infos[po].ConnectedPeers {
- // peer already connected, don't show in the known peers list
- if v.Address.Equal(addr) {
- return false, false, nil
- }
- }
-
- infos[po].DisconnectedPeers = append(
- infos[po].DisconnectedPeers,
- &topology.PeerInfo{
- Address: addr,
- Metrics: createMetricsSnapshotView(ss[addr.ByteString()]),
- },
- )
- return false, false, nil
- })
-
- return &topology.KadParams{
- Base: k.base.String(),
- Population: k.knownPeers.Length(),
- Connected: k.connectedPeers.Length(),
- Timestamp: time.Now(),
- NNLowWatermark: k.opt.LowWaterMark,
- Depth: k.neighborhoodDepth(),
- Reachability: k.reachability.String(),
- NetworkAvailability: k.p2p.NetworkStatus().String(),
- Bins: topology.KadBins{
- Bin0: infos[0],
- Bin1: infos[1],
- Bin2: infos[2],
- Bin3: infos[3],
- Bin4: infos[4],
- Bin5: infos[5],
- Bin6: infos[6],
- Bin7: infos[7],
- Bin8: infos[8],
- Bin9: infos[9],
- Bin10: infos[10],
- Bin11: infos[11],
- Bin12: infos[12],
- Bin13: infos[13],
- Bin14: infos[14],
- Bin15: infos[15],
- Bin16: infos[16],
- Bin17: infos[17],
- Bin18: infos[18],
- Bin19: infos[19],
- Bin20: infos[20],
- Bin21: infos[21],
- Bin22: infos[22],
- Bin23: infos[23],
- Bin24: infos[24],
- Bin25: infos[25],
- Bin26: infos[26],
- Bin27: infos[27],
- Bin28: infos[28],
- Bin29: infos[29],
- Bin30: infos[30],
- Bin31: infos[31],
- },
- }
-}
-
-// String returns a string represenstation of Kademlia.
-func (k *Kad) String() string {
- j := k.Snapshot()
- b, err := json.MarshalIndent(j, "", " ")
- if err != nil {
- k.logger.Error(err, "could not marshal kademlia into json")
- return ""
- }
- return string(b)
-}
-
-// Halt stops outgoing connections from happening.
-// This is needed while we shut down, so that further topology
-// changes do not happen while we shut down.
-func (k *Kad) Halt() {
- close(k.halt)
-}
-
-// Close shuts down kademlia.
-func (k *Kad) Close() error {
- k.logger.Info("kademlia shutting down")
- close(k.quit)
- cc := make(chan struct{})
-
- k.bgBroadcastCancel()
-
- go func() {
- k.wg.Wait()
- close(cc)
- }()
-
- eg := errgroup.Group{}
-
- errTimeout := errors.New("timeout")
-
- eg.Go(func() error {
- select {
- case <-cc:
- case <-time.After(peerConnectionAttemptTimeout):
- return fmt.Errorf("kademlia shutting down with running goroutines: %w", errTimeout)
- }
- return nil
- })
-
- eg.Go(func() error {
- select {
- case <-k.done:
- case <-time.After(time.Second * 5):
- return fmt.Errorf("kademlia manage loop did not shut down properly: %w", errTimeout)
- }
- return nil
- })
-
- err := eg.Wait()
-
- k.logger.Info("kademlia persisting peer metrics")
- start := time.Now()
- if err := k.collector.Finalize(start, false); err != nil {
- k.logger.Debug("unable to finalize open sessions", "error", err)
- }
- k.logger.Debug("metrics collector finalized", "elapsed", time.Since(start))
-
- return err
-}
-
-func randomSubset(addrs []swarm.Address, count int) ([]swarm.Address, error) {
- if count >= len(addrs) {
- return addrs, nil
- }
-
- for i := 0; i < len(addrs); i++ {
- b, err := random.Int(random.Reader, big.NewInt(int64(len(addrs))))
- if err != nil {
- return nil, err
- }
- j := int(b.Int64())
- addrs[i], addrs[j] = addrs[j], addrs[i]
- }
-
- return addrs[:count], nil
-}
-
-func (k *Kad) randomPeer(bin uint8) (swarm.Address, error) {
- peers := k.connectedPeers.BinPeers(bin)
-
- for idx := 0; idx < len(peers); {
- // do not consider protected peers
- if k.staticPeer(peers[idx]) {
- peers = append(peers[:idx], peers[idx+1:]...)
- continue
- }
- idx++
- }
-
- if len(peers) == 0 {
- return swarm.ZeroAddress, errEmptyBin
- }
-
- rndIndx, err := random.Int(random.Reader, big.NewInt(int64(len(peers))))
- if err != nil {
- return swarm.ZeroAddress, err
- }
-
- return peers[rndIndx.Int64()], nil
-}
-
-// createMetricsSnapshotView creates new topology.MetricSnapshotView from the
-// given metrics.Snapshot and rounds all the timestamps and durations to its
-// nearest second, except for the peer latency, which is given in milliseconds.
-func createMetricsSnapshotView(ss *im.Snapshot) *topology.MetricSnapshotView {
- if ss == nil {
- return nil
- }
- return &topology.MetricSnapshotView{
- LastSeenTimestamp: time.Unix(0, ss.LastSeenTimestamp).Unix(),
- SessionConnectionRetry: ss.SessionConnectionRetry,
- ConnectionTotalDuration: ss.ConnectionTotalDuration.Truncate(time.Second).Seconds(),
- SessionConnectionDuration: ss.SessionConnectionDuration.Truncate(time.Second).Seconds(),
- SessionConnectionDirection: string(ss.SessionConnectionDirection),
- LatencyEWMA: ss.LatencyEWMA.Milliseconds(),
- Reachability: ss.Reachability.String(),
- Healthy: ss.Healthy,
- }
-}
diff --git a/pkg/topology/kademlia/kademlia_js.go b/pkg/topology/kademlia/kademlia_js.go
new file mode 100644
index 00000000000..c8761472afb
--- /dev/null
+++ b/pkg/topology/kademlia/kademlia_js.go
@@ -0,0 +1,735 @@
+//go:build js
+// +build js
+
+package kademlia
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/addressbook"
+ "github.com/ethersphere/bee/v2/pkg/discovery"
+ "github.com/ethersphere/bee/v2/pkg/log"
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/shed"
+ "github.com/ethersphere/bee/v2/pkg/stabilization"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ im "github.com/ethersphere/bee/v2/pkg/topology/kademlia/internal/metrics"
+ "github.com/ethersphere/bee/v2/pkg/topology/kademlia/internal/waitnext"
+ "github.com/ethersphere/bee/v2/pkg/topology/pslice"
+ "github.com/ethersphere/bee/v2/pkg/util/ioutil"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// Kad is the Swarm forwarding kademlia implementation.
+type Kad struct {
+ opt kadOptions
+ base swarm.Address // this node's overlay address
+ discovery discovery.Driver // the discovery driver
+ addressBook addressbook.Interface // address book to get underlays
+ p2p p2p.Service // p2p service to connect to nodes with
+ commonBinPrefixes [][]swarm.Address // list of address prefixes for each bin
+ connectedPeers *pslice.PSlice // a slice of peers sorted and indexed by po, indexes kept in `bins`
+ knownPeers *pslice.PSlice // a po-aware slice of known peer addresses
+ depth uint8 // current neighborhood depth
+ storageRadius uint8 // storage area of responsibility
+ depthMu sync.RWMutex // protect depth changes
+ manageC chan struct{} // trigger the manage forever loop to connect to new peers
+ peerSig []chan struct{}
+ peerSigMtx sync.Mutex
+ logger log.Logger // logger
+ bootnode bool // indicates whether the node is working in bootnode mode
+ collector *im.Collector
+ quit chan struct{} // quit channel
+ halt chan struct{} // halt channel
+ done chan struct{} // signal that `manage` has quit
+ wg sync.WaitGroup
+ waitNext *waitnext.WaitNext
+ staticPeer staticPeerFunc
+ bgBroadcastCtx context.Context
+ bgBroadcastCancel context.CancelFunc
+ reachability p2p.ReachabilityStatus
+ detector *stabilization.Detector
+}
+
+// New returns a new Kademlia.
+func New(
+ base swarm.Address,
+ addressbook addressbook.Interface,
+ discovery discovery.Driver,
+ p2pSvc p2p.Service,
+ detector *stabilization.Detector,
+ logger log.Logger,
+ o Options,
+) (*Kad, error) {
+ var k *Kad
+
+ if o.DataDir == "" {
+ logger.Warning("using in-mem store for kademlia metrics, no state will be persisted")
+ } else {
+ o.DataDir = filepath.Join(o.DataDir, ioutil.DataPathKademlia)
+ }
+ sdb, err := shed.NewDB(o.DataDir, nil)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create metrics storage: %w", err)
+ }
+ imc, err := im.NewCollector(sdb)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create metrics collector: %w", err)
+ }
+
+ opt := newKadOptions(o)
+
+ k = &Kad{
+ opt: opt,
+ base: base,
+ discovery: discovery,
+ addressBook: addressbook,
+ p2p: p2pSvc,
+ commonBinPrefixes: make([][]swarm.Address, int(swarm.MaxBins)),
+ connectedPeers: pslice.New(int(swarm.MaxBins), base),
+ knownPeers: pslice.New(int(swarm.MaxBins), base),
+ manageC: make(chan struct{}, 1),
+ waitNext: waitnext.New(),
+ logger: logger.WithName(loggerName).Register(),
+ bootnode: opt.BootnodeMode,
+ collector: imc,
+ quit: make(chan struct{}),
+ halt: make(chan struct{}),
+ done: make(chan struct{}),
+ staticPeer: isStaticPeer(opt.StaticNodes),
+ storageRadius: swarm.MaxPO,
+ detector: detector,
+ }
+
+ if k.opt.PruneFunc == nil {
+ k.opt.PruneFunc = k.pruneOversaturatedBins
+ }
+
+ os := k.opt.OverSaturationPeers
+ if k.opt.BootnodeMode {
+ os = k.opt.BootnodeOverSaturationPeers
+ }
+ k.opt.PruneCountFunc = binPruneCount(os, isStaticPeer(k.opt.StaticNodes))
+
+ if k.opt.ExcludeFunc == nil {
+ k.opt.ExcludeFunc = func(f ...im.ExcludeOp) peerExcludeFunc {
+ return func(peer swarm.Address) bool {
+ return k.collector.Exclude(peer, f...)
+ }
+ }
+ }
+
+ if k.opt.BitSuffixLength > 0 {
+ k.commonBinPrefixes = generateCommonBinPrefixes(k.base, k.opt.BitSuffixLength)
+ }
+
+ k.bgBroadcastCtx, k.bgBroadcastCancel = context.WithCancel(context.Background())
+
+ return k, nil
+}
+
+// connectBalanced attempts to connect to the balanced peers first.
+func (k *Kad) connectBalanced(wg *sync.WaitGroup, peerConnChan chan<- *peerConnInfo) {
+ skipPeers := func(peer swarm.Address) bool {
+ return k.waitNext.Waiting(peer)
+ }
+
+ depth := k.neighborhoodDepth()
+
+ for i := range k.commonBinPrefixes {
+
+ binPeersLength := k.knownPeers.BinSize(uint8(i))
+
+ // The balancer should skip bins where the neighborhood connector would
+ // connect to peers anyway and there are not enough known peers to properly
+ // balance the bin.
+ if i >= int(depth) && binPeersLength < len(k.commonBinPrefixes[i]) {
+ continue
+ }
+
+ binPeers := k.knownPeers.BinPeers(uint8(i))
+ binConnectedPeers := k.connectedPeers.BinPeers(uint8(i))
+
+ for j := range k.commonBinPrefixes[i] {
+ pseudoAddr := k.commonBinPrefixes[i][j]
+
+ // Connect to closest known peer which we haven't tried connecting to recently.
+
+ _, exists := nClosePeerInSlice(binConnectedPeers, pseudoAddr, noopSanctionedPeerFn, uint8(i+k.opt.BitSuffixLength+1))
+ if exists {
+ continue
+ }
+
+ closestKnownPeer, exists := nClosePeerInSlice(binPeers, pseudoAddr, skipPeers, uint8(i+k.opt.BitSuffixLength+1))
+ if !exists {
+ continue
+ }
+
+ if k.connectedPeers.Exists(closestKnownPeer) {
+ continue
+ }
+
+ blocklisted, err := k.p2p.Blocklisted(closestKnownPeer)
+ if err != nil {
+ k.logger.Warning("peer blocklist check failed", "error", err)
+ }
+ if blocklisted {
+ continue
+ }
+
+ wg.Add(1)
+ select {
+ case peerConnChan <- &peerConnInfo{
+ po: swarm.Proximity(k.base.Bytes(), closestKnownPeer.Bytes()),
+ addr: closestKnownPeer,
+ }:
+ case <-k.quit:
+ wg.Done()
+ return
+ }
+ }
+ }
+}
+
+// connectNeighbours attempts to connect to the neighbours
+// which were not considered by the connectBalanced method.
+func (k *Kad) connectNeighbours(wg *sync.WaitGroup, peerConnChan chan<- *peerConnInfo) {
+ sent := 0
+ var currentPo uint8 = 0
+
+ _ = k.knownPeers.EachBinRev(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ // out of depth, skip bin
+ if po < k.neighborhoodDepth() {
+ return false, true, nil
+ }
+
+ if po != currentPo {
+ currentPo = po
+ sent = 0
+ }
+
+ if k.connectedPeers.Exists(addr) {
+ return false, false, nil
+ }
+
+ blocklisted, err := k.p2p.Blocklisted(addr)
+ if err != nil {
+ k.logger.Warning("peer blocklist check failed", "error", err)
+ }
+ if blocklisted {
+ return false, false, nil
+ }
+
+ if k.waitNext.Waiting(addr) {
+ return false, false, nil
+ }
+
+ wg.Add(1)
+ select {
+ case peerConnChan <- &peerConnInfo{po: po, addr: addr}:
+ case <-k.quit:
+ wg.Done()
+ return true, false, nil
+ }
+
+ sent++
+
+ // We want 'sent' equal to 'saturationPeers'
+ // in order to skip to the next bin and speed up the topology build.
+ return false, sent == k.opt.SaturationPeers, nil
+ })
+}
+
+// connectionAttemptsHandler handles the connection attempts
+// to peers sent by the producers to the peerConnChan.
+func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup, neighbourhoodChan, balanceChan <-chan *peerConnInfo) {
+ connect := func(peer *peerConnInfo) {
+ bzzAddr, err := k.addressBook.Get(peer.addr)
+ switch {
+ case errors.Is(err, addressbook.ErrNotFound):
+ k.logger.Debug("empty address book entry for peer", "peer_address", peer.addr)
+ k.knownPeers.Remove(peer.addr)
+ return
+ case err != nil:
+ k.logger.Debug("failed to get address book entry for peer", "peer_address", peer.addr, "error", err)
+ return
+ }
+
+ remove := func(peer *peerConnInfo) {
+ k.waitNext.Remove(peer.addr)
+ k.knownPeers.Remove(peer.addr)
+ if err := k.addressBook.Remove(peer.addr); err != nil {
+ k.logger.Debug("could not remove peer from addressbook", "peer_address", peer.addr)
+ }
+ }
+
+ switch err = k.connect(ctx, peer.addr, bzzAddr.Underlay); {
+ case errors.Is(err, p2p.ErrNetworkUnavailable):
+ k.logger.Debug("network unavailable when reaching peer", "peer_overlay_address", peer.addr, "peer_underlay_address", bzzAddr.Underlay)
+ return
+ case errors.Is(err, errPruneEntry):
+ k.logger.Debug("dial to light node", "peer_overlay_address", peer.addr, "peer_underlay_address", bzzAddr.Underlay)
+ remove(peer)
+ return
+ case errors.Is(err, errOverlayMismatch):
+ k.logger.Debug("overlay mismatch has occurred", "peer_overlay_address", peer.addr, "peer_underlay_address", bzzAddr.Underlay)
+ remove(peer)
+ return
+ case errors.Is(err, p2p.ErrPeerBlocklisted):
+ k.logger.Debug("peer still in blocklist", "peer_address", bzzAddr)
+ return
+ case err != nil:
+ k.logger.Debug("peer not reachable from kademlia", "peer_address", bzzAddr, "error", err)
+ return
+ }
+
+ k.waitNext.Set(peer.addr, time.Now().Add(k.opt.ShortRetry), 0)
+
+ k.connectedPeers.Add(peer.addr)
+
+ k.collector.Record(peer.addr, im.PeerLogIn(time.Now(), im.PeerConnectionDirectionOutbound))
+
+ k.recalcDepth()
+
+ k.logger.Debug("connected to peer", "peer_address", peer.addr, "proximity_order", peer.po)
+ k.notifyManageLoop()
+ k.notifyPeerSig()
+ }
+
+ var (
+ // The inProgress map prevents dialing a peer for which
+ // a connection attempt is already in progress.
+ inProgress = make(map[string]bool)
+ inProgressMu sync.Mutex
+ )
+ connAttempt := func(peerConnChan <-chan *peerConnInfo) {
+ for {
+ select {
+ case <-k.quit:
+ return
+ case peer := <-peerConnChan:
+ addr := peer.addr.String()
+
+ if k.waitNext.Waiting(peer.addr) {
+ wg.Done()
+ continue
+ }
+
+ inProgressMu.Lock()
+ if !inProgress[addr] {
+ inProgress[addr] = true
+ inProgressMu.Unlock()
+ connect(peer)
+ inProgressMu.Lock()
+ delete(inProgress, addr)
+ }
+ inProgressMu.Unlock()
+ wg.Done()
+ }
+ }
+ }
+
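+ // spin up 32 dial workers for each of the two producer channels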
+ for range 32 {
+ go connAttempt(balanceChan)
+ go connAttempt(neighbourhoodChan)
+ }
+}
+
+// manage is a forever loop that manages the connection to new peers
+// once they get added or once others leave.
+func (k *Kad) manage() {
+ loggerV1 := k.logger.V(1).Register()
+
+ defer k.wg.Done()
+ defer close(k.done)
+ defer k.logger.Debug("kademlia manage loop exited")
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ <-k.quit
+ cancel()
+ }()
+
+ // The wg makes sure that we wait for all the connection attempts,
+ // spun up by goroutines, to finish before we try the boot-nodes.
+ var wg sync.WaitGroup
+ neighbourhoodChan := make(chan *peerConnInfo)
+ balanceChan := make(chan *peerConnInfo)
+ go k.connectionAttemptsHandler(ctx, &wg, neighbourhoodChan, balanceChan)
+
+ k.wg.Add(1)
+ go func() {
+ defer k.wg.Done()
+ for {
+ select {
+ case <-k.halt:
+ return
+ case <-k.quit:
+ return
+ case <-time.After(k.opt.PruneWakeup):
+ k.opt.PruneFunc(k.neighborhoodDepth())
+ }
+ }
+ }()
+
+ k.wg.Add(1)
+ go func() {
+ defer k.wg.Done()
+ for {
+ select {
+ case <-k.halt:
+ return
+ case <-k.quit:
+ return
+ case <-time.After(5 * time.Minute):
+ start := time.Now()
+ loggerV1.Debug("starting to flush metrics", "start_time", start)
+ if err := k.collector.Flush(); err != nil {
+ k.logger.Debug("unable to flush metrics counters to the persistent store", "error", err)
+ } else {
+ loggerV1.Debug("flush metrics done", "elapsed", time.Since(start))
+ }
+ }
+ }
+ }()
+
+ // tell each neighbor about other neighbors periodically
+ k.wg.Add(1)
+ go func() {
+ defer k.wg.Done()
+ for {
+ select {
+ case <-k.halt:
+ return
+ case <-k.quit:
+ return
+ case <-time.After(15 * time.Minute):
+ var neighbors []swarm.Address
+ _ = k.connectedPeers.EachBin(func(addr swarm.Address, bin uint8) (stop bool, jumpToNext bool, err error) {
+ if bin < k.neighborhoodDepth() {
+ return true, false, nil
+ }
+ neighbors = append(neighbors, addr)
+ return false, false, nil
+ })
+ for i, peer := range neighbors {
+ // use a full slice expression so append cannot overwrite neighbors[i:]
+ if err := k.discovery.BroadcastPeers(ctx, peer, append(neighbors[:i:i], neighbors[i+1:]...)...); err != nil {
+ k.logger.Debug("broadcast neighborhood failure", "peer_address", peer, "error", err)
+ }
+ }
+ }
+ }
+ }()
+
+ for {
+ select {
+ case <-k.quit:
+ return
+ case <-time.After(15 * time.Second):
+ k.notifyManageLoop()
+ case <-k.manageC:
+ start := time.Now()
+
+ select {
+ case <-k.halt:
+ // halt stops dial-outs while shutting down
+ return
+ case <-k.quit:
+ return
+ default:
+ }
+
+ if k.bootnode {
+ continue
+ }
+
+ oldDepth := k.neighborhoodDepth()
+ k.connectBalanced(&wg, balanceChan)
+ k.connectNeighbours(&wg, neighbourhoodChan)
+ wg.Wait()
+
+ depth := k.neighborhoodDepth()
+
+ loggerV1.Debug("connector finished", "elapsed", time.Since(start), "old_depth", oldDepth, "new_depth", depth)
+
+ if k.connectedPeers.Length() == 0 {
+ select {
+ case <-k.halt:
+ continue
+ default:
+ }
+ k.logger.Debug("kademlia: no connected peers, trying bootnodes")
+ k.connectBootNodes(ctx)
+ }
+ }
+ }
+}
+
+func (k *Kad) Start(ctx context.Context) error {
+ // always discover bootnodes on startup to exclude them from protocol requests
+ k.connectBootNodes(ctx)
+
+ k.wg.Add(1)
+ go k.manage()
+
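+ // re-add the peers we were connected to in previous sessions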
+ k.AddPeers(k.previouslyConnected()...)
+
+ go func() {
+ select {
+ case <-k.halt:
+ return
+ case <-k.quit:
+ return
+ default:
+ }
+ var (
+ addresses []swarm.Address
+ )
+
+ err := k.addressBook.IterateOverlays(func(addr swarm.Address) (stop bool, err error) {
+ addresses = append(addresses, addr)
+ if len(addresses) == addPeerBatchSize {
+ k.AddPeers(addresses...)
+ addresses = nil
+ }
+ return false, nil
+ })
+ if err != nil {
+ k.logger.Error(err, "addressbook iterate overlays failed")
+ return
+ }
+ k.AddPeers(addresses...)
+ }()
+
+ // trigger the first manage loop immediately so that
+ // we can start connecting to the bootnode quickly
+ k.notifyManageLoop()
+
+ return nil
+}
+
+func (k *Kad) connectBootNodes(ctx context.Context) {
+ loggerV1 := k.logger.V(1).Register()
+
+ var attempts, connected int
+ totalAttempts := maxBootNodeAttempts * len(k.opt.Bootnodes)
+
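+ // bound the whole bootnode discovery round with a single timeout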
+ ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
+ defer cancel()
+
+ for _, addr := range k.opt.Bootnodes {
+ if attempts >= totalAttempts || connected >= 3 {
+ return
+ }
+
+ if _, err := p2p.Discover(ctx, addr, func(addr ma.Multiaddr) (stop bool, err error) {
+ loggerV1.Debug("connecting to bootnode", "bootnode_address", addr)
+ if attempts >= maxBootNodeAttempts {
+ return true, nil
+ }
+ bzzAddress, err := k.p2p.Connect(ctx, addr)
+
+ attempts++
+
+ if err != nil {
+ if !errors.Is(err, p2p.ErrAlreadyConnected) {
+ k.logger.Debug("connect to bootnode failed", "bootnode_address", addr, "error", err)
+ k.logger.Warning("connect to bootnode failed", "bootnode_address", addr)
+ return false, err
+ }
+ k.logger.Debug("connect to bootnode failed", "bootnode_address", addr, "error", err)
+ return false, nil
+ }
+
+ if err := k.onConnected(ctx, bzzAddress.Overlay); err != nil {
+ return false, err
+ }
+
+ k.collector.Record(bzzAddress.Overlay, im.PeerLogIn(time.Now(), im.PeerConnectionDirectionOutbound), im.IsBootnode(true))
+ loggerV1.Debug("connected to bootnode", "bootnode_address", addr)
+ connected++
+
+ // connect to max 3 bootnodes
+ return connected >= 3, nil
+ }); err != nil && !errors.Is(err, context.Canceled) {
+ k.logger.Debug("discover to bootnode failed", "bootnode_address", addr, "error", err)
+ k.logger.Warning("discover to bootnode failed", "bootnode_address", addr)
+ return
+ }
+ }
+}
+
+// connect connects to a peer, gossips its address to our connected peers,
+// and sends our connected peers to the newly connected peer.
+func (k *Kad) connect(ctx context.Context, peer swarm.Address, ma ma.Multiaddr) error {
+ k.logger.Debug("attempting connect to peer", "peer_address", peer)
+
+ ctx, cancel := context.WithTimeout(ctx, peerConnectionAttemptTimeout)
+ defer cancel()
+
+ switch i, err := k.p2p.Connect(ctx, ma); {
+ case errors.Is(err, p2p.ErrNetworkUnavailable):
+ return err
+ case k.p2p.NetworkStatus() == p2p.NetworkStatusUnavailable:
+ return p2p.ErrNetworkUnavailable
+ case errors.Is(err, p2p.ErrDialLightNode):
+ return errPruneEntry
+ case errors.Is(err, p2p.ErrAlreadyConnected):
+ if !i.Overlay.Equal(peer) {
+ return errOverlayMismatch
+ }
+ return nil
+ case errors.Is(err, context.Canceled):
+ return err
+ case errors.Is(err, p2p.ErrPeerBlocklisted):
+ return err
+ case err != nil:
+ k.logger.Debug("could not connect to peer", "peer_address", peer, "error", err)
+
+ retryTime := time.Now().Add(k.opt.TimeToRetry)
+ var e *p2p.ConnectionBackoffError
+ failedAttempts := 0
+ if errors.As(err, &e) {
+ retryTime = e.TryAfter()
+ } else {
+ failedAttempts = k.waitNext.Attempts(peer)
+ failedAttempts++
+ }
+
+ k.collector.Record(peer, im.IncSessionConnectionRetry())
+
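+ // peers within the neighborhood get more dial attempts before being pruned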
+ maxAttempts := maxConnAttempts
+ if swarm.Proximity(k.base.Bytes(), peer.Bytes()) >= k.neighborhoodDepth() {
+ maxAttempts = maxNeighborAttempts
+ }
+
+ if failedAttempts >= maxAttempts {
+ k.waitNext.Remove(peer)
+ k.knownPeers.Remove(peer)
+ if err := k.addressBook.Remove(peer); err != nil {
+ k.logger.Debug("could not remove peer from addressbook", "peer_address", peer)
+ }
+ k.logger.Debug("peer pruned from address book", "peer_address", peer)
+ } else {
+ k.waitNext.Set(peer, retryTime, failedAttempts)
+ }
+
+ return err
+ case !i.Overlay.Equal(peer):
+ _ = k.p2p.Disconnect(peer, errOverlayMismatch.Error())
+ _ = k.p2p.Disconnect(i.Overlay, errOverlayMismatch.Error())
+ return errOverlayMismatch
+ }
+
+ k.detector.Record()
+
+ return k.Announce(ctx, peer, true)
+}
+
+func (k *Kad) Pick(peer p2p.Peer) bool {
+ if k.bootnode || !peer.FullNode {
+ // short-circuit for bootnode mode and light node peers: always accept connections,
+ // at least until we find a better solution.
+ return true
+ }
+ po := swarm.Proximity(k.base.Bytes(), peer.Address.Bytes())
+ oversaturated := k.opt.SaturationFunc(po, k.connectedPeers, k.opt.ExcludeFunc(im.Reachability(false)))
+ // pick the peer only if the bin is not oversaturated
+ return !oversaturated
+}
+
+// Connected is called when a peer has dialed in.
+// If forceConnection is true `overSaturated` is ignored for non-bootnodes.
+func (k *Kad) Connected(ctx context.Context, peer p2p.Peer, forceConnection bool) (err error) {
+ defer func() {
+ if err == nil {
+ k.collector.Record(peer.Address, im.PeerLogIn(time.Now(), im.PeerConnectionDirectionInbound))
+ }
+ }()
+
+ address := peer.Address
+ po := swarm.Proximity(k.base.Bytes(), address.Bytes())
+
+ if overSaturated := k.opt.SaturationFunc(po, k.connectedPeers, k.opt.ExcludeFunc(im.Reachability(false))); overSaturated {
+ if k.bootnode {
+ randPeer, err := k.randomPeer(po)
+ if err != nil {
+ return fmt.Errorf("failed to get random peer to kick-out: %w", err)
+ }
+ _ = k.p2p.Disconnect(randPeer, "kicking out random peer to accommodate node")
+ return k.onConnected(ctx, address)
+ }
+ if !forceConnection {
+ return topology.ErrOversaturated
+ }
+ }
+
+ return k.onConnected(ctx, address)
+}
+
+// Disconnected is called when peer disconnects.
+func (k *Kad) Disconnected(peer p2p.Peer) {
+ k.logger.Debug("disconnected peer", "peer_address", peer.Address)
+
+ k.connectedPeers.Remove(peer.Address)
+
+ k.waitNext.SetTryAfter(peer.Address, time.Now().Add(k.opt.TimeToRetry))
+
+ k.collector.Record(peer.Address, im.PeerLogOut(time.Now()))
+
+ k.recalcDepth()
+
+ k.notifyManageLoop()
+ k.notifyPeerSig()
+}
+
+// UpdateReachability updates node reachability status.
+// The status will be updated only once. Updates to status
+// p2p.ReachabilityStatusUnknown are ignored.
+func (k *Kad) UpdateReachability(status p2p.ReachabilityStatus) {
+ if status == p2p.ReachabilityStatusUnknown {
+ return
+ }
+ k.logger.Debug("reachability updated", "reachability", status)
+ k.reachability = status
+}
+
+func (k *Kad) SetStorageRadius(d uint8) {
+ k.depthMu.Lock()
+ defer k.depthMu.Unlock()
+
+ if k.storageRadius == d {
+ return
+ }
+
+ k.storageRadius = d
+ k.logger.Debug("kademlia set storage radius", "radius", k.storageRadius)
+
+ k.notifyManageLoop()
+ k.notifyPeerSig()
+}
diff --git a/pkg/topology/kademlia/kademlia_shared.go b/pkg/topology/kademlia/kademlia_shared.go
new file mode 100644
index 00000000000..b03c4789bae
--- /dev/null
+++ b/pkg/topology/kademlia/kademlia_shared.go
@@ -0,0 +1,894 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package kademlia
+
+import (
+ "context"
+ random "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ im "github.com/ethersphere/bee/v2/pkg/topology/kademlia/internal/metrics"
+ "github.com/ethersphere/bee/v2/pkg/topology/pslice"
+ ma "github.com/multiformats/go-multiaddr"
+ "golang.org/x/sync/errgroup"
+)
+
+// loggerName is the tree path name of the logger for this package.
+const loggerName = "kademlia"
+
+const (
+ maxConnAttempts = 1 // after maxConnAttempts failed connect calls for a given peer it is considered non-connectable
+ maxBootNodeAttempts = 3 // how many attempts to dial to boot-nodes before giving up
+ maxNeighborAttempts = 3 // how many attempts to dial to neighbor peers before giving up
+
+ addPeerBatchSize = 500
+
+ // To avoid context.Timeout errors during network failure, the value of
+ // the peerConnectionAttemptTimeout constant must be equal to or greater
+ // than 5 seconds (empirically verified).
+ peerConnectionAttemptTimeout = 15 * time.Second // timeout for establishing a new connection with peer.
+)
+
+// Default option values
+const (
+ defaultBitSuffixLength = 4 // the number of bits used to create pseudo addresses for balancing, 2^4, 16 addresses
+ defaultLowWaterMark = 3 // the number of peers in consecutive deepest bins that constitute the nearest neighbours
+ defaultSaturationPeers = 8
+ defaultOverSaturationPeers = 18
+ defaultBootNodeOverSaturationPeers = 20
+ defaultShortRetry = 30 * time.Second
+ defaultTimeToRetry = 2 * defaultShortRetry
+ defaultPruneWakeup = 5 * time.Minute
+ defaultBroadcastBinSize = 2
+)
+
+var (
+ errOverlayMismatch = errors.New("overlay mismatch")
+ errPruneEntry = errors.New("prune entry")
+ errEmptyBin = errors.New("empty bin")
+ errAnnounceLightNode = errors.New("announcing light node")
+)
+
+type (
+ binSaturationFunc func(bin uint8, connected *pslice.PSlice, exclude peerExcludeFunc) bool
+ sanctionedPeerFunc func(peer swarm.Address) bool
+ pruneFunc func(depth uint8)
+ pruneCountFunc func(bin uint8, connected *pslice.PSlice, exclude peerExcludeFunc) (int, int)
+ staticPeerFunc func(peer swarm.Address) bool
+ peerExcludeFunc func(peer swarm.Address) bool
+ excludeFunc func(...im.ExcludeOp) peerExcludeFunc
+)
+
+var noopSanctionedPeerFn = func(_ swarm.Address) bool { return false }
+
+// Options for injecting services to Kademlia.
+type Options struct {
+ SaturationFunc binSaturationFunc
+ PruneCountFunc pruneCountFunc
+ Bootnodes []ma.Multiaddr
+ BootnodeMode bool
+ PruneFunc pruneFunc
+ StaticNodes []swarm.Address
+ ExcludeFunc excludeFunc
+ DataDir string
+
+ BitSuffixLength *int
+ TimeToRetry *time.Duration
+ ShortRetry *time.Duration
+ PruneWakeup *time.Duration
+ SaturationPeers *int
+ OverSaturationPeers *int
+ BootnodeOverSaturationPeers *int
+ BroadcastBinSize *int
+ LowWaterMark *int
+}
+
+// kadOptions are made from Options with default values set
+type kadOptions struct {
+ SaturationFunc binSaturationFunc
+ Bootnodes []ma.Multiaddr
+ BootnodeMode bool
+ PruneCountFunc pruneCountFunc
+ PruneFunc pruneFunc
+ StaticNodes []swarm.Address
+ ExcludeFunc excludeFunc
+
+ TimeToRetry time.Duration
+ ShortRetry time.Duration
+ PruneWakeup time.Duration
+ BitSuffixLength int // additional depth of common prefix for bin
+ SaturationPeers int
+ OverSaturationPeers int
+ BootnodeOverSaturationPeers int
+ BroadcastBinSize int
+ LowWaterMark int
+}
+
+func newKadOptions(o Options) kadOptions {
+ ko := kadOptions{
+ // copy values
+ SaturationFunc: o.SaturationFunc,
+ Bootnodes: o.Bootnodes,
+ BootnodeMode: o.BootnodeMode,
+ PruneFunc: o.PruneFunc,
+ StaticNodes: o.StaticNodes,
+ ExcludeFunc: o.ExcludeFunc,
+ // copy or use default
+ TimeToRetry: defaultValDuration(o.TimeToRetry, defaultTimeToRetry),
+ ShortRetry: defaultValDuration(o.ShortRetry, defaultShortRetry),
+ PruneWakeup: defaultValDuration(o.PruneWakeup, defaultPruneWakeup),
+ BitSuffixLength: defaultValInt(o.BitSuffixLength, defaultBitSuffixLength),
+ SaturationPeers: defaultValInt(o.SaturationPeers, defaultSaturationPeers),
+ OverSaturationPeers: defaultValInt(o.OverSaturationPeers, defaultOverSaturationPeers),
+ BootnodeOverSaturationPeers: defaultValInt(o.BootnodeOverSaturationPeers, defaultBootNodeOverSaturationPeers),
+ BroadcastBinSize: defaultValInt(o.BroadcastBinSize, defaultBroadcastBinSize),
+ LowWaterMark: defaultValInt(o.LowWaterMark, defaultLowWaterMark),
+ }
+
+ if ko.SaturationFunc == nil {
+ ko.SaturationFunc = makeSaturationFunc(ko)
+ }
+
+ return ko
+}
+
+func defaultValInt(v *int, d int) int {
+ if v == nil {
+ return d
+ }
+ return *v
+}
+
+func defaultValDuration(v *time.Duration, d time.Duration) time.Duration {
+ if v == nil {
+ return d
+ }
+ return *v
+}
+
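+// makeSaturationFunc builds the bin-saturation predicate, using the
+// bootnode oversaturation limit when running in bootnode mode.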
+func makeSaturationFunc(o kadOptions) binSaturationFunc {
+ os := o.OverSaturationPeers
+ if o.BootnodeMode {
+ os = o.BootnodeOverSaturationPeers
+ }
+ return binSaturated(os, isStaticPeer(o.StaticNodes))
+}
+
+type peerConnInfo struct {
+ po uint8
+ addr swarm.Address
+}
+
+// notifyManageLoop notifies kademlia manage loop.
+func (k *Kad) notifyManageLoop() {
+ select {
+ case k.manageC <- struct{}{}:
+ default:
+ }
+}
+
+// pruneOversaturatedBins disconnects out of depth peers from oversaturated bins
+// while maintaining the balance of the bin and favoring healthy and reachable peers.
+func (k *Kad) pruneOversaturatedBins(depth uint8) {
+ for i := range k.commonBinPrefixes {
+
+ if i >= int(depth) {
+ return
+ }
+
+ // skip to the next bin if the prune count is not positive
+ oldCount, pruneCount := k.opt.PruneCountFunc(uint8(i), k.connectedPeers, k.opt.ExcludeFunc(im.Reachability(false)))
+ if pruneCount <= 0 {
+ continue
+ }
+
+ for j := 0; j < len(k.commonBinPrefixes[i]); j++ {
+
+ // stop pruning this bin once the prune count is no longer positive
+ _, pruneCount := k.opt.PruneCountFunc(uint8(i), k.connectedPeers, k.opt.ExcludeFunc(im.Reachability(false)))
+ if pruneCount <= 0 {
+ break
+ }
+
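+ // gather the connected peers that occupy this balanced slot of the bin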
+ binPeers := k.connectedPeers.BinPeers(uint8(i))
+ peers := k.balancedSlotPeers(k.commonBinPrefixes[i][j], binPeers, i)
+ if len(peers) <= 1 {
+ continue
+ }
+
+ disconnectPeer := swarm.ZeroAddress
+ unreachablePeer := swarm.ZeroAddress
+ for _, peer := range peers {
+ if ss := k.collector.Inspect(peer); ss != nil {
+ if !ss.Healthy {
+ disconnectPeer = peer
+ break
+ }
+ if ss.Reachability != p2p.ReachabilityStatusPublic {
+ unreachablePeer = peer
+ }
+ }
+ }
+
+ if disconnectPeer.IsZero() {
+ if unreachablePeer.IsZero() {
+ disconnectPeer = peers[rand.Intn(len(peers))]
+ } else {
+ disconnectPeer = unreachablePeer // pick unreachable peer
+ }
+ }
+
+ err := k.p2p.Disconnect(disconnectPeer, "pruned from oversaturated bin")
+ if err != nil {
+ k.logger.Debug("prune disconnect failed", "error", err)
+ }
+ }
+
+ newCount, _ := k.opt.PruneCountFunc(uint8(i), k.connectedPeers, k.opt.ExcludeFunc(im.Reachability(false)))
+
+ k.logger.Debug("pruning", "bin", i, "oldBinSize", oldCount, "newBinSize", newCount)
+ }
+}
+
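+// balancedSlotPeers returns the peers of the bin whose extended proximity to
+// the pseudo address reaches the balanced-slot prefix length (po + BitSuffixLength + 1).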
+func (k *Kad) balancedSlotPeers(pseudoAddr swarm.Address, peers []swarm.Address, po int) []swarm.Address {
+ var ret []swarm.Address
+
+ for _, peer := range peers {
+ if int(swarm.ExtendedProximity(peer.Bytes(), pseudoAddr.Bytes())) >= po+k.opt.BitSuffixLength+1 {
+ ret = append(ret, peer)
+ }
+ }
+
+ return ret
+}
+
+func (k *Kad) previouslyConnected() []swarm.Address {
+ loggerV1 := k.logger.V(1).Register()
+
+ now := time.Now()
+ ss := k.collector.Snapshot(now)
+ loggerV1.Debug("metrics snapshot taken", "elapsed", time.Since(now))
+
+ var peers []swarm.Address
+
+ for addr, p := range ss {
+ if p.ConnectionTotalDuration > 0 {
+ peers = append(peers, swarm.NewAddress([]byte(addr)))
+ }
+ }
+
+ return peers
+}
+
+// binSaturated indicates whether a certain bin is saturated or not.
+// When a bin is not saturated, we proactively
+// initiate connections to other peers in the bin.
+func binSaturated(oversaturationAmount int, staticNode staticPeerFunc) binSaturationFunc {
+ return func(bin uint8, connected *pslice.PSlice, exclude peerExcludeFunc) bool {
+ size := 0
+ _ = connected.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ if po == bin && !exclude(addr) && !staticNode(addr) {
+ size++
+ }
+ return false, false, nil
+ })
+
+ return size >= oversaturationAmount
+ }
+}
+
+// binPruneCount returns the current size of a bin and how many peers should be pruned from it.
+func binPruneCount(oversaturationAmount int, staticNode staticPeerFunc) pruneCountFunc {
+ return func(bin uint8, connected *pslice.PSlice, exclude peerExcludeFunc) (int, int) {
+ size := 0
+ _ = connected.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ if po == bin && !exclude(addr) && !staticNode(addr) {
+ size++
+ }
+ return false, false, nil
+ })
+
+ return size, size - oversaturationAmount
+ }
+}
+
+// recalcDepth calculates and assigns the new neighborhood depth.
+func (k *Kad) recalcDepth() {
+ k.depthMu.Lock()
+ defer k.depthMu.Unlock()
+
+ var (
+ peers = k.connectedPeers
+ exclude = k.opt.ExcludeFunc(im.Reachability(false))
+ binCount = 0
+ shallowestUnsaturated = uint8(0)
+ depth uint8
+ )
+
+ // handle edge case separately
+ if peers.Length() <= k.opt.LowWaterMark {
+ k.depth = 0
+ return
+ }
+
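+ // walk the bins from shallowest to deepest to find the shallowest unsaturated bin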
+ _ = peers.EachBinRev(func(addr swarm.Address, bin uint8) (bool, bool, error) {
+ if exclude(addr) {
+ return false, false, nil
+ }
+ if bin == shallowestUnsaturated {
+ binCount++
+ return false, false, nil
+ }
+ if bin > shallowestUnsaturated && binCount < k.opt.SaturationPeers {
+ // this means we have fewer than SaturationPeers in the previous bin,
+ // therefore we can return, assuming that bin is the unsaturated one.
+ return true, false, nil
+ }
+ shallowestUnsaturated = bin
+ binCount = 1
+
+ return false, false, nil
+ })
+ depth = shallowestUnsaturated
+
+ shallowestEmpty, noEmptyBins := peers.ShallowestEmpty()
+ // if there are some empty bins and the shallowestEmpty is
+ // smaller than the shallowestUnsaturated then set shallowest
+ // unsaturated to the empty bin.
+ if !noEmptyBins && shallowestEmpty < depth {
+ depth = shallowestEmpty
+ }
+
+ var (
+ peersCtr = uint(0)
+ candidate = uint8(0)
+ )
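+ // find the bin that still keeps at least LowWaterMark peers within
+ // depth and use it to cap the depth below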
+ _ = peers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ if exclude(addr) {
+ return false, false, nil
+ }
+ peersCtr++
+ if peersCtr >= uint(k.opt.LowWaterMark) {
+ candidate = po
+ return true, false, nil
+ }
+ return false, false, nil
+ })
+
+ if depth > candidate {
+ depth = candidate
+ }
+
+ k.depth = depth
+}
+
+// Announce announces a newly connected peer to our connected peers and
+// notifies the peer about our already connected peers.
+func (k *Kad) Announce(ctx context.Context, peer swarm.Address, fullnode bool) error {
+ var addrs []swarm.Address
+
+ depth := k.neighborhoodDepth()
+ isNeighbor := swarm.Proximity(peer.Bytes(), k.base.Bytes()) >= depth
+
+outer:
+ for bin := uint8(0); bin < swarm.MaxBins; bin++ {
+
+ var (
+ connectedPeers []swarm.Address
+ err error
+ )
+
+ if bin >= depth && isNeighbor {
+ connectedPeers = k.binPeers(bin, false) // broadcast all neighborhood peers
+ } else {
+ connectedPeers, err = randomSubset(k.binPeers(bin, true), k.opt.BroadcastBinSize)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, connectedPeer := range connectedPeers {
+ if connectedPeer.Equal(peer) {
+ continue
+ }
+
+ addrs = append(addrs, connectedPeer)
+
+ if !fullnode {
+ // don't gossip about light nodes to others.
+ continue
+ }
+ // if kademlia is closing, don't enqueue any more broadcast requests
+ select {
+ case <-k.bgBroadcastCtx.Done():
+ // we will not interfere with the announce operation by returning here
+ continue
+ case <-k.halt:
+ break outer
+ default:
+ }
+ go func(connectedPeer swarm.Address) {
+ // Create a new deadline ctx to prevent goroutine pile up
+ cCtx, cCancel := context.WithTimeout(k.bgBroadcastCtx, time.Minute)
+ defer cCancel()
+
+ if err := k.discovery.BroadcastPeers(cCtx, connectedPeer, peer); err != nil {
+ k.logger.Debug("peer gossip failed", "new_peer_address", peer, "connected_peer_address", connectedPeer, "error", err)
+ }
+ }(connectedPeer)
+ }
+ }
+
+ if len(addrs) == 0 {
+ return nil
+ }
+
+ select {
+ case <-k.halt:
+ return nil
+ default:
+ }
+
+ err := k.discovery.BroadcastPeers(ctx, peer, addrs...)
+ if err != nil {
+ k.logger.Error(err, "could not broadcast to peer", "peer_address", peer)
+ _ = k.p2p.Disconnect(peer, "failed broadcasting to peer")
+ }
+
+ return err
+}
+
+// AnnounceTo announces a selected peer to another.
+func (k *Kad) AnnounceTo(ctx context.Context, addressee, peer swarm.Address, fullnode bool) error {
+ if !fullnode {
+ return errAnnounceLightNode
+ }
+
+ return k.discovery.BroadcastPeers(ctx, addressee, peer)
+}
+
+// AddPeers adds peers to the knownPeers list.
+// This does not guarantee that a connection will immediately
+// be made to the peer.
+func (k *Kad) AddPeers(addrs ...swarm.Address) {
+ k.knownPeers.Add(addrs...)
+ k.notifyManageLoop()
+}
+
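+// binPeers returns the connected peers of the given bin, optionally
+// restricted to reachable peers.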
+func (k *Kad) binPeers(bin uint8, reachable bool) (peers []swarm.Address) {
+ _ = k.EachConnectedPeerRev(func(p swarm.Address, po uint8) (bool, bool, error) {
+ if po == bin {
+ peers = append(peers, p)
+ return false, false, nil
+ }
+
+ if po > bin {
+ return true, false, nil
+ }
+
+ return false, true, nil
+ }, topology.Select{Reachable: reachable})
+
+ return
+}
+
+func isStaticPeer(staticNodes []swarm.Address) func(overlay swarm.Address) bool {
+ return func(overlay swarm.Address) bool {
+ return swarm.ContainsAddress(staticNodes, overlay)
+ }
+}
+
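+// onConnected announces the new peer, adds it to the known and connected
+// sets, and triggers depth recalculation and notifications.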
+func (k *Kad) onConnected(ctx context.Context, addr swarm.Address) error {
+ if err := k.Announce(ctx, addr, true); err != nil {
+ return err
+ }
+
+ k.knownPeers.Add(addr)
+ k.connectedPeers.Add(addr)
+ k.waitNext.Remove(addr)
+ k.recalcDepth()
+ k.notifyManageLoop()
+ k.notifyPeerSig()
+ k.detector.Record()
+
+ return nil
+}
+
+func (k *Kad) notifyPeerSig() {
+ k.peerSigMtx.Lock()
+ defer k.peerSigMtx.Unlock()
+
+ for _, c := range k.peerSig {
+ // Every peerSig channel has a buffer capacity of 1,
+ // so every receiver will get the signal even if the
+ // select statement has the default case to avoid blocking.
+ select {
+ case c <- struct{}{}:
+ default:
+ }
+ }
+}
+
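+// nClosePeerInSlice returns the first non-sanctioned peer in the slice whose
+// extended proximity to addr is at least minPO.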
+func nClosePeerInSlice(peers []swarm.Address, addr swarm.Address, spf sanctionedPeerFunc, minPO uint8) (swarm.Address, bool) {
+ for _, peer := range peers {
+ if spf(peer) {
+ continue
+ }
+
+ if swarm.ExtendedProximity(peer.Bytes(), addr.Bytes()) >= minPO {
+ return peer, true
+ }
+ }
+
+ return swarm.ZeroAddress, false
+}
+
+func (k *Kad) IsReachable() bool {
+ return k.reachability == p2p.ReachabilityStatusPublic
+}
+
+// ClosestPeer returns the closest peer to a given address.
+func (k *Kad) ClosestPeer(addr swarm.Address, includeSelf bool, filter topology.Select, skipPeers ...swarm.Address) (swarm.Address, error) {
+ if k.connectedPeers.Length() == 0 {
+ return swarm.Address{}, topology.ErrNotFound
+ }
+
+ closest := swarm.ZeroAddress
+
+ if includeSelf && k.reachability == p2p.ReachabilityStatusPublic {
+ closest = k.base
+ }
+
+ prox := swarm.Proximity(k.base.Bytes(), addr.Bytes())
+
+ // iterate starting from bin 0 to the maximum bin
+ err := k.EachConnectedPeerRev(func(peer swarm.Address, bin uint8) (bool, bool, error) {
+ if swarm.ContainsAddress(skipPeers, peer) {
+ return false, false, nil
+ }
+
+ if bin > prox && !closest.IsZero() {
+ return true, false, nil
+ }
+
+ if closest.IsZero() {
+ closest = peer
+ return false, false, nil
+ }
+
+ closer, err := peer.Closer(addr, closest)
+ if closer {
+ closest = peer
+ }
+ if err != nil {
+ k.logger.Debug("closest peer", "peer", peer, "addr", addr, "error", err)
+ }
+ return false, false, nil
+ }, filter)
+ if err != nil {
+ return swarm.Address{}, err
+ }
+
+ if closest.IsZero() { // no peers
+ return swarm.Address{}, topology.ErrNotFound // only for light nodes
+ }
+
+ // check if self
+ if closest.Equal(k.base) {
+ return swarm.Address{}, topology.ErrWantSelf
+ }
+
+ return closest, nil
+}
+
+// EachConnectedPeer implements topology.PeerIterator interface.
+func (k *Kad) EachConnectedPeer(f topology.EachPeerFunc, filter topology.Select) error {
+ excludeFunc := k.opt.ExcludeFunc(excludeFromIterator(filter)...)
+ return k.connectedPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ if excludeFunc(addr) {
+ return false, false, nil
+ }
+ return f(addr, po)
+ })
+}
+
+// EachConnectedPeerRev implements topology.PeerIterator interface.
+func (k *Kad) EachConnectedPeerRev(f topology.EachPeerFunc, filter topology.Select) error {
+ excludeFunc := k.opt.ExcludeFunc(excludeFromIterator(filter)...)
+ return k.connectedPeers.EachBinRev(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ if excludeFunc(addr) {
+ return false, false, nil
+ }
+ return f(addr, po)
+ })
+}
+
+// Reachable sets the peer reachability status.
+func (k *Kad) Reachable(addr swarm.Address, status p2p.ReachabilityStatus) {
+ k.collector.Record(addr, im.PeerReachability(status))
+ k.logger.Debug("reachability of peer updated", "peer_address", addr, "reachability", status)
+ if status == p2p.ReachabilityStatusPublic {
+ k.recalcDepth()
+ k.notifyManageLoop()
+ }
+}
+
+// UpdatePeerHealth records the health status and latency of a peer.
+func (k *Kad) UpdatePeerHealth(peer swarm.Address, health bool, dur time.Duration) {
+ k.collector.Record(peer, im.PeerHealth(health), im.PeerLatency(dur))
+}
+
+// SubscribeTopologyChange returns the channel that signals when the connected peers
+// set and depth changes. Returned function is safe to be called multiple times.
+func (k *Kad) SubscribeTopologyChange() (c <-chan struct{}, unsubscribe func()) {
+ channel := make(chan struct{}, 1)
+ var closeOnce sync.Once
+
+ k.peerSigMtx.Lock()
+ defer k.peerSigMtx.Unlock()
+
+ k.peerSig = append(k.peerSig, channel)
+
+ unsubscribe = func() {
+ k.peerSigMtx.Lock()
+ defer k.peerSigMtx.Unlock()
+
+ for i, c := range k.peerSig {
+ if c == channel {
+ k.peerSig = append(k.peerSig[:i], k.peerSig[i+1:]...)
+ break
+ }
+ }
+
+ closeOnce.Do(func() { close(channel) })
+ }
+
+ return channel, unsubscribe
+}
+
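+// excludeFromIterator translates a topology.Select filter into metric exclude operations.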
+func excludeFromIterator(filter topology.Select) []im.ExcludeOp {
+ ops := make([]im.ExcludeOp, 0, 3)
+ ops = append(ops, im.Bootnode())
+
+ if filter.Reachable {
+ ops = append(ops, im.Reachability(false))
+ }
+ if filter.Healthy {
+ ops = append(ops, im.Health(false))
+ }
+
+ return ops
+}
+
+// neighborhoodDepth returns the current neighborhood depth, which tracks the storage radius.
+func (k *Kad) neighborhoodDepth() uint8 {
+ k.depthMu.RLock()
+ defer k.depthMu.RUnlock()
+
+ return k.storageRadius
+}
+
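+// Snapshot returns a summary of the kademlia state, listing connected and
+// known peers per bin together with their metric snapshots.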
+func (k *Kad) Snapshot() *topology.KadParams {
+ var infos []topology.BinInfo
+ for i := int(swarm.MaxPO); i >= 0; i-- {
+ infos = append(infos, topology.BinInfo{})
+ }
+
+ ss := k.collector.Snapshot(time.Now())
+
+ _ = k.connectedPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ infos[po].BinConnected++
+ infos[po].ConnectedPeers = append(
+ infos[po].ConnectedPeers,
+ &topology.PeerInfo{
+ Address: addr,
+ Metrics: createMetricsSnapshotView(ss[addr.ByteString()]),
+ },
+ )
+ return false, false, nil
+ })
+
+ // output (k.knownPeers ¬ k.connectedPeers) here to not repeat the peers we already have in the connected peers list
+ _ = k.knownPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ infos[po].BinPopulation++
+
+ for _, v := range infos[po].ConnectedPeers {
+ // peer already connected, don't show in the known peers list
+ if v.Address.Equal(addr) {
+ return false, false, nil
+ }
+ }
+
+ infos[po].DisconnectedPeers = append(
+ infos[po].DisconnectedPeers,
+ &topology.PeerInfo{
+ Address: addr,
+ Metrics: createMetricsSnapshotView(ss[addr.ByteString()]),
+ },
+ )
+ return false, false, nil
+ })
+
+ return &topology.KadParams{
+ Base: k.base.String(),
+ Population: k.knownPeers.Length(),
+ Connected: k.connectedPeers.Length(),
+ Timestamp: time.Now(),
+ NNLowWatermark: k.opt.LowWaterMark,
+ Depth: k.neighborhoodDepth(),
+ Reachability: k.reachability.String(),
+ NetworkAvailability: k.p2p.NetworkStatus().String(),
+ Bins: topology.KadBins{
+ Bin0: infos[0],
+ Bin1: infos[1],
+ Bin2: infos[2],
+ Bin3: infos[3],
+ Bin4: infos[4],
+ Bin5: infos[5],
+ Bin6: infos[6],
+ Bin7: infos[7],
+ Bin8: infos[8],
+ Bin9: infos[9],
+ Bin10: infos[10],
+ Bin11: infos[11],
+ Bin12: infos[12],
+ Bin13: infos[13],
+ Bin14: infos[14],
+ Bin15: infos[15],
+ Bin16: infos[16],
+ Bin17: infos[17],
+ Bin18: infos[18],
+ Bin19: infos[19],
+ Bin20: infos[20],
+ Bin21: infos[21],
+ Bin22: infos[22],
+ Bin23: infos[23],
+ Bin24: infos[24],
+ Bin25: infos[25],
+ Bin26: infos[26],
+ Bin27: infos[27],
+ Bin28: infos[28],
+ Bin29: infos[29],
+ Bin30: infos[30],
+ Bin31: infos[31],
+ },
+ }
+}
+
+// String returns a string representation of Kademlia.
+func (k *Kad) String() string {
+ j := k.Snapshot()
+ b, err := json.MarshalIndent(j, "", " ")
+ if err != nil {
+ k.logger.Error(err, "could not marshal kademlia into json")
+ return ""
+ }
+ return string(b)
+}
+
+// Halt stops outgoing connections from happening.
+// This is needed during shutdown so that further
+// topology changes do not occur.
+func (k *Kad) Halt() {
+ close(k.halt)
+}
+
+// Close shuts down kademlia.
+func (k *Kad) Close() error {
+ k.logger.Info("kademlia shutting down")
+ close(k.quit)
+ cc := make(chan struct{})
+
+ k.bgBroadcastCancel()
+
+ go func() {
+ k.wg.Wait()
+ close(cc)
+ }()
+
+ eg := errgroup.Group{}
+
+ errTimeout := errors.New("timeout")
+
+ eg.Go(func() error {
+ select {
+ case <-cc:
+ case <-time.After(peerConnectionAttemptTimeout):
+ return fmt.Errorf("kademlia shutting down with running goroutines: %w", errTimeout)
+ }
+ return nil
+ })
+
+ eg.Go(func() error {
+ select {
+ case <-k.done:
+ case <-time.After(time.Second * 5):
+ return fmt.Errorf("kademlia manage loop did not shut down properly: %w", errTimeout)
+ }
+ return nil
+ })
+
+ err := eg.Wait()
+
+ k.logger.Info("kademlia persisting peer metrics")
+ start := time.Now()
+ if err := k.collector.Finalize(start, false); err != nil {
+ k.logger.Debug("unable to finalize open sessions", "error", err)
+ }
+ k.logger.Debug("metrics collector finalized", "elapsed", time.Since(start))
+
+ return err
+}
+
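+// randomSubset shuffles addrs in place and returns the first count addresses.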
+func randomSubset(addrs []swarm.Address, count int) ([]swarm.Address, error) {
+ if count >= len(addrs) {
+ return addrs, nil
+ }
+
+ // Fisher-Yates shuffle using crypto/rand so every subset is equally likely
+ for i := 0; i < len(addrs)-1; i++ {
+ b, err := random.Int(random.Reader, big.NewInt(int64(len(addrs)-i)))
+ if err != nil {
+ return nil, err
+ }
+ j := i + int(b.Int64())
+ addrs[i], addrs[j] = addrs[j], addrs[i]
+ }
+
+ return addrs[:count], nil
+}
+
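+// randomPeer returns a random, non-static peer from the given bin.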
+func (k *Kad) randomPeer(bin uint8) (swarm.Address, error) {
+ peers := k.connectedPeers.BinPeers(bin)
+
+ for idx := 0; idx < len(peers); {
+ // do not consider protected peers
+ if k.staticPeer(peers[idx]) {
+ peers = append(peers[:idx], peers[idx+1:]...)
+ continue
+ }
+ idx++
+ }
+
+ if len(peers) == 0 {
+ return swarm.ZeroAddress, errEmptyBin
+ }
+
+ rndIndx, err := random.Int(random.Reader, big.NewInt(int64(len(peers))))
+ if err != nil {
+ return swarm.ZeroAddress, err
+ }
+
+ return peers[rndIndx.Int64()], nil
+}
+
+// createMetricsSnapshotView creates new topology.MetricSnapshotView from the
+// given metrics.Snapshot and rounds all the timestamps and durations to its
+// nearest second, except for the peer latency, which is given in milliseconds.
+func createMetricsSnapshotView(ss *im.Snapshot) *topology.MetricSnapshotView {
+ if ss == nil {
+ return nil
+ }
+ return &topology.MetricSnapshotView{
+ LastSeenTimestamp: time.Unix(0, ss.LastSeenTimestamp).Unix(),
+ SessionConnectionRetry: ss.SessionConnectionRetry,
+ ConnectionTotalDuration: ss.ConnectionTotalDuration.Truncate(time.Second).Seconds(),
+ SessionConnectionDuration: ss.SessionConnectionDuration.Truncate(time.Second).Seconds(),
+ SessionConnectionDirection: string(ss.SessionConnectionDirection),
+ LatencyEWMA: ss.LatencyEWMA.Milliseconds(),
+ Reachability: ss.Reachability.String(),
+ Healthy: ss.Healthy,
+ }
+}
diff --git a/pkg/topology/kademlia/metrics.go b/pkg/topology/kademlia/metrics.go
index 7fc9a53751e..919476c8c76 100644
--- a/pkg/topology/kademlia/metrics.go
+++ b/pkg/topology/kademlia/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/topology/lightnode/container.go b/pkg/topology/lightnode/container.go
index 0e4f6c3967c..8bd7b784a08 100644
--- a/pkg/topology/lightnode/container.go
+++ b/pkg/topology/lightnode/container.go
@@ -1,18 +1,14 @@
-// Copyright 2021 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js

package lightnode
import (
"context"
- "crypto/rand"
- "math/big"
"sync"
"github.com/ethersphere/bee/v2/pkg/p2p"
"github.com/ethersphere/bee/v2/pkg/swarm"
- "github.com/ethersphere/bee/v2/pkg/topology"
"github.com/ethersphere/bee/v2/pkg/topology/pslice"
)
@@ -58,65 +54,3 @@ func (c *Container) Disconnected(peer p2p.Peer) {
c.metrics.CurrentlyConnectedPeers.Set(float64(c.connectedPeers.Length()))
c.metrics.CurrentlyDisconnectedPeers.Set(float64(c.disconnectedPeers.Length()))
}
-
-func (c *Container) Count() int {
- return c.connectedPeers.Length()
-}
-
-func (c *Container) RandomPeer(not swarm.Address) (swarm.Address, error) {
- c.peerMu.Lock()
- defer c.peerMu.Unlock()
- var (
- cnt = big.NewInt(int64(c.Count()))
- addr = swarm.ZeroAddress
- count = int64(0)
- )
-
-PICKPEER:
- i, e := rand.Int(rand.Reader, cnt)
- if e != nil {
- return swarm.ZeroAddress, e
- }
- i64 := i.Int64()
-
- count = 0
- _ = c.connectedPeers.EachBinRev(func(peer swarm.Address, _ uint8) (bool, bool, error) {
- if count == i64 {
- addr = peer
- return true, false, nil
- }
- count++
- return false, false, nil
- })
-
- if addr.Equal(not) {
- goto PICKPEER
- }
-
- return addr, nil
-}
-
-func (c *Container) EachPeer(pf topology.EachPeerFunc) error {
- return c.connectedPeers.EachBin(pf)
-}
-
-func (c *Container) PeerInfo() topology.BinInfo {
- return topology.BinInfo{
- BinPopulation: uint(c.connectedPeers.Length()),
- BinConnected: uint(c.connectedPeers.Length()),
- DisconnectedPeers: peersInfo(c.disconnectedPeers),
- ConnectedPeers: peersInfo(c.connectedPeers),
- }
-}
-
-func peersInfo(s *pslice.PSlice) []*topology.PeerInfo {
- if s.Length() == 0 {
- return nil
- }
- peers := make([]*topology.PeerInfo, 0, s.Length())
- _ = s.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
- peers = append(peers, &topology.PeerInfo{Address: addr})
- return false, false, nil
- })
- return peers
-}
diff --git a/pkg/topology/lightnode/container_js.go b/pkg/topology/lightnode/container_js.go
new file mode 100644
index 00000000000..33282e3992c
--- /dev/null
+++ b/pkg/topology/lightnode/container_js.go
@@ -0,0 +1,50 @@
+//go:build js
+// +build js
+
+package lightnode
+
+import (
+ "context"
+ "sync"
+
+ "github.com/ethersphere/bee/v2/pkg/p2p"
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology/pslice"
+)
+
+type Container struct {
+ base swarm.Address
+ peerMu sync.Mutex // peerMu guards connectedPeers and disconnectedPeers.
+ connectedPeers *pslice.PSlice
+ disconnectedPeers *pslice.PSlice
+}
+
+func NewContainer(base swarm.Address) *Container {
+ return &Container{
+ base: base,
+ connectedPeers: pslice.New(1, base),
+ disconnectedPeers: pslice.New(1, base),
+ }
+}
+
+func (c *Container) Connected(ctx context.Context, peer p2p.Peer) {
+ c.peerMu.Lock()
+ defer c.peerMu.Unlock()
+
+ addr := peer.Address
+ c.connectedPeers.Add(addr)
+ c.disconnectedPeers.Remove(addr)
+}
+
+func (c *Container) Disconnected(peer p2p.Peer) {
+ c.peerMu.Lock()
+ defer c.peerMu.Unlock()
+
+ addr := peer.Address
+ if found := c.connectedPeers.Exists(addr); found {
+ c.connectedPeers.Remove(addr)
+ c.disconnectedPeers.Add(addr)
+ }
+}
diff --git a/pkg/topology/lightnode/container_shared.go b/pkg/topology/lightnode/container_shared.go
new file mode 100644
index 00000000000..37b6b37cd7a
--- /dev/null
+++ b/pkg/topology/lightnode/container_shared.go
@@ -0,0 +1,76 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lightnode
+
+import (
+ "crypto/rand"
+ "math/big"
+
+ "github.com/ethersphere/bee/v2/pkg/swarm"
+ "github.com/ethersphere/bee/v2/pkg/topology"
+ "github.com/ethersphere/bee/v2/pkg/topology/pslice"
+)
+
+func (c *Container) Count() int {
+ return c.connectedPeers.Length()
+}
+
+func (c *Container) RandomPeer(not swarm.Address) (swarm.Address, error) {
+ c.peerMu.Lock()
+ defer c.peerMu.Unlock()
+ var (
+ cnt = big.NewInt(int64(c.Count()))
+ addr = swarm.ZeroAddress
+ count = int64(0)
+ )
+
+PICKPEER:
+ i, e := rand.Int(rand.Reader, cnt)
+ if e != nil {
+ return swarm.ZeroAddress, e
+ }
+ i64 := i.Int64()
+
+ count = 0
+ _ = c.connectedPeers.EachBinRev(func(peer swarm.Address, _ uint8) (bool, bool, error) {
+ if count == i64 {
+ addr = peer
+ return true, false, nil
+ }
+ count++
+ return false, false, nil
+ })
+
+ if addr.Equal(not) {
+ goto PICKPEER
+ }
+
+ return addr, nil
+}
+
+func (c *Container) EachPeer(pf topology.EachPeerFunc) error {
+ return c.connectedPeers.EachBin(pf)
+}
+
+func (c *Container) PeerInfo() topology.BinInfo {
+ return topology.BinInfo{
+ BinPopulation: uint(c.connectedPeers.Length()),
+ BinConnected: uint(c.connectedPeers.Length()),
+ DisconnectedPeers: peersInfo(c.disconnectedPeers),
+ ConnectedPeers: peersInfo(c.connectedPeers),
+ }
+}
+
+func peersInfo(s *pslice.PSlice) []*topology.PeerInfo {
+ if s.Length() == 0 {
+ return nil
+ }
+ peers := make([]*topology.PeerInfo, 0, s.Length())
+ _ = s.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) {
+ peers = append(peers, &topology.PeerInfo{Address: addr})
+ return false, false, nil
+ })
+ return peers
+}
diff --git a/pkg/topology/lightnode/metrics.go b/pkg/topology/lightnode/metrics.go
index cea1504f8a9..a0c4acf8393 100644
--- a/pkg/topology/lightnode/metrics.go
+++ b/pkg/topology/lightnode/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/transaction/event_test.go b/pkg/transaction/event_test.go
index 6760a805ccb..e286796d248 100644
--- a/pkg/transaction/event_test.go
+++ b/pkg/transaction/event_test.go
@@ -17,7 +17,7 @@ import (
)
var (
- erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_9)
+ erc20ABI = abiutil.MustParseABI(sw3abi.ERC20ABIv0_6_5)
)
type transferEvent struct {
diff --git a/pkg/transaction/wrapped/metrics.go b/pkg/transaction/wrapped/metrics.go
index eeb255c3d3b..18e5eee7177 100644
--- a/pkg/transaction/wrapped/metrics.go
+++ b/pkg/transaction/wrapped/metrics.go
@@ -1,3 +1,6 @@
+//go:build !js
+// +build !js
+
// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/pkg/transaction/wrapped/wrapped.go b/pkg/transaction/wrapped/wrapped.go
index f810ee2ebab..27907ff76eb 100644
--- a/pkg/transaction/wrapped/wrapped.go
+++ b/pkg/transaction/wrapped/wrapped.go
@@ -1,6 +1,5 @@
-// Copyright 2021 The Swarm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+//go:build !js
+// +build !js

package wrapped
@@ -12,11 +11,8 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethersphere/bee/v2/pkg/transaction"
-)
-var (
- _ transaction.Backend = (*wrappedBackend)(nil)
+ "github.com/ethersphere/bee/v2/pkg/transaction"
)
type wrappedBackend struct {
@@ -201,7 +197,3 @@ func (b *wrappedBackend) ChainID(ctx context.Context) (*big.Int, error) {
}
return chainID, nil
}
-
-func (b *wrappedBackend) Close() {
- b.backend.Close()
-}
diff --git a/pkg/transaction/wrapped/wrapped_js.go b/pkg/transaction/wrapped/wrapped_js.go
new file mode 100644
index 00000000000..5440b452bdb
--- /dev/null
+++ b/pkg/transaction/wrapped/wrapped_js.go
@@ -0,0 +1,181 @@
+//go:build js
+// +build js
+
+package wrapped
+
+import (
+ "context"
+ "errors"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethersphere/bee/v2/pkg/transaction"
+)
+
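+// wrappedBackend is a plain pass-through to the underlying backend on js
+// builds; the metrics instrumentation of the !js variant is omitted.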
+type wrappedBackend struct {
+ backend transaction.Backend
+}
+
+func NewBackend(backend transaction.Backend) transaction.Backend {
+ return &wrappedBackend{
+ backend: backend,
+ }
+}
+
+func (b *wrappedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
+ return b.backend.TransactionReceipt(ctx, txHash)
+}
+
+func (b *wrappedBackend) TransactionByHash(ctx context.Context, hash common.Hash) (*types.Transaction, bool, error) {
+ return b.backend.TransactionByHash(ctx, hash)
+}
+
+func (b *wrappedBackend) BlockNumber(ctx context.Context) (uint64, error) {
+ return b.backend.BlockNumber(ctx)
+}
+
+func (b *wrappedBackend) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
+ return b.backend.HeaderByNumber(ctx, number)
+}
+
+func (b *wrappedBackend) BalanceAt(ctx context.Context, address common.Address, block *big.Int) (*big.Int, error) {
+ return b.backend.BalanceAt(ctx, address, block)
+}
+
+func (b *wrappedBackend) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) {
+ return b.backend.NonceAt(ctx, account, blockNumber)
+}
+
+func (b *wrappedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
+ return b.backend.CodeAt(ctx, contract, blockNumber)
+}
+
+func (b *wrappedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
+ return b.backend.CallContract(ctx, call, blockNumber)
+}
+
+func (b *wrappedBackend) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
+ return b.backend.PendingNonceAt(ctx, account)
+}
+
+func (b *wrappedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
+ return b.backend.SuggestGasPrice(ctx)
+}
+
+func (b *wrappedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
+ return b.backend.SuggestGasTipCap(ctx)
+}
+
+func (b *wrappedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
+ return b.backend.EstimateGas(ctx, call)
+}
+
+func (b *wrappedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
+ return b.backend.SendTransaction(ctx, tx)
+}
+
+func (b *wrappedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
+ return b.backend.FilterLogs(ctx, query)
+}
+
+func (b *wrappedBackend) ChainID(ctx context.Context) (*big.Int, error) {
+ return b.backend.ChainID(ctx)
+}
diff --git a/pkg/transaction/wrapped/wrapped_shared.go b/pkg/transaction/wrapped/wrapped_shared.go
new file mode 100644
index 00000000000..50ed383f16b
--- /dev/null
+++ b/pkg/transaction/wrapped/wrapped_shared.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package wrapped
+
+import (
+ "github.com/ethersphere/bee/v2/pkg/transaction"
+)
+
+var (
+ _ transaction.Backend = (*wrappedBackend)(nil)
+)
+
+func (b *wrappedBackend) Close() {
+ b.backend.Close()
+}