From 1941c690bd9120d57cd4cc3d1e7c654d160622e8 Mon Sep 17 00:00:00 2001 From: Silas Davis Date: Wed, 19 Jun 2019 18:41:40 +0100 Subject: [PATCH] Remove lots of dead code, restructure storage. Also add staticcheck to makefile. Signed-off-by: Silas Davis --- Makefile | 5 + acm/account_test.go | 4 +- acm/acmstate/state_cache_test.go | 1 + acm/bytecode_test.go | 4 +- acm/validator/bucket_test.go | 4 +- acm/validator/ring_test.go | 2 +- binary/bytes_test.go | 1 + binary/word256_test.go | 1 + cmd/burrow/commands/configure.go | 6 +- cmd/burrow/commands/examine.go | 6 + consensus/abci/app.go | 5 +- consensus/tendermint/sign_info.go | 12 +- core/kernel.go | 2 +- crypto/address_test.go | 2 + crypto/public_key_test.go | 1 + deploy/compile/compilers.go | 20 +- deploy/compile/compilers_test.go | 34 +- deploy/def/client.go | 3 + deploy/jobs/job_manager.go | 9 + deploy/jobs/jobs_contracts.go | 4 +- dump/dump.go | 2 +- event/pubsub/pubsub_test.go | 4 +- event/query/builder.go | 4 +- event/query/reflect_tagged_test.go | 11 +- event/query/tags.go | 5 +- execution/contexts/name_context.go | 2 +- execution/contexts/proposal_context.go | 2 +- execution/evm/abi/abi.go | 24 +- execution/evm/abi/core.go | 2 +- execution/evm/abi/core_test.go | 20 +- execution/evm/event_sink.go | 1 - execution/evm/snative_test.go | 14 +- execution/evm/stack.go | 7 +- execution/evm/vm_test.go | 3 +- execution/execution_test.go | 3 +- execution/state/validators.go | 3 + execution/transactor.go | 3 + {storage => forensics}/cache_db.go | 13 +- {storage => forensics}/cache_db_test.go | 18 +- {storage => forensics}/channel_iterator.go | 2 +- forensics/channel_iterator_test.go | 52 +++ {storage => forensics}/kvcache.go | 30 +- {storage => forensics}/kvcache_test.go | 30 +- {storage => forensics}/multi_iterator.go | 22 +- {storage => forensics}/multi_iterator_test.go | 19 +- forensics/replay.go | 15 +- forensics/replay_test.go | 3 +- {storage => forensics}/unique_iterator.go | 12 +- .../unique_iterator_test.go | 2 +- {storage => forensics}/util_test.go | 31 +- genesis/spec/presets.go | 14 - go.mod | 1 + go.sum | 14 + integration/rpctest/helpers.go | 2 - keys/core.go | 20 +- keys/key_client.go | 6 +- keys/key_store.go | 4 +- keys/server.go | 2 +- logging/lifecycle/lifecycle.go | 6 +- logging/logconfig/sinks.go | 2 +- logging/logger.go | 3 +- logging/loggers/burrow_format_logger.go | 9 +- logging/loggers/stream_logger.go | 4 - logging/loggers/stream_logger_test.go | 2 +- logging/structure/structure.go | 13 - logging/structure/structure_test.go | 10 +- permission/perm_flag.go | 20 +- rpc/lib/rpc_test.go | 14 - rpc/lib/server/handlers.go | 20 +- rpc/metrics/exporter.go | 3 +- rpc/rpcinfo/info_server.go | 2 +- rpc/rpcinfo/methods.go | 4 +- rpc/rpcquery/query_server.go | 5 +- rpc/service.go | 4 +- storage/channel_iterator_test.go | 16 - storage/commit_id.go | 44 --- storage/forest.go | 347 ++++++++++++++++++ ...{mutable_forest_test.go => forest_test.go} | 30 +- storage/immutable_forest.go | 129 ------- storage/immutable_tree.go | 28 -- storage/key_format.go | 34 ++ storage/key_format_store.go | 38 -- storage/key_format_store_test.go | 77 ---- storage/key_format_test.go | 73 +++- storage/kvcascade.go | 39 -- storage/kvstore.go | 28 +- storage/mutable_forest.go | 165 --------- storage/prefix_db_test.go | 124 +++---- storage/rwtree.go | 3 + storage/rwtree_test.go | 48 +-- storage/{mutable_tree.go => tree.go} | 43 ++- sync/ring_mutex.go | 4 +- txs/payload/bond_tx.go | 2 +- txs/tx.go | 3 + util/fs.go | 6 +- util/slice/slice.go | 21 +- 
vent/service/decoder.go | 4 +- vent/service/rowbuilder.go | 2 +- vent/sqldb/adapters/postgres_adapter.go | 3 + vent/sqlsol/projection.go | 2 +- vent/sqlsol/projection_test.go | 2 +- 101 files changed, 948 insertions(+), 1036 deletions(-) rename {storage => forensics}/cache_db.go (84%) rename {storage => forensics}/cache_db_test.go (86%) rename {storage => forensics}/channel_iterator.go (99%) create mode 100644 forensics/channel_iterator_test.go rename {storage => forensics}/kvcache.go (86%) rename {storage => forensics}/kvcache_test.go (56%) rename {storage => forensics}/multi_iterator.go (79%) rename {storage => forensics}/multi_iterator_test.go (78%) rename {storage => forensics}/unique_iterator.go (80%) rename {storage => forensics}/unique_iterator_test.go (94%) rename {storage => forensics}/util_test.go (58%) delete mode 100644 storage/channel_iterator_test.go delete mode 100644 storage/commit_id.go create mode 100644 storage/forest.go rename storage/{mutable_forest_test.go => forest_test.go} (92%) delete mode 100644 storage/immutable_forest.go delete mode 100644 storage/immutable_tree.go delete mode 100644 storage/key_format_store.go delete mode 100644 storage/key_format_store_test.go delete mode 100644 storage/kvcascade.go delete mode 100644 storage/mutable_forest.go rename storage/{mutable_tree.go => tree.go} (76%) diff --git a/Makefile b/Makefile index 0bb947ec0..f5b60e0ba 100644 --- a/Makefile +++ b/Makefile @@ -247,3 +247,8 @@ push_ci_image: build_ci_image .PHONY: ready_for_pull_request ready_for_pull_request: docs fix + +.PHONY: staticcheck +staticcheck: + go get honnef.co/go/tools/cmd/staticcheck + staticcheck ./... diff --git a/acm/account_test.go b/acm/account_test.go index 852f31fff..1173bdae6 100644 --- a/acm/account_test.go +++ b/acm/account_test.go @@ -66,7 +66,7 @@ func TestDecodeConcrete(t *testing.T) { require.NoError(t, err) assert.Equal(t, concreteAcc, concreteAccOut) - concreteAccOut, err = Decode([]byte("flungepliffery munknut tolopops")) + _, err = Decode([]byte("flungepliffery munknut tolopops")) assert.Error(t, err) } @@ -116,7 +116,7 @@ func TestAccountTags(t *testing.T) { assert.Equal(t, "send | call | createContract | createAccount | bond | name | proposal | input | batch | hasBase | hasRole", str) str, _ = tagged.Get("Roles") assert.Equal(t, "frogs;dogs", str) - str, _ = tagged.Get("Code") + tagged.Get("Code") qry, err := query.New("Code CONTAINS '0116002556001600360006101000A815'") require.NoError(t, err) assert.True(t, qry.Matches(tagged)) diff --git a/acm/acmstate/state_cache_test.go b/acm/acmstate/state_cache_test.go index 8baf046dc..5c87f44c5 100644 --- a/acm/acmstate/state_cache_test.go +++ b/acm/acmstate/state_cache_test.go @@ -231,6 +231,7 @@ func TestStateCache_Sync(t *testing.T) { newAcc := acm.NewAccountFromSecret("newAcc") // Create account err := cache.UpdateAccount(newAcc) + require.NoError(t, err) // Set balance for account balance := uint64(24) diff --git a/acm/bytecode_test.go b/acm/bytecode_test.go index 17ce3a071..7409ffe06 100644 --- a/acm/bytecode_test.go +++ b/acm/bytecode_test.go @@ -24,6 +24,7 @@ func TestBytecode_MarshalJSON(t *testing.T) { bytecodeOut := new(Bytecode) err = json.Unmarshal(bs, bytecodeOut) + require.NoError(t, err) assert.Equal(t, bytecode, *bytecodeOut) } @@ -41,6 +42,7 @@ func TestBytecode_MarshalText(t *testing.T) { bytecodeOut := new(Bytecode) err = bytecodeOut.UnmarshalText(bs) + require.NoError(t, err) assert.Equal(t, bytecode, *bytecodeOut) } @@ -80,6 +82,6 @@ func TestBytecode_Tokens(t *testing.T) { 
require.NoError(t, err) assert.Equal(t, []string{}, tokens) - tokens, err = Bytecode(bc.MustSplice(asm.PUSH3, 1, 2)).Tokens() + _, err = Bytecode(bc.MustSplice(asm.PUSH3, 1, 2)).Tokens() assert.Error(t, err, "not enough bytes to push") } diff --git a/acm/validator/bucket_test.go b/acm/validator/bucket_test.go index fcd935818..e1ab8b79a 100644 --- a/acm/validator/bucket_test.go +++ b/acm/validator/bucket_test.go @@ -31,10 +31,10 @@ func TestBucket_AlterPower(t *testing.T) { require.NoError(t, err) require.Equal(t, big3.Int64(), flow.Int64()) - flow, err = bucket.AlterPower(pubA, new(big.Int).Add(maxTotalVotingPower, big1)) + _, err = bucket.AlterPower(pubA, new(big.Int).Add(maxTotalVotingPower, big1)) require.Error(t, err, "should fail as we would breach total power") - flow, err = bucket.AlterPower(pubB, big1) + _, err = bucket.AlterPower(pubB, big1) require.Error(t, err, "should fail as we would breach total power") // Drop A and raise B - should now succeed diff --git a/acm/validator/ring_test.go b/acm/validator/ring_test.go index 456e39870..12567d9f8 100644 --- a/acm/validator/ring_test.go +++ b/acm/validator/ring_test.go @@ -29,7 +29,7 @@ func TestValidatorsRing_AlterPower(t *testing.T) { vs = Copy(vsBase) vw = NewRing(vs, 5) powA, powB, powC = 7000, 23, 310 - powerChange, totalFlow, err = alterPowers(t, vw, powA, powB, powC) + _, _, err = alterPowers(t, vw, powA, powB, powC) require.Error(t, err) powA, powB, powC = 7000, 23, 309 diff --git a/binary/bytes_test.go b/binary/bytes_test.go index 7e0aeb536..81f9d22e8 100644 --- a/binary/bytes_test.go +++ b/binary/bytes_test.go @@ -15,5 +15,6 @@ func TestHexBytes_MarshalText(t *testing.T) { assert.Equal(t, "\"0102030405\"", string(out)) bs2 := new(HexBytes) err = json.Unmarshal(out, bs2) + require.NoError(t, err) assert.Equal(t, bs, *bs2) } diff --git a/binary/word256_test.go b/binary/word256_test.go index bc0f038cf..ee0460bfb 100644 --- a/binary/word256_test.go +++ b/binary/word256_test.go @@ -52,6 +52,7 @@ func TestWord256_MarshalText(t *testing.T) { assert.Equal(t, "\"0102030405000000000000000000000000000000000000000000000000000000\"", string(out)) bs2 := new(Word256) err = json.Unmarshal(out, bs2) + require.NoError(t, err) assert.Equal(t, w, *bs2) } diff --git a/cmd/burrow/commands/configure.go b/cmd/burrow/commands/configure.go index b981f64e5..1ff30b9a9 100644 --- a/cmd/burrow/commands/configure.go +++ b/cmd/burrow/commands/configure.go @@ -190,11 +190,11 @@ func Configure(output Output) func(cmd *cli.Cmd) { output.Fatalf("Could not create remote key client: %v", err) } conf.GenesisDoc, err = genesisSpec.GenesisDoc(keyClient, *generateNodeKeys || *pool) + if err != nil { + output.Fatalf("could not realise GenesisSpec: %v", err) + } } - if err != nil { - output.Fatalf("could not realise GenesisSpec: %v", err) - } } if *chainNameOpt != "" { diff --git a/cmd/burrow/commands/examine.go b/cmd/burrow/commands/examine.go index 3824089ec..f2351eb47 100644 --- a/cmd/burrow/commands/examine.go +++ b/cmd/burrow/commands/examine.go @@ -37,6 +37,9 @@ func Examine(output Output) func(cmd *cli.Cmd) { cmd.Action = func() { start, end, err := parseRange(*rangeArg) + if err != nil { + output.Fatalf("could not parse range '%s': %v", *rangeArg, err) + } err = explorer.Blocks(start, end, func(block *bcm.Block) error { @@ -61,6 +64,9 @@ func Examine(output Output) func(cmd *cli.Cmd) { cmd.Action = func() { start, end, err := parseRange(*rangeArg) + if err != nil { + output.Fatalf("could not parse range '%s': %v", *rangeArg, err) + } err = 
explorer.Blocks(start, end, func(block *bcm.Block) error { diff --git a/consensus/abci/app.go b/consensus/abci/app.go index 61f5a5d02..6af202d87 100644 --- a/consensus/abci/app.go +++ b/consensus/abci/app.go @@ -34,8 +34,6 @@ type App struct { // State blockchain *bcm.Blockchain validators Validators - checkTx func(txBytes []byte) types.ResponseCheckTx - deliverTx func(txBytes []byte) types.ResponseCheckTx mempoolLocker sync.Locker authorizedPeersProvider PeersFilterProvider // We need to cache these from BeginBlock for when we need actually need it in Commit @@ -125,6 +123,9 @@ func (app *App) InitChain(chain types.RequestInitChain) (respInitChain types.Res } for _, v := range chain.Validators { pk, err := crypto.PublicKeyFromABCIPubKey(v.GetPubKey()) + if err != nil { + panic(err) + } err = app.checkValidatorMatches(currentSet, types.Validator{Address: pk.GetAddress().Bytes(), Power: v.Power}) if err != nil { panic(err) diff --git a/consensus/tendermint/sign_info.go b/consensus/tendermint/sign_info.go index 7f7c199df..105293bc8 100644 --- a/consensus/tendermint/sign_info.go +++ b/consensus/tendermint/sign_info.go @@ -76,17 +76,17 @@ func (lsi *LastSignedInfo) SignProposal(sign tmCryptoSigner, chainID string, pro // returns error if HRS regression or no SignBytes. returns true if HRS is unchanged func (lsi *LastSignedInfo) checkHRS(height int64, round int, step int8) (bool, error) { if lsi.Height > height { - return false, errors.New("Height regression") + return false, errors.New("height regression") } if lsi.Height == height { if lsi.Round > round { - return false, errors.New("Round regression") + return false, errors.New("round regression") } if lsi.Round == round { if lsi.Step > step { - return false, errors.New("Step regression") + return false, errors.New("step regression") } else if lsi.Step == step { if lsi.SignBytes != nil { if lsi.Signature == nil { @@ -94,7 +94,7 @@ func (lsi *LastSignedInfo) checkHRS(height int64, round int, step int8) (bool, e } return true, nil } - return false, errors.New("No Signature found") + return false, errors.New("no Signature found") } } } @@ -125,7 +125,7 @@ func (lsi *LastSignedInfo) signVote(sign tmCryptoSigner, chainID string, vote *t vote.Timestamp = timestamp vote.Signature = lsi.Signature } else { - err = fmt.Errorf("Conflicting data") + err = fmt.Errorf("conflicting data") } return err } @@ -161,7 +161,7 @@ func (lsi *LastSignedInfo) signProposal(sign tmCryptoSigner, chainID string, pro proposal.Timestamp = timestamp proposal.Signature = lsi.Signature } else { - err = fmt.Errorf("Conflicting data") + err = fmt.Errorf("conflicting data") } return err } diff --git a/core/kernel.go b/core/kernel.go index 6c72adfe4..b751678a3 100644 --- a/core/kernel.go +++ b/core/kernel.go @@ -311,7 +311,7 @@ func (kern *Kernel) supervise() { shutdownCh := make(chan os.Signal, 1) reloadCh := make(chan os.Signal, 1) syncCh := make(chan os.Signal, 1) - signal.Notify(shutdownCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL) + signal.Notify(shutdownCh, syscall.SIGINT, syscall.SIGTERM) signal.Notify(reloadCh, syscall.SIGHUP) signal.Notify(syncCh, syscall.SIGUSR1) for { diff --git a/crypto/address_test.go b/crypto/address_test.go index 55e8a36c2..05c065c6f 100644 --- a/crypto/address_test.go +++ b/crypto/address_test.go @@ -57,6 +57,7 @@ func TestAddress_MarshalJSON(t *testing.T) { addrOut := new(Address) err = json.Unmarshal(bs, addrOut) + require.NoError(t, err) assert.Equal(t, addr, *addrOut) } @@ -74,6 +75,7 @@ func TestAddress_MarshalText(t *testing.T) { 
addrOut := new(Address) err = addrOut.UnmarshalText(bs) + require.NoError(t, err) assert.Equal(t, addr, *addrOut) } diff --git a/crypto/public_key_test.go b/crypto/public_key_test.go index 6dea35482..b504570a0 100644 --- a/crypto/public_key_test.go +++ b/crypto/public_key_test.go @@ -21,6 +21,7 @@ func TestPublicKeySerialisation(t *testing.T) { require.NoError(t, err) var pubOut PublicKey err = proto.Unmarshal(bs, &pubOut) + require.NoError(t, err) assert.Equal(t, pub, pubOut) bs, err = json.Marshal(pub) diff --git a/deploy/compile/compilers.go b/deploy/compile/compilers.go index 2323c4728..6c6d544e3 100644 --- a/deploy/compile/compilers.go +++ b/deploy/compile/compilers.go @@ -8,7 +8,6 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "strings" "github.com/hyperledger/burrow/crypto" @@ -156,10 +155,8 @@ func Compile(file string, optimize bool, workDir string, libraries map[string]st input.Settings.Libraries = make(map[string]map[string]string) input.Settings.Libraries[""] = make(map[string]string) - if libraries != nil { - for l, a := range libraries { - input.Settings.Libraries[""][l] = "0x" + a - } + for l, a := range libraries { + input.Settings.Libraries[""][l] = "0x" + a } command, err := json.Marshal(input) @@ -253,16 +250,3 @@ func PrintResponse(resp Response, cli bool, logger *logging.Logger) { } } } - -func extractObjectNames(script []byte) ([]string, error) { - regExpression, err := regexp.Compile("(contract|library) (.+?) (is)?(.+?)?({)") - if err != nil { - return nil, err - } - objectNamesList := regExpression.FindAllSubmatch(script, -1) - var objects []string - for _, objectNames := range objectNamesList { - objects = append(objects, string(objectNames[2])) - } - return objects, nil -} diff --git a/deploy/compile/compilers_test.go b/deploy/compile/compilers_test.go index d170b204d..8a3b107fe 100644 --- a/deploy/compile/compilers_test.go +++ b/deploy/compile/compilers_test.go @@ -2,12 +2,15 @@ package compile import ( "encoding/json" + "fmt" "os" "os/exec" "path/filepath" "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/hyperledger/burrow/logging" "github.com/stretchr/testify/assert" ) @@ -24,10 +27,6 @@ type SolcResponse struct { Version string `mapstructure:"version" json:"version"` // json encoded } -func BlankSolcItem() *SolcItem { - return &SolcItem{} -} - func BlankSolcResponse() *SolcResponse { return &SolcResponse{ Version: "", @@ -46,6 +45,7 @@ func TestLocalMulti(t *testing.T) { warning, responseJSON := extractWarningJSON(strings.TrimSpace(string(actualOutput))) err = json.Unmarshal([]byte(responseJSON), expectedSolcResponse) + require.NoError(t, err) respItemArray := make([]ResponseItem, 0) @@ -90,6 +90,7 @@ func TestLocalSingle(t *testing.T) { warning, responseJSON := extractWarningJSON(strings.TrimSpace(string(actualOutput))) err = json.Unmarshal([]byte(responseJSON), expectedSolcResponse) + require.NoError(t, err) respItemArray := make([]ResponseItem, 0) @@ -122,20 +123,18 @@ func TestLocalSingle(t *testing.T) { } func TestFaultyContract(t *testing.T) { - var expectedSolcResponse Response - - actualOutput, err := exec.Command("solc", "--combined-json", "bin,abi", "faultyContract.sol").CombinedOutput() - err = json.Unmarshal(actualOutput, expectedSolcResponse) - t.Log(expectedSolcResponse.Error) - resp, err := Compile("faultyContract.sol", false, "", make(map[string]string), logging.NewNoopLogger()) - t.Log(resp.Error) + const faultyContractFile = "tests/compilers_fixtures/faultyContract.sol" + actualOutput, err := 
exec.Command("solc", "--combined-json", "bin,abi", faultyContractFile).CombinedOutput() + require.EqualError(t, err, "exit status 1") + resp, err := Compile(faultyContractFile, false, "", make(map[string]string), logging.NewNoopLogger()) + require.NoError(t, err) if err != nil { - if expectedSolcResponse.Error != resp.Error { - t.Errorf("Expected %v got %v", expectedSolcResponse.Error, resp.Error) + if string(actualOutput) != resp.Error { + t.Errorf("Expected %v got %v", string(actualOutput), resp.Error) } } output := strings.TrimSpace(string(actualOutput)) - err = json.Unmarshal([]byte(output), expectedSolcResponse) + fmt.Println(output) } func testContractPath() string { @@ -143,13 +142,6 @@ func testContractPath() string { return filepath.Join(baseDir, "..", "..", "tests", "compilers_fixtures") } -// The solidity 0.4.21 compiler appends something called auxdata to the end of the bin file (this is visible with -// solc --asm). This is a swarm hash of the metadata, and it's always at the end. This includes the path of the -// solidity source file, so it will differ. -func trimAuxdata(bin string) string { - return bin[:len(bin)-86] -} - func extractWarningJSON(output string) (warning string, json string) { jsonBeginsCertainly := strings.Index(output, `{"contracts":`) diff --git a/deploy/def/client.go b/deploy/def/client.go index 77b966591..8c496ccdd 100644 --- a/deploy/def/client.go +++ b/deploy/def/client.go @@ -598,6 +598,9 @@ func (c *Client) TxInput(inputString, amountString, sequenceString string, allow var amount uint64 if amountString != "" { amount, err = c.ParseUint64(amountString) + if err != nil { + return nil, err + } } var sequence uint64 sequence, err = c.getSequence(sequenceString, inputAddress, c.MempoolSigning && allowMempoolSigning, logger) diff --git a/deploy/jobs/job_manager.go b/deploy/jobs/job_manager.go index c0efe8dd3..73efee268 100644 --- a/deploy/jobs/job_manager.go +++ b/deploy/jobs/job_manager.go @@ -160,6 +160,9 @@ func doJobs(playbook *def.Playbook, args *def.DeployArgs, client *def.Client, lo return err } job.Result, err = SendJob(job.Send, tx, playbook.Account, client, logger) + if err != nil { + return err + } case *def.RegisterName: announce(job.Name, "RegisterName", logger) txs, err := FormulateRegisterNameJob(job.RegisterName, args, playbook, client, logger) @@ -167,6 +170,9 @@ func doJobs(playbook *def.Playbook, args *def.DeployArgs, client *def.Client, lo return err } job.Result, err = RegisterNameJob(job.RegisterName, args, playbook, txs, client, logger) + if err != nil { + return err + } case *def.Permission: announce(job.Name, "Permission", logger) tx, err := FormulatePermissionJob(job.Permission, playbook.Account, client, logger) @@ -174,6 +180,9 @@ func doJobs(playbook *def.Playbook, args *def.DeployArgs, client *def.Client, lo return err } job.Result, err = PermissionJob(job.Permission, playbook.Account, tx, client, logger) + if err != nil { + return err + } // Contracts jobs case *def.Deploy: diff --git a/deploy/jobs/jobs_contracts.go b/deploy/jobs/jobs_contracts.go index 0a6ca39d9..5b8c06947 100644 --- a/deploy/jobs/jobs_contracts.go +++ b/deploy/jobs/jobs_contracts.go @@ -223,7 +223,7 @@ func FormulateDeployJob(deploy *def.Deploy, do *def.DeployArgs, deployScript *de return nil, nil, err } deployedCount++ - if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) { + if strings.EqualFold(response.Objectname, 
strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) { baseObj = tx baseContract = &resp.Objects[i] } else { @@ -310,7 +310,7 @@ func matchInstanceName(objectName, deployInstance string) bool { objectNameParts := strings.Split(objectName, ":") deployInstanceParts := strings.Split(deployInstance, "/") - return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstanceParts[len(deployInstanceParts)-1]) + return strings.EqualFold(objectNameParts[len(objectNameParts)-1], deployInstanceParts[len(deployInstanceParts)-1]) } func findContractFile(contract, binPath string, deployPath string) (string, error) { diff --git a/dump/dump.go b/dump/dump.go index 31f96cd61..7f5261169 100644 --- a/dump/dump.go +++ b/dump/dump.go @@ -53,7 +53,7 @@ func NewDumper(state *state.State, blockchain bcm.BlockchainInfo, logger *loggin func (ds *Dumper) Transmit(stream Sender, startHeight, endHeight uint64, options Option) error { height := endHeight - if height <= 0 { + if height == 0 { height = ds.blockchain.LastBlockHeight() } st, err := ds.state.LoadHeight(height) diff --git a/event/pubsub/pubsub_test.go b/event/pubsub/pubsub_test.go index 76525243d..94da8c07d 100644 --- a/event/pubsub/pubsub_test.go +++ b/event/pubsub/pubsub_test.go @@ -111,11 +111,11 @@ func TestResubscribe(t *testing.T) { defer s.Stop() ctx := context.Background() - ch, err := s.Subscribe(ctx, clientID, query.Empty{}, 1) + _, err := s.Subscribe(ctx, clientID, query.Empty{}, 1) require.NoError(t, err) err = s.Unsubscribe(ctx, clientID, query.Empty{}) require.NoError(t, err) - ch, err = s.Subscribe(ctx, clientID, query.Empty{}, 1) + ch, err := s.Subscribe(ctx, clientID, query.Empty{}, 1) require.NoError(t, err) err = s.Publish(ctx, "Cable") diff --git a/event/query/builder.go b/event/query/builder.go index 249ae1b7d..4e8d3dd3c 100644 --- a/event/query/builder.go +++ b/event/query/builder.go @@ -203,6 +203,8 @@ func StringFromValue(value interface{}) string { switch v := value.(type) { case string: return v + case time.Time: + return timeString + " " + v.Format(time.RFC3339) case encoding.TextMarshaler: bs, _ := v.MarshalText() return string(bs) @@ -229,8 +231,6 @@ func StringFromValue(value interface{}) string { return strconv.FormatFloat(float64(v), 'f', -1, 32) case float64: return strconv.FormatFloat(float64(v), 'f', -1, 64) - case time.Time: - return timeString + " " + v.Format(time.RFC3339) default: if rv.Kind() == reflect.Slice { values := make([]string, rv.Len()) diff --git a/event/query/reflect_tagged_test.go b/event/query/reflect_tagged_test.go index 7aec3fd99..3e2392ec8 100644 --- a/event/query/reflect_tagged_test.go +++ b/event/query/reflect_tagged_test.go @@ -10,12 +10,11 @@ import ( ) type testTaggable struct { - Foo string - Bar string - Baz binary.HexBytes - Address crypto.Address - Indices []int - unexported int + Foo string + Bar string + Baz binary.HexBytes + Address crypto.Address + Indices []int } func TestReflectTagged_Keys(t *testing.T) { diff --git a/event/query/tags.go b/event/query/tags.go index 2cb5d13cd..f1ace8e45 100644 --- a/event/query/tags.go +++ b/event/query/tags.go @@ -52,9 +52,8 @@ func (ts TagMap) Keys() []string { } type CombinedTags struct { - keys []string - ks map[string][]Tagged - concat bool + keys []string + ks map[string][]Tagged } func NewCombinedTags() *CombinedTags { diff --git a/execution/contexts/name_context.go b/execution/contexts/name_context.go index aeb8896a5..398c9e875 100644 --- a/execution/contexts/name_context.go 
+++ b/execution/contexts/name_context.go @@ -138,7 +138,7 @@ func (ctx *NameContext) Execute(txe *exec.TxExecution, p payload.Payload) error } } else { if expiresIn < names.MinNameRegistrationPeriod { - return fmt.Errorf("Names must be registered for at least %d blocks", names.MinNameRegistrationPeriod) + return fmt.Errorf("names must be registered for at least %d blocks", names.MinNameRegistrationPeriod) } // entry does not exist, so create it entry = &names.Entry{ diff --git a/execution/contexts/proposal_context.go b/execution/contexts/proposal_context.go index 16f3f0b50..50e30bfd8 100644 --- a/execution/contexts/proposal_context.go +++ b/execution/contexts/proposal_context.go @@ -259,7 +259,7 @@ func validateProposalStrings(proposal *payload.Proposal) error { } func validateStringPrintable(data string) bool { - for _, r := range []rune(data) { + for _, r := range data { if !unicode.IsPrint(r) { return false } diff --git a/execution/evm/abi/abi.go b/execution/evm/abi/abi.go index 9f0a1b24c..c12013312 100644 --- a/execution/evm/abi/abi.go +++ b/execution/evm/abi/abi.go @@ -451,10 +451,6 @@ func (e EVMInt) unpack(data []byte, offset int, v interface{}) (int, error) { return ElementSize, nil } -func (e EVMInt) fixedSize() int { - return ElementSize -} - func (e EVMInt) Dynamic() bool { return false } @@ -597,10 +593,6 @@ func (e EVMBytes) unpack(data []byte, offset int, v interface{}) (int, error) { return ElementSize, nil } -func (e EVMBytes) fixedSize() int { - return ElementSize -} - func (e EVMBytes) Dynamic() bool { return e.M == 0 } @@ -689,10 +681,6 @@ func (e EVMFixed) unpack(data []byte, offset int, v interface{}) (int, error) { return 0, fmt.Errorf("unpacking of %s not implemented, patches welcome", e.GetSignature()) } -func (e EVMFixed) fixedSize() int { - return ElementSize -} - func (e EVMFixed) Dynamic() bool { return false } @@ -763,7 +751,7 @@ func readArgSpec(argsJ []ArgumentJSON) ([]Argument, error) { args[i].Indexed = a.Indexed baseType := a.Type - isArray := regexp.MustCompile("(.*)\\[([0-9]+)\\]") + isArray := regexp.MustCompile(`(.*)\[([0-9]+)\]`) m := isArray.FindStringSubmatch(a.Type) if m != nil { args[i].IsArray = true @@ -818,7 +806,7 @@ func readArgSpec(argsJ []ArgumentJSON) ([]Argument, error) { if M < 8 || M > 256 || (M%8) != 0 { return nil, fmt.Errorf("%s is not valid type", baseType) } - if N <= 0 || N > 80 { + if N == 0 || N > 80 { return nil, fmt.Errorf("%s is not valid type", baseType) } if m[1] == "fixed" { @@ -1366,7 +1354,7 @@ func unpack(argSpec []Argument, data []byte, getArg func(int) interface{}) error return err } offset += l - l, err = a.EVM.unpack(data, int(o), e) + _, err = a.EVM.unpack(data, int(o), e) if err != nil { return err } @@ -1396,7 +1384,7 @@ func unpack(argSpec []Argument, data []byte, getArg func(int) interface{}) error // We have been asked to return the value as a string; make intermediate // array of strings; we will concatenate after intermediate := make([]interface{}, a.ArrayLength) - for i, _ := range intermediate { + for i := range intermediate { intermediate[i] = new(string) } array = &intermediate @@ -1437,11 +1425,11 @@ func unpack(argSpec []Argument, data []byte, getArg func(int) interface{}) error if _, ok := arg.(*string); ok { // We have been asked to return the value as a string; make intermediate // array of strings; we will concatenate after - for i, _ := range intermediate { + for i := range intermediate { intermediate[i] = new(string) } } else { - for i, _ := range intermediate { + for i := range intermediate { 
intermediate[i] = a.EVM.getGoType() } } diff --git a/execution/evm/abi/core.go b/execution/evm/abi/core.go index 0ebd31f68..eb5dd9e32 100644 --- a/execution/evm/abi/core.go +++ b/execution/evm/abi/core.go @@ -150,7 +150,7 @@ func readAbi(root, contract string, logger *logging.Logger) (string, error) { p = path.Join(root, stripHex(contract)+".bin") if _, err = os.Stat(p); err != nil { logger.TraceMsg("abifile not found", "tried", p) - return "", fmt.Errorf("Abi doesn't exist for =>\t%s", p) + return "", fmt.Errorf("abi doesn't exist for =>\t%s", p) } } logger.TraceMsg("Found ABI file", "path", p) diff --git a/execution/evm/abi/core_test.go b/execution/evm/abi/core_test.go index 307ebf2ae..b88f13e7c 100644 --- a/execution/evm/abi/core_test.go +++ b/execution/evm/abi/core_test.go @@ -335,7 +335,7 @@ func TestUnpacker(t *testing.T) { EVMInt{M: 256}, pad([]byte{42}, 32, true), new(int64), - func() *int64 { var v int64; v = 42; return &v }(), + func() *int64 { var v int64 = 42; return &v }(), }, { EVMInt{M: 256}, @@ -365,7 +365,7 @@ func TestUnpacker(t *testing.T) { EVMInt{M: 256}, pad([]byte{0xfd, 0xca, 0, 0, 0, 0, 0, 0}, 32, true), new(uint64), - func() *uint64 { var v uint64; v = 0xfdca000000000000; return &v }(), + func() *uint64 { var v uint64 = 0xfdca000000000000; return &v }(), }, { EVMInt{M: 256}, @@ -378,7 +378,7 @@ func TestUnpacker(t *testing.T) { EVMInt{M: 256}, hexToBytes(t, "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD6"), new(int64), - func() *int64 { var v int64; v = -42; return &v }(), + func() *int64 { var v int64 = -42; return &v }(), }, { EVMInt{M: 256}, @@ -402,7 +402,7 @@ func TestUnpacker(t *testing.T) { EVMInt{M: 256}, hexToBytes(t, "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF35010124111"), new(int64), - func() *int64 { var v int64; v = -0xcafefedbeef; return &v }(), + func() *int64 { var v int64 = -0xcafefedbeef; return &v }(), }, { EVMInt{M: 256}, @@ -418,37 +418,37 @@ func TestUnpacker(t *testing.T) { EVMUint{M: 256}, pad([]byte{42}, 32, true), new(int64), - func() *int64 { var v int64; v = 42; return &v }(), + func() *int64 { var v int64 = 42; return &v }(), }, { EVMUint{M: 256}, pad([]byte{42}, 32, true), new(int32), - func() *int32 { var v int32; v = 42; return &v }(), + func() *int32 { var v int32 = 42; return &v }(), }, { EVMUint{M: 256}, pad([]byte{0x7f, 0xff}, 32, true), new(int16), - func() *int16 { var v int16; v = 0x7fff; return &v }(), + func() *int16 { var v int16 = 0x7fff; return &v }(), }, { EVMUint{M: 256}, pad([]byte{0xfd, 0xca}, 32, true), new(uint16), - func() *uint16 { var v uint16; v = 0xfdca; return &v }(), + func() *uint16 { var v uint16 = 0xfdca; return &v }(), }, { EVMUint{M: 256}, pad([]byte{0xfd, 0xca}, 32, true), new(uint32), - func() *uint32 { var v uint32; v = 0xfdca; return &v }(), + func() *uint32 { var v uint32 = 0xfdca; return &v }(), }, { EVMUint{M: 256}, pad([]byte{0xfd, 0xca, 0, 0, 0, 0, 0, 0}, 32, true), new(uint64), - func() *uint64 { var v uint64; v = 0xfdca000000000000; return &v }(), + func() *uint64 { var v uint64 = 0xfdca000000000000; return &v }(), }, { EVMUint{M: 256}, diff --git a/execution/evm/event_sink.go b/execution/evm/event_sink.go index 677d637da..04642db24 100644 --- a/execution/evm/event_sink.go +++ b/execution/evm/event_sink.go @@ -27,7 +27,6 @@ func (es *noopEventSink) Log(log *exec.LogEvent) error { type logFreeEventSink struct { EventSink - error error } func NewLogFreeEventSink(eventSink EventSink) *logFreeEventSink { diff --git a/execution/evm/snative_test.go 
b/execution/evm/snative_test.go index c6a26c250..a42ffa231 100644 --- a/execution/evm/snative_test.go +++ b/execution/evm/snative_test.go @@ -86,7 +86,7 @@ func TestSNativeContractDescription_Dispatch(t *testing.T) { gas := uint64(1000) // Should fail since we have no permissions - retValue, err := contract.Dispatch(cache, caller.Address, bc.MustSplice(funcID[:], grantee.Address, + _, err = contract.Dispatch(cache, caller.Address, bc.MustSplice(funcID[:], grantee.Address, permFlagToWord256(permission.CreateAccount)), &gas, logger) if !assert.Error(t, err, "Should fail due to lack of permissions") { return @@ -96,7 +96,7 @@ func TestSNativeContractDescription_Dispatch(t *testing.T) { // Grant all permissions and dispatch should success cache.SetPermission(caller.Address, permission.AddRole, true) require.NoError(t, cache.Error()) - retValue, err = contract.Dispatch(cache, caller.Address, bc.MustSplice(funcID[:], + retValue, err := contract.Dispatch(cache, caller.Address, bc.MustSplice(funcID[:], grantee.Address.Word256(), permFlagToWord256(permission.CreateAccount)), &gas, logger) assert.NoError(t, err) assert.Equal(t, retValue, LeftPadBytes([]byte{1}, 32)) @@ -137,16 +137,6 @@ func permFlagToWord256(permFlag permission.PermFlag) Word256 { return Uint64ToWord256(uint64(permFlag)) } -func allAccountPermissions() permission.AccountPermissions { - return permission.AccountPermissions{ - Base: permission.BasePermissions{ - Perms: permission.AllPermFlags, - SetBit: permission.AllPermFlags, - }, - Roles: []string{}, - } -} - // turns the solidity compiler function summary into a map to drive signature // test func idToSignatureMap() map[string]string { diff --git a/execution/evm/stack.go b/execution/evm/stack.go index ace29114e..cbaa5d03a 100644 --- a/execution/evm/stack.go +++ b/execution/evm/stack.go @@ -16,14 +16,11 @@ package evm import ( "fmt" - - "github.com/hyperledger/burrow/crypto" - - "math/big" - "math" + "math/big" . "github.com/hyperledger/burrow/binary" + "github.com/hyperledger/burrow/crypto" "github.com/hyperledger/burrow/execution/errors" ) diff --git a/execution/evm/vm_test.go b/execution/evm/vm_test.go index ad23d42c5..d3f3ac897 100644 --- a/execution/evm/vm_test.go +++ b/execution/evm/vm_test.go @@ -961,7 +961,7 @@ func TestMemoryBounds(t *testing.T) { code = MustSplice(code, storeAtEnd(), MLOAD) } require.NoError(t, cache.Error()) - output, err = ourVm.Call(cache, NewNoopEventSink(), caller, callee, MustSplice(code, storeAtEnd(), returnAfterStore()), + _, err = ourVm.Call(cache, NewNoopEventSink(), caller, callee, MustSplice(code, storeAtEnd(), returnAfterStore()), nil, 0, &gas) assert.Error(t, err, "Should hit memory out of bounds") } @@ -1381,6 +1381,7 @@ func TestDataStackOverflow(t *testing.T) { // Input is the function hash of `get()` input, err := hex.DecodeString("6d4ce63c") + require.NoError(t, err) _, err = ourVm.Call(cache, eventSink, account1, account2, contractCode, input, 0, &gas) assertErrorCode(t, errors.ErrorCodeDataStackOverflow, err, "Should be stack overflow") diff --git a/execution/execution_test.go b/execution/execution_test.go index 1e506ae58..6462db242 100644 --- a/execution/execution_test.go +++ b/execution/execution_test.go @@ -608,6 +608,7 @@ func TestCreateAccountPermission(t *testing.T) { } tx.AddOutput(users[7].GetAddress(), 10) err = exe.signExecuteCommit(tx, users[:2]...) 
+ require.NoError(t, err) // Two inputs, both with send, both with create, two outputs (one known, one unknown) should pass tx = payload.NewSendTx() @@ -1574,8 +1575,6 @@ func addressPtr(account *acm.Account) *crypto.Address { return &accountAddresss } -var ExceptionTimeOut = errors.NewException(errors.ErrorCodeGeneric, "timed out waiting for event") - type testExecutor struct { *executor *bcm.Blockchain diff --git a/execution/state/validators.go b/execution/state/validators.go index 96e0b5f66..d43dc1385 100644 --- a/execution/state/validators.go +++ b/execution/state/validators.go @@ -35,6 +35,9 @@ func LoadValidatorRing(version int64, ringSize int, ring := validator.NewRing(nil, ringSize) // Load the IAVL state rs.Forest, err = getImmutable(startVersion) + if err != nil { + return nil, err + } // Write the validator state at startVersion from IAVL tree into the ring's current bucket delta err = validator.Write(ring, rs) if err != nil { diff --git a/execution/transactor.go b/execution/transactor.go index b2da0368b..944588447 100644 --- a/execution/transactor.go +++ b/execution/transactor.go @@ -196,6 +196,9 @@ func (trans *Transactor) SignTx(txEnv *txs.Envelope) (*txs.Envelope, error) { signers := make([]acm.AddressableSigner, len(inputs)) for i, input := range inputs { signers[i], err = trans.MempoolAccounts.SigningAccount(input.Address) + if err != nil { + return nil, err + } } err = txEnv.Sign(signers...) if err != nil { diff --git a/storage/cache_db.go b/forensics/cache_db.go similarity index 84% rename from storage/cache_db.go rename to forensics/cache_db.go index af5607c8e..293f0ed2e 100644 --- a/storage/cache_db.go +++ b/forensics/cache_db.go @@ -1,15 +1,16 @@ -package storage +package forensics import ( + "github.com/hyperledger/burrow/storage" dbm "github.com/tendermint/tendermint/libs/db" ) type CacheDB struct { cache *KVCache - backend KVIterableReader + backend storage.KVIterableReader } -func NewCacheDB(backend KVIterableReader) *CacheDB { +func NewCacheDB(backend storage.KVIterableReader) *CacheDB { return &CacheDB{ cache: NewKVCache(), backend: backend, @@ -33,13 +34,13 @@ func (cdb *CacheDB) Has(key []byte) bool { return !deleted && (value != nil || cdb.backend.Has(key)) } -func (cdb *CacheDB) Iterator(low, high []byte) KVIterator { +func (cdb *CacheDB) Iterator(low, high []byte) storage.KVIterator { // Keys from cache will sort first because of order in MultiIterator and Uniq will take the first KVs so KVs // appearing in cache will override values from backend. 
return Uniq(NewMultiIterator(false, cdb.cache.Iterator(low, high), cdb.backend.Iterator(low, high))) } -func (cdb *CacheDB) ReverseIterator(low, high []byte) KVIterator { +func (cdb *CacheDB) ReverseIterator(low, high []byte) storage.KVIterator { return Uniq(NewMultiIterator(true, cdb.cache.ReverseIterator(low, high), cdb.backend.ReverseIterator(low, high))) } @@ -69,7 +70,7 @@ func (cdb *CacheDB) NewBatch() dbm.Batch { } } -func (cdb *CacheDB) Commit(writer KVWriter) { +func (cdb *CacheDB) Commit(writer storage.KVWriter) { cdb.cache.WriteTo(writer) cdb.cache.Reset() } diff --git a/storage/cache_db_test.go b/forensics/cache_db_test.go similarity index 86% rename from storage/cache_db_test.go rename to forensics/cache_db_test.go index 67ce7a5c3..08b93ef3b 100644 --- a/storage/cache_db_test.go +++ b/forensics/cache_db_test.go @@ -1,4 +1,4 @@ -package storage +package forensics import ( "fmt" @@ -11,10 +11,10 @@ import ( func TestBatchCommit(t *testing.T) { db := dbm.NewMemDB() cdb := NewCacheDB(db) - foo := bz("foo") - bam := bz("bam") - bosh := bz("bosh") - boom := bz("boom") + foo := []byte("foo") + bam := []byte("bam") + bosh := []byte("bosh") + boom := []byte("boom") db.Set(foo, bam) assert.Equal(t, bam, cdb.Get(foo), "underlying writes should be seen") cdb.Set(foo, bosh) @@ -39,10 +39,10 @@ func TestBatchCommit(t *testing.T) { func TestCacheDB_Iterator(t *testing.T) { db := dbm.NewMemDB() cdb := NewCacheDB(db) - foo := bz("foo") - bam := bz("bam") - bosh := bz("bosh") - boom := bz("boom") + foo := []byte("foo") + bam := []byte("bam") + bosh := []byte("bosh") + boom := []byte("boom") db.Set(append(foo, foo...), foo) db.Set(append(foo, bam...), bam) diff --git a/storage/channel_iterator.go b/forensics/channel_iterator.go similarity index 99% rename from storage/channel_iterator.go rename to forensics/channel_iterator.go index a59eb737e..897e86b4f 100644 --- a/storage/channel_iterator.go +++ b/forensics/channel_iterator.go @@ -1,4 +1,4 @@ -package storage +package forensics import ( "bytes" diff --git a/forensics/channel_iterator_test.go b/forensics/channel_iterator_test.go new file mode 100644 index 000000000..998d2bcb9 --- /dev/null +++ b/forensics/channel_iterator_test.go @@ -0,0 +1,52 @@ +package forensics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tendermint/libs/db" +) + +func TestNewChannelIterator(t *testing.T) { + ch := make(chan KVPair) + go sendKVPair(ch, kvPairs("a", "hello", "b", "channel", "c", "this is nice")) + ci := NewChannelIterator(ch, []byte("a"), []byte("c")) + checkItem(t, ci, []byte("a"), []byte("hello")) + checkNext(t, ci, true) + checkItem(t, ci, []byte("b"), []byte("channel")) + checkNext(t, ci, true) + checkItem(t, ci, []byte("c"), []byte("this is nice")) + checkNext(t, ci, false) + checkInvalid(t, ci) +} + +func checkInvalid(t *testing.T, itr dbm.Iterator) { + checkValid(t, itr, false) + checkKeyPanics(t, itr) + checkValuePanics(t, itr) + checkNextPanics(t, itr) +} + +func checkValid(t *testing.T, itr dbm.Iterator, expected bool) { + valid := itr.Valid() + require.Equal(t, expected, valid) +} + +func checkNext(t *testing.T, itr dbm.Iterator, expected bool) { + itr.Next() + valid := itr.Valid() + require.Equal(t, expected, valid) +} + +func checkNextPanics(t *testing.T, itr dbm.Iterator) { + assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") +} +func checkKeyPanics(t *testing.T, itr dbm.Iterator) { + assert.Panics(t, func() { itr.Key() 
}, "checkKeyPanics expected panic but didn't") +} + +func checkValuePanics(t *testing.T, itr dbm.Iterator) { + assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") +} diff --git a/storage/kvcache.go b/forensics/kvcache.go similarity index 86% rename from storage/kvcache.go rename to forensics/kvcache.go index c17004c65..16cafb532 100644 --- a/storage/kvcache.go +++ b/forensics/kvcache.go @@ -1,9 +1,11 @@ -package storage +package forensics import ( "bytes" "sort" "sync" + + "github.com/hyperledger/burrow/storage" ) type KVCache struct { @@ -91,22 +93,22 @@ func (kvc *KVCache) Delete(key []byte) { kvc.cache[skey] = vi } -func (kvc *KVCache) Iterator(low, high []byte) KVIterator { +func (kvc *KVCache) Iterator(low, high []byte) storage.KVIterator { kvc.RLock() defer kvc.RUnlock() - low, high = NormaliseDomain(low, high) + low, high = storage.NormaliseDomain(low, high) return kvc.newIterator(low, high, false) } -func (kvc *KVCache) ReverseIterator(low, high []byte) KVIterator { +func (kvc *KVCache) ReverseIterator(low, high []byte) storage.KVIterator { kvc.RLock() defer kvc.RUnlock() - low, high = NormaliseDomain(low, high) + low, high = storage.NormaliseDomain(low, high) return kvc.newIterator(low, high, true) } // Writes contents of cache to backend without flushing the cache -func (kvc *KVCache) WriteTo(writer KVWriter) { +func (kvc *KVCache) WriteTo(writer storage.KVWriter) { kvc.Lock() defer kvc.Unlock() for k, vi := range kvc.cache { @@ -133,7 +135,7 @@ func (kvc *KVCache) sortedKeysInDomain(low, high []byte) [][]byte { startIndex := len(kvc.keys) for i, key := range sortedKeys { // !(key < start) => key >= start then include (inclusive start) - if CompareKeys(key, low) != -1 { + if storage.CompareKeys(key, low) != -1 { startIndex = i break } @@ -142,7 +144,7 @@ func (kvc *KVCache) sortedKeysInDomain(low, high []byte) [][]byte { sortedKeys = sortedKeys[startIndex:] for i, key := range sortedKeys { // !(key < end) => key >= end then exclude (exclusive end) - if CompareKeys(key, high) != -1 { + if storage.CompareKeys(key, high) != -1 { sortedKeys = sortedKeys[:i] break } @@ -209,15 +211,3 @@ func (kvi *KVCacheIterator) sliceIndex() int { } return kvi.keyIndex } - -func insertKey(sortedKeys [][]byte, key []byte) [][]byte { - i := sort.Search(len(sortedKeys), func(i int) bool { - // Smallest sortedKey such that key - return bytes.Compare(sortedKeys[i], key) > -1 - }) - // ensure space - sortedKeys = append(sortedKeys, nil) - copy(sortedKeys[i+1:], sortedKeys[i:]) - sortedKeys[i] = key - return sortedKeys -} diff --git a/storage/kvcache_test.go b/forensics/kvcache_test.go similarity index 56% rename from storage/kvcache_test.go rename to forensics/kvcache_test.go index 7d1e90910..6c33d80a3 100644 --- a/storage/kvcache_test.go +++ b/forensics/kvcache_test.go @@ -1,10 +1,12 @@ -package storage +package forensics import ( bin "encoding/binary" "math/rand" "testing" + "github.com/hyperledger/burrow/storage" + "github.com/stretchr/testify/assert" ) @@ -29,20 +31,20 @@ func TestKVCache_Iterator(t *testing.T) { } func TestKVCache_Iterator2(t *testing.T) { - assert.Equal(t, []string{"b"}, testIterate(bz("b"), bz("c"), false, "a", "b", "c", "d")) - assert.Equal(t, []string{"b", "c"}, testIterate(bz("b"), bz("cc"), false, "a", "b", "c", "d")) - assert.Equal(t, []string{"a", "b", "c", "d"}, testIterate(bz(""), nil, false, "a", "b", "c", "d")) - assert.Equal(t, []string{"d", "c", "b", "a"}, testIterate(bz(""), nil, true, "a", "b", "c", "d")) + assert.Equal(t, 
[]string{"b"}, testIterate([]byte("b"), []byte("c"), false, "a", "b", "c", "d")) + assert.Equal(t, []string{"b", "c"}, testIterate([]byte("b"), []byte("cc"), false, "a", "b", "c", "d")) + assert.Equal(t, []string{"a", "b", "c", "d"}, testIterate([]byte(""), nil, false, "a", "b", "c", "d")) + assert.Equal(t, []string{"d", "c", "b", "a"}, testIterate([]byte(""), nil, true, "a", "b", "c", "d")) assert.Equal(t, []string{"a", "b", "c", "d"}, testIterate(nil, nil, false, "a", "b", "c", "d")) - assert.Equal(t, []string{}, testIterate(bz(""), bz(""), false, "a", "b", "c", "d")) - assert.Equal(t, []string{}, testIterate(bz("ab"), bz("ab"), false, "a", "b", "c", "d")) - assert.Equal(t, []string{"a"}, testIterate(bz("0"), bz("ab"), true, "a", "b", "c", "d")) - assert.Equal(t, []string{"c", "b", "a"}, testIterate(bz("a"), bz("c1"), true, "a", "b", "c", "d")) - assert.Equal(t, []string{"b", "a"}, testIterate(bz("a"), bz("c"), true, "a", "b", "c", "d")) - assert.Equal(t, []string{"b", "a"}, testIterate(bz("a"), bz("c"), true, "a", "b", "c", "d")) - assert.Equal(t, []string{}, testIterate(bz("c"), bz("e"), true, "a", "b")) - assert.Equal(t, []string{}, testIterate(bz("c"), bz("e"), true, "z", "f")) + assert.Equal(t, []string{}, testIterate([]byte(""), []byte(""), false, "a", "b", "c", "d")) + assert.Equal(t, []string{}, testIterate([]byte("ab"), []byte("ab"), false, "a", "b", "c", "d")) + assert.Equal(t, []string{"a"}, testIterate([]byte("0"), []byte("ab"), true, "a", "b", "c", "d")) + assert.Equal(t, []string{"c", "b", "a"}, testIterate([]byte("a"), []byte("c1"), true, "a", "b", "c", "d")) + assert.Equal(t, []string{"b", "a"}, testIterate([]byte("a"), []byte("c"), true, "a", "b", "c", "d")) + assert.Equal(t, []string{"b", "a"}, testIterate([]byte("a"), []byte("c"), true, "a", "b", "c", "d")) + assert.Equal(t, []string{}, testIterate([]byte("c"), []byte("e"), true, "a", "b")) + assert.Equal(t, []string{}, testIterate([]byte("c"), []byte("e"), true, "z", "f")) } func BenchmarkKVCache_Iterator_1E6_Inserts(b *testing.B) { @@ -81,7 +83,7 @@ func testIterate(low, high []byte, reverse bool, keys ...string) []string { bs := []byte(k) kvc.Set(bs, bs) } - var it KVIterator + var it storage.KVIterator if reverse { it = kvc.ReverseIterator(low, high) } else { diff --git a/storage/multi_iterator.go b/forensics/multi_iterator.go similarity index 79% rename from storage/multi_iterator.go rename to forensics/multi_iterator.go index 8152e5cca..a74cec68d 100644 --- a/storage/multi_iterator.go +++ b/forensics/multi_iterator.go @@ -1,21 +1,23 @@ -package storage +package forensics import ( "bytes" "container/heap" + + "github.com/hyperledger/burrow/storage" ) type MultiIterator struct { start []byte end []byte // Acts as priority queue based on sort order of current key in each iterator - iterators []KVIterator - iteratorOrder map[KVIterator]int + iterators []storage.KVIterator + iteratorOrder map[storage.KVIterator]int lessComp int } // MultiIterator iterates in order over a series o -func NewMultiIterator(reverse bool, iterators ...KVIterator) *MultiIterator { +func NewMultiIterator(reverse bool, iterators ...storage.KVIterator) *MultiIterator { // reuse backing array lessComp := -1 if reverse { @@ -23,7 +25,7 @@ func NewMultiIterator(reverse bool, iterators ...KVIterator) *MultiIterator { } mi := &MultiIterator{ iterators: iterators, - iteratorOrder: make(map[KVIterator]int), + iteratorOrder: make(map[storage.KVIterator]int), lessComp: lessComp, } mi.init() @@ -37,10 +39,10 @@ func (mi *MultiIterator) init() { if 
it.Valid() { validIterators = append(validIterators, it) start, end := it.Domain() - if i == 0 || CompareKeys(start, mi.start) == mi.lessComp { + if i == 0 || storage.CompareKeys(start, mi.start) == mi.lessComp { mi.start = start } - if i == 0 || CompareKeys(mi.end, end) == mi.lessComp { + if i == 0 || storage.CompareKeys(mi.end, end) == mi.lessComp { mi.end = end } } else { @@ -68,7 +70,7 @@ func (mi *MultiIterator) Swap(i, j int) { } func (mi *MultiIterator) Push(x interface{}) { - mi.iterators = append(mi.iterators, x.(KVIterator)) + mi.iterators = append(mi.iterators, x.(storage.KVIterator)) } func (mi *MultiIterator) Pop() interface{} { @@ -88,7 +90,7 @@ func (mi *MultiIterator) Valid() bool { func (mi *MultiIterator) Next() { // Always advance the lowest iterator - the same one we serve the KV pair from - it := heap.Pop(mi).(KVIterator) + it := heap.Pop(mi).(storage.KVIterator) it.Next() if it.Valid() { heap.Push(mi, it) @@ -103,7 +105,7 @@ func (mi *MultiIterator) Value() []byte { return mi.Peek().Value() } -func (mi *MultiIterator) Peek() KVIterator { +func (mi *MultiIterator) Peek() storage.KVIterator { return mi.iterators[0] } diff --git a/storage/multi_iterator_test.go b/forensics/multi_iterator_test.go similarity index 78% rename from storage/multi_iterator_test.go rename to forensics/multi_iterator_test.go index f054044a0..57f1f6a19 100644 --- a/storage/multi_iterator_test.go +++ b/forensics/multi_iterator_test.go @@ -1,6 +1,7 @@ -package storage +package forensics import ( + "sort" "testing" "github.com/stretchr/testify/assert" @@ -47,3 +48,19 @@ func TestDuplicateKeys(t *testing.T) { assert.Equal(t, []string{"dogs", "frogs", "bar", "zfoo"}, as, "duplicate keys should appear in iterator order") } + +func iteratorOver(kvp []KVPair, reverse ...bool) *ChannelIterator { + var sortable sort.Interface = KVPairs(kvp) + if len(reverse) > 0 && reverse[0] { + sortable = sort.Reverse(sortable) + } + sort.Stable(sortable) + ch := make(chan KVPair) + var start, end []byte + if len(kvp) > 0 { + start, end = kvp[0].Key, kvp[len(kvp)-1].Key + } + go sendKVPair(ch, kvp) + ci := NewChannelIterator(ch, start, end) + return ci +} diff --git a/forensics/replay.go b/forensics/replay.go index be5e0b35d..b5de6bd83 100644 --- a/forensics/replay.go +++ b/forensics/replay.go @@ -16,7 +16,6 @@ import ( "github.com/hyperledger/burrow/execution/state" "github.com/hyperledger/burrow/genesis" "github.com/hyperledger/burrow/logging" - "github.com/hyperledger/burrow/storage" "github.com/hyperledger/burrow/txs" "github.com/pkg/errors" dbm "github.com/tendermint/tendermint/libs/db" @@ -48,14 +47,14 @@ func NewReplay(dbDir string, genesisDoc *genesis.GenesisDoc, logger *logging.Log // burrowDB := core.NewBurrowDB(dbDir) // Avoid writing through to underlying DB db := dbm.NewDB(core.BurrowDBName, dbm.GoLevelDBBackend, dbDir) - cacheDB := storage.NewCacheDB(db) + cacheDB := NewCacheDB(db) return &Replay{ - Explorer: bcm.NewBlockExplorer(dbm.LevelDBBackend, path.Join(dbDir, "data")), - db: db, - cacheDB: cacheDB, - blockchain: bcm.NewBlockchain(cacheDB, genesisDoc), - genesisDoc: genesisDoc, - logger: logger, + Explorer: bcm.NewBlockExplorer(dbm.LevelDBBackend, path.Join(dbDir, "data")), + db: db, + cacheDB: cacheDB, + blockchain: bcm.NewBlockchain(cacheDB, genesisDoc), + genesisDoc: genesisDoc, + logger: logger, } } diff --git a/forensics/replay_test.go b/forensics/replay_test.go index 9bf6341e4..c12d9aba4 100644 --- a/forensics/replay_test.go +++ b/forensics/replay_test.go @@ -35,8 +35,9 @@ import ( //const 
criticalBlock uint64 = 6 // const goodDir = "/home/silas/burrows/production-t9/dealspace/002" -const badDir = "/home/silas/burrows/production-t9/dealspace/001" +const badDir = "/home/silas/burrows/production-t9/dealspace/001" const criticalBlock uint64 = 38 + //const criticalBlock uint64 = 52 func TestState(t *testing.T) { diff --git a/storage/unique_iterator.go b/forensics/unique_iterator.go similarity index 80% rename from storage/unique_iterator.go rename to forensics/unique_iterator.go index b1449914f..281ebdbc9 100644 --- a/storage/unique_iterator.go +++ b/forensics/unique_iterator.go @@ -1,13 +1,17 @@ -package storage +package forensics -import "bytes" +import ( + "bytes" + + "github.com/hyperledger/burrow/storage" +) type uniqueIterator struct { - source KVIterator + source storage.KVIterator prevKey []byte } -func Uniq(source KVIterator) *uniqueIterator { +func Uniq(source storage.KVIterator) *uniqueIterator { return &uniqueIterator{ source: source, } diff --git a/storage/unique_iterator_test.go b/forensics/unique_iterator_test.go similarity index 94% rename from storage/unique_iterator_test.go rename to forensics/unique_iterator_test.go index bf82bccab..3db8300fa 100644 --- a/storage/unique_iterator_test.go +++ b/forensics/unique_iterator_test.go @@ -1,4 +1,4 @@ -package storage +package forensics import ( "testing" diff --git a/storage/util_test.go b/forensics/util_test.go similarity index 58% rename from storage/util_test.go rename to forensics/util_test.go index fba4ad3d1..ecb56b37a 100644 --- a/storage/util_test.go +++ b/forensics/util_test.go @@ -1,11 +1,12 @@ -package storage +package forensics import ( - "sort" "strings" "testing" + "github.com/hyperledger/burrow/storage" "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tendermint/libs/db" ) func sendKVPair(ch chan<- KVPair, kvs []KVPair) { @@ -15,7 +16,7 @@ func sendKVPair(ch chan<- KVPair, kvs []KVPair) { close(ch) } -func collectIterator(it KVIterator) KVPairs { +func collectIterator(it storage.KVIterator) KVPairs { var kvp []KVPair for it.Valid() { kvp = append(kvp, KVPair{it.Key(), it.Value()}) @@ -33,7 +34,7 @@ func kvPairs(kvs ...string) KVPairs { return kvp } -func assertIteratorSorted(t *testing.T, it KVIterator, reverse bool) { +func assertIteratorSorted(t *testing.T, it storage.KVIterator, reverse bool) { prev := "" for it.Valid() { strKey := string(it.Key()) @@ -52,22 +53,8 @@ func assertIteratorSorted(t *testing.T, it KVIterator, reverse bool) { } } -func iteratorOver(kvp []KVPair, reverse ...bool) *ChannelIterator { - var sortable sort.Interface = KVPairs(kvp) - if len(reverse) > 0 && reverse[0] { - sortable = sort.Reverse(sortable) - } - sort.Stable(sortable) - ch := make(chan KVPair) - var start, end []byte - if len(kvp) > 0 { - start, end = kvp[0].Key, kvp[len(kvp)-1].Key - } - go sendKVPair(ch, kvp) - ci := NewChannelIterator(ch, start, end) - return ci -} - -func bz(s string) []byte { - return []byte(s) +func checkItem(t *testing.T, itr dbm.Iterator, key []byte, value []byte) { + k, v := itr.Key(), itr.Value() + assert.Exactly(t, key, k) + assert.Exactly(t, value, v) } diff --git a/genesis/spec/presets.go b/genesis/spec/presets.go index 39d9b0ca5..16774d030 100644 --- a/genesis/spec/presets.go +++ b/genesis/spec/presets.go @@ -167,17 +167,3 @@ func mergeStrings(as, bs []string) []string { sort.Strings(strs) return strs } - -func addUint64Pointers(a, b *uint64) *uint64 { - if a == nil && b == nil { - return nil - } - amt := uint64(0) - if a != nil { - amt += *a - } - if b != nil { - amt += 
*b - } - return &amt -} diff --git a/go.mod b/go.mod index 320a33d04..19d8da494 100644 --- a/go.mod +++ b/go.mod @@ -62,4 +62,5 @@ require ( google.golang.org/grpc v1.20.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/yaml.v2 v2.2.2 + honnef.co/go/tools v0.0.0-20190614002413-cb51c254f01b // indirect ) diff --git a/go.sum b/go.sum index e5b763c27..9d0bf53f0 100644 --- a/go.sum +++ b/go.sum @@ -71,6 +71,7 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pO github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -100,6 +101,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGi github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4= @@ -136,6 +140,7 @@ github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nL github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -181,9 +186,11 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -193,6 +200,7 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -208,6 +216,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190530171427-2b03ca6e44eb h1:mnQlcVx8Qq8L70HV0DxUGuiuAtiEHTwF1gYJE/EL9nU= +golang.org/x/tools v0.0.0-20190530171427-2b03ca6e44eb/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= @@ -218,6 +228,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -226,3 +238,5 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190614002413-cb51c254f01b h1:SWAO5HXhUnouVG7YwFyCqej4vr94EiYYu4O7YRXsxBU= +honnef.co/go/tools v0.0.0-20190614002413-cb51c254f01b/go.mod h1:JlmFZigtG9vBVR3QGIQ9g/Usz4BzH+Xm6Z8iHQWRYUw= diff --git a/integration/rpctest/helpers.go b/integration/rpctest/helpers.go index 3aab10ecd..cdfa99def 100644 --- a/integration/rpctest/helpers.go +++ b/integration/rpctest/helpers.go @@ -25,8 +25,6 @@ import ( // so... (I didn't say it had to make sense): const UpsieDownsieCallCount = 1 + (34 - 17) + 1 + (34 - 23) -var i = UpsieDownsieCallCount - var PrivateAccounts = integration.MakePrivateAccounts(10) // make keys var GenesisDoc = integration.TestGenesisDoc(PrivateAccounts) diff --git a/keys/core.go b/keys/core.go index aaaa92071..d4176c4b0 100644 --- a/keys/core.go +++ b/keys/core.go @@ -39,7 +39,7 @@ func returnNamesDir(dir string) (string, error) { func writeKey(keyDir string, addr, keyJson []byte) ([]byte, error) { dir, err := returnDataDir(keyDir) if err != nil { - return nil, fmt.Errorf("Failed to get keys dir: %v", err) + return nil, fmt.Errorf("failed to get keys dir: %v", err) } if err := WriteKeyFile(addr, dir, keyJson); err != nil { return nil, err @@ -60,7 +60,7 @@ func coreNameAdd(keysDir, name, addr string) error { return err } if _, err := os.Stat(path.Join(dataDir, addr+".json")); err != nil { - return fmt.Errorf("Unknown key %s", addr) + return fmt.Errorf("unknown key %s", addr) } return ioutil.WriteFile(path.Join(namesDir, name), []byte(addr), 0600) } @@ -85,22 +85,6 @@ func coreNameList(keysDir string) (map[string]string, error) { return names, nil } -func coreAddrList(keysDir string) (map[int]string, error) { - dir, err := returnDataDir(keysDir) - if err != nil { - return nil, err - } - addrs := make(map[int]string) - fs, err := ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - for i := 0; i < len(fs); i++ { - addrs[i] = fs[i].Name() - } - return addrs, nil -} - func coreNameRm(keysDir string, name string) error { dir, err := returnNamesDir(keysDir) if err != nil { diff --git a/keys/key_client.go b/keys/key_client.go index 36c48af28..2414cf4e7 100644 --- a/keys/key_client.go +++ b/keys/key_client.go @@ -56,7 +56,7 @@ type remoteKeyClient struct { } func (l *localKeyClient) Sign(signAddress crypto.Address, message []byte) (*crypto.Signature, error) { - resp, err := l.ks.Sign(nil, &SignRequest{Address: signAddress.String(), Message: message}) + resp, err := l.ks.Sign(context.Background(), &SignRequest{Address: signAddress.String(), Message: message}) if err != nil { return nil, err } @@ -64,7 +64,7 @@ func (l *localKeyClient) Sign(signAddress crypto.Address, message []byte) (*cryp } func (l *localKeyClient) PublicKey(address crypto.Address) (publicKey crypto.PublicKey, err error) { - resp, err := l.ks.PublicKey(nil, &PubRequest{Address: address.String()}) + resp, err := l.ks.PublicKey(context.Background(), &PubRequest{Address: address.String()}) if err != nil { return crypto.PublicKey{}, err } @@ -77,7 +77,7 @@ func (l *localKeyClient) PublicKey(address crypto.Address) (publicKey crypto.Pub // Generate requests that a key be generate within the keys instance and returns the address func (l *localKeyClient) Generate(keyName string, curveType crypto.CurveType) (keyAddress crypto.Address, err error) { - resp, err := l.ks.GenerateKey(nil, &GenRequest{KeyName: keyName, CurveType: curveType.String()}) + resp, err := l.ks.GenerateKey(context.Background(), &GenRequest{KeyName: keyName, CurveType: 
curveType.String()}) if err != nil { return crypto.Address{}, err } diff --git a/keys/key_store.go b/keys/key_store.go index 86ae353a8..e782bd8d7 100644 --- a/keys/key_store.go +++ b/keys/key_store.go @@ -13,8 +13,7 @@ import ( "sync" "github.com/hyperledger/burrow/crypto" - "github.com/hyperledger/burrow/logging" - hex "github.com/tmthrgd/go-hex" + "github.com/tmthrgd/go-hex" "golang.org/x/crypto/scrypt" ) @@ -114,7 +113,6 @@ type KeyStore struct { sync.Mutex AllowBadFilePermissions bool keysDirPath string - logger *logging.Logger } func (ks *KeyStore) Gen(passphrase string, curveType crypto.CurveType) (key *Key, err error) { diff --git a/keys/server.go b/keys/server.go index cedaa5d60..975d58244 100644 --- a/keys/server.go +++ b/keys/server.go @@ -151,7 +151,7 @@ func (k *KeyStore) Hash(ctx context.Context, in *HashRequest) (*HashResponse, er hasher = sha256.New() // case "sha3": default: - return nil, fmt.Errorf("Unknown hash type %v", in.GetHashtype()) + return nil, fmt.Errorf("unknown hash type %v", in.GetHashtype()) } hasher.Write(in.GetMessage()) diff --git a/logging/lifecycle/lifecycle.go b/logging/lifecycle/lifecycle.go index 3ba9e1d25..d9eb037c2 100644 --- a/logging/lifecycle/lifecycle.go +++ b/logging/lifecycle/lifecycle.go @@ -58,13 +58,13 @@ func NewLoggerFromLoggingConfig(loggingConfig *logconfig.LoggingConfig) (*loggin // Hot swap logging config by replacing output loggers of passed InfoTraceLogger // with those built from loggingConfig -func SwapOutputLoggersFromLoggingConfig(logger *logging.Logger, loggingConfig *logconfig.LoggingConfig) (error, channels.Channel) { +func SwapOutputLoggersFromLoggingConfig(logger *logging.Logger, loggingConfig *logconfig.LoggingConfig) (channels.Channel, error) { outputLogger, errCh, err := loggerFromLoggingConfig(loggingConfig) if err != nil { - return err, channels.NewDeadChannel() + return channels.NewDeadChannel(), err } logger.SwapOutput(outputLogger) - return nil, errCh + return errCh, nil } func NewStdErrLogger() (*logging.Logger, error) { diff --git a/logging/logconfig/sinks.go b/logging/logconfig/sinks.go index 8cef644e2..2d7de7c91 100644 --- a/logging/logconfig/sinks.go +++ b/logging/logconfig/sinks.go @@ -74,7 +74,7 @@ func (mode filterMode) Exclude() bool { // The predicate should evaluate true if at least one of the key value predicates matches func (mode filterMode) MatchAny() bool { - return !mode.MatchAny() + return !mode.MatchAll() } // Sink configuration types diff --git a/logging/logger.go b/logging/logger.go index e1ac1d288..a88b8499a 100644 --- a/logging/logger.go +++ b/logging/logger.go @@ -17,6 +17,7 @@ package logging import ( "github.com/go-kit/kit/log" "github.com/hyperledger/burrow/logging/structure" + "github.com/hyperledger/burrow/util/slice" ) // InfoTraceLogger maintains provides two logging 'channels' that are interlaced @@ -149,6 +150,6 @@ func (l *Logger) WithScope(scopeName string) *Logger { // Record a structured log line with a message func Msg(logger log.Logger, message string, keyvals ...interface{}) error { - prepended := structure.CopyPrepend(keyvals, structure.MessageKey, message) + prepended := slice.CopyPrepend(keyvals, structure.MessageKey, message) return logger.Log(prepended...) 
} diff --git a/logging/loggers/burrow_format_logger.go b/logging/loggers/burrow_format_logger.go index 5f3c21e07..cf02702fd 100644 --- a/logging/loggers/burrow_format_logger.go +++ b/logging/loggers/burrow_format_logger.go @@ -16,13 +16,12 @@ package loggers import ( "fmt" - "time" - "sync" + "time" "github.com/go-kit/kit/log" "github.com/hyperledger/burrow/logging/structure" - hex "github.com/tmthrgd/go-hex" + "github.com/tmthrgd/go-hex" ) // Logger that implements some formatting conventions for burrow and burrow-client @@ -49,12 +48,12 @@ func (bfl *burrowFormatLogger) Log(keyvals ...interface{}) error { func(key interface{}, value interface{}) (interface{}, interface{}) { switch v := value.(type) { case string: + case time.Time: + value = v.Format(time.RFC3339Nano) case fmt.Stringer: value = v.String() case []byte: value = hex.EncodeUpperToString(v) - case time.Time: - value = v.Format(time.RFC3339Nano) } return structure.StringifyKey(key), value }) diff --git a/logging/loggers/stream_logger.go b/logging/loggers/stream_logger.go index 644f239ad..70ba74b57 100644 --- a/logging/loggers/stream_logger.go +++ b/logging/loggers/stream_logger.go @@ -17,10 +17,6 @@ const ( defaultFormatName = TerminalFormat ) -const ( - newline = '\n' -) - type Syncable interface { Sync() error } diff --git a/logging/loggers/stream_logger_test.go b/logging/loggers/stream_logger_test.go index 474228384..7ce9bd0b4 100644 --- a/logging/loggers/stream_logger_test.go +++ b/logging/loggers/stream_logger_test.go @@ -19,7 +19,7 @@ func TestNewStreamLogger(t *testing.T) { err = structure.Sync(logger) require.NoError(t, err) - assert.Equal(t, "oh=my\n", string(buf.Bytes())) + assert.Equal(t, "oh=my\n", buf.String()) } func TestNewTemplateLogger(t *testing.T) { diff --git a/logging/structure/structure.go b/logging/structure/structure.go index 2b47f2690..f9e196ec3 100644 --- a/logging/structure/structure.go +++ b/logging/structure/structure.go @@ -237,19 +237,6 @@ func DeleteAt(slice []interface{}, i int) []interface{} { return Delete(slice, i, 1) } -// Prepend elements to slice in the order they appear -func CopyPrepend(slice []interface{}, elements ...interface{}) []interface{} { - elementsLength := len(elements) - newSlice := make([]interface{}, len(slice)+elementsLength) - for i, e := range elements { - newSlice[i] = e - } - for i, e := range slice { - newSlice[elementsLength+i] = e - } - return newSlice -} - // Provides a canonical way to stringify keys func StringifyKey(key interface{}) string { switch key { diff --git a/logging/structure/structure_test.go b/logging/structure/structure_test.go index ee55756fb..e8064908b 100644 --- a/logging/structure/structure_test.go +++ b/logging/structure/structure_test.go @@ -17,6 +17,8 @@ package structure import ( "testing" + "github.com/hyperledger/burrow/util/slice" + "github.com/stretchr/testify/assert" ) @@ -99,8 +101,8 @@ func TestDelete(t *testing.T) { func TestCopyPrepend(t *testing.T) { assert.Equal(t, []interface{}{"three", 4, 1, "two"}, - CopyPrepend([]interface{}{1, "two"}, "three", 4)) - assert.Equal(t, []interface{}{}, CopyPrepend(nil)) - assert.Equal(t, []interface{}{1}, CopyPrepend(nil, 1)) - assert.Equal(t, []interface{}{1}, CopyPrepend([]interface{}{1})) + slice.CopyPrepend([]interface{}{1, "two"}, "three", 4)) + assert.Equal(t, []interface{}{}, slice.CopyPrepend(nil)) + assert.Equal(t, []interface{}{1}, slice.CopyPrepend(nil, 1)) + assert.Equal(t, []interface{}{1}, slice.CopyPrepend([]interface{}{1})) } diff --git a/permission/perm_flag.go 
b/permission/perm_flag.go index 8284cb728..dedc52825 100644 --- a/permission/perm_flag.go +++ b/permission/perm_flag.go @@ -58,16 +58,16 @@ const ( DefaultPermFlags PermFlag = Send | Call | CreateContract | CreateAccount | Bond | Name | HasBase | HasRole | Proposal | Input | Batch // Chain permissions strings - RootString string = "root" - SendString = "send" - CallString = "call" - CreateContractString = "createContract" - CreateAccountString = "createAccount" - BondString = "bond" - NameString = "name" - ProposalString = "proposal" - InputString = "input" - BatchString = "batch" + RootString = "root" + SendString = "send" + CallString = "call" + CreateContractString = "createContract" + CreateAccountString = "createAccount" + BondString = "bond" + NameString = "name" + ProposalString = "proposal" + InputString = "input" + BatchString = "batch" // Moderator permissions strings HasBaseString = "hasBase" diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index 5d49945ea..9d2840bbe 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -15,7 +15,6 @@ import ( "github.com/hyperledger/burrow/process" - "github.com/go-kit/kit/log/term" "github.com/hyperledger/burrow/logging/lifecycle" "github.com/hyperledger/burrow/rpc/lib/client" "github.com/hyperledger/burrow/rpc/lib/server" @@ -87,19 +86,6 @@ func TestMain(m *testing.M) { os.Exit(code) } -var colorFn = func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] == "socket" { - if keyvals[i+1] == "tcp" { - return term.FgBgColor{Fg: term.DarkBlue} - } else if keyvals[i+1] == "unix" { - return term.FgBgColor{Fg: term.DarkCyan} - } - } - } - return term.FgBgColor{} -} - // launch unix and tcp servers func setup() { logger, _ := lifecycle.NewStdErrLogger() diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index 274ee30f2..b26ef5d17 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -278,11 +278,11 @@ func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error arg := GetParam(r, name) // log.Notice("param to arg", "argType", argType, "name", name, "arg", arg) - if "" == arg { + if arg == "" { continue } - v, err, ok := nonJSONToArg(argType, arg) + v, ok, err := nonJSONToArg(argType, arg) if err != nil { return nil, err } @@ -310,7 +310,7 @@ func jsonStringToArg(ty reflect.Type, arg string) (reflect.Value, error) { return v, nil } -func nonJSONToArg(ty reflect.Type, arg string) (reflect.Value, error, bool) { +func nonJSONToArg(ty reflect.Type, arg string) (reflect.Value, bool, error) { expectingString := ty.Kind() == reflect.String expectingBytes := (ty.Kind() == reflect.Slice || ty.Kind() == reflect.Array) && ty.Elem().Kind() == reflect.Uint8 @@ -318,7 +318,7 @@ func nonJSONToArg(ty reflect.Type, arg string) (reflect.Value, error, bool) { // Throw quoted strings at JSON parser later... because it always has... 
if expectingString && !isQuotedString { - return reflect.ValueOf(arg), nil, true + return reflect.ValueOf(arg), true, nil } if expectingBytes { @@ -326,9 +326,9 @@ func nonJSONToArg(ty reflect.Type, arg string) (reflect.Value, error, bool) { rv := reflect.New(ty) err := json.Unmarshal([]byte(arg), rv.Interface()) if err != nil { - return reflect.ValueOf(nil), err, false + return reflect.ValueOf(nil), false, err } - return rv.Elem(), nil, true + return rv.Elem(), true, nil } if strings.HasPrefix(strings.ToLower(arg), "0x") { arg = arg[2:] @@ -336,18 +336,18 @@ func nonJSONToArg(ty reflect.Type, arg string) (reflect.Value, error, bool) { var value []byte value, err := hex.DecodeString(arg) if err != nil { - return reflect.ValueOf(nil), err, false + return reflect.ValueOf(nil), false, err } if ty.Kind() == reflect.Array { // Gives us an empty array of the right type rv := reflect.New(ty).Elem() reflect.Copy(rv, reflect.ValueOf(value)) - return rv, nil, true + return rv, true, nil } - return reflect.ValueOf(value), nil, true + return reflect.ValueOf(value), true, nil } - return reflect.ValueOf(nil), nil, false + return reflect.ValueOf(nil), false, nil } // rpc.http diff --git a/rpc/metrics/exporter.go b/rpc/metrics/exporter.go index 3e9f19a1c..b6a46c882 100644 --- a/rpc/metrics/exporter.go +++ b/rpc/metrics/exporter.go @@ -250,8 +250,7 @@ func (e *Exporter) getPeers() error { func (e *Exporter) getBlocks() (*rpc.ResultBlocks, error) { var minHeight uint64 - var maxHeight uint64 - maxHeight = uint64(e.datum.LatestBlockHeight) + maxHeight := uint64(e.datum.LatestBlockHeight) if maxHeight >= e.blockSampleSize { minHeight = maxHeight - (e.blockSampleSize - 1) diff --git a/rpc/rpcinfo/info_server.go b/rpc/rpcinfo/info_server.go index 949d581b4..156f9c831 100644 --- a/rpc/rpcinfo/info_server.go +++ b/rpc/rpcinfo/info_server.go @@ -26,7 +26,7 @@ import ( func StartServer(service *rpc.Service, pattern string, listener net.Listener, logger *logging.Logger) (*http.Server, error) { logger = logger.With(structure.ComponentKey, "RPC_Info") - routes := GetRoutes(service, logger) + routes := GetRoutes(service) mux := http.NewServeMux() wm := server.NewWebsocketManager(routes, logger) mux.HandleFunc(pattern, wm.WebsocketHandler) diff --git a/rpc/rpcinfo/methods.go b/rpc/rpcinfo/methods.go index 3066e4160..1309aecfc 100644 --- a/rpc/rpcinfo/methods.go +++ b/rpc/rpcinfo/methods.go @@ -2,7 +2,6 @@ package rpcinfo import ( "github.com/hyperledger/burrow/acm" - "github.com/hyperledger/burrow/logging" "github.com/hyperledger/burrow/rpc" "github.com/hyperledger/burrow/rpc/lib/server" ) @@ -41,8 +40,7 @@ const ( Consensus = "consensus" ) -func GetRoutes(service *rpc.Service, logger *logging.Logger) map[string]*server.RPCFunc { - logger = logger.WithScope("GetRoutes") +func GetRoutes(service *rpc.Service) map[string]*server.RPCFunc { return map[string]*server.RPCFunc{ // Status Status: server.NewRPCFunc(service.StatusWithin, "block_time_within,block_seen_time_within"), diff --git a/rpc/rpcquery/query_server.go b/rpc/rpcquery/query_server.go index 82805dd2d..9fab09099 100644 --- a/rpc/rpcquery/query_server.go +++ b/rpc/rpcquery/query_server.go @@ -66,6 +66,9 @@ func (qs *queryServer) GetStorage(ctx context.Context, param *GetStorageParam) ( func (qs *queryServer) ListAccounts(param *ListAccountsParam, stream Query_ListAccountsServer) error { qry, err := query.NewOrEmpty(param.Query) + if err != nil { + return err + } var streamErr error err = qs.accounts.IterateAccounts(func(acc *acm.Account) error { if 
qry.Matches(acc.Tagged()) { @@ -155,7 +158,7 @@ func (qs *queryServer) GetProposal(ctx context.Context, param *GetProposalParam) func (qs *queryServer) ListProposals(param *ListProposalsParam, stream Query_ListProposalsServer) error { var streamErr error err := qs.proposalReg.IterateProposals(func(hash []byte, ballot *payload.Ballot) error { - if param.GetProposed() == false || ballot.ProposalState == payload.Ballot_PROPOSED { + if !param.GetProposed() || ballot.ProposalState == payload.Ballot_PROPOSED { return stream.Send(&ProposalResult{Hash: hash, Ballot: ballot}) } else { return nil diff --git a/rpc/service.go b/rpc/service.go index 690fb5698..7304795a3 100644 --- a/rpc/service.go +++ b/rpc/service.go @@ -90,9 +90,7 @@ func (s *Service) UnconfirmedTxs(maxTxs int64) (*ResultUnconfirmedTxs, error) { return nil, err } wrappedTxs := make([]*txs.Envelope, len(transactions)) - for i, tx := range transactions { - wrappedTxs[i] = tx - } + copy(wrappedTxs, transactions) return &ResultUnconfirmedTxs{ NumTxs: len(transactions), Txs: wrappedTxs, diff --git a/storage/channel_iterator_test.go b/storage/channel_iterator_test.go deleted file mode 100644 index 482467059..000000000 --- a/storage/channel_iterator_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package storage - -import "testing" - -func TestNewChannelIterator(t *testing.T) { - ch := make(chan KVPair) - go sendKVPair(ch, kvPairs("a", "hello", "b", "channel", "c", "this is nice")) - ci := NewChannelIterator(ch, bz("a"), bz("c")) - checkItem(t, ci, bz("a"), bz("hello")) - checkNext(t, ci, true) - checkItem(t, ci, bz("b"), bz("channel")) - checkNext(t, ci, true) - checkItem(t, ci, bz("c"), bz("this is nice")) - checkNext(t, ci, false) - checkInvalid(t, ci) -} diff --git a/storage/commit_id.go b/storage/commit_id.go deleted file mode 100644 index eff39ae88..000000000 --- a/storage/commit_id.go +++ /dev/null @@ -1,44 +0,0 @@ -package storage - -import ( - "fmt" - - "github.com/hyperledger/burrow/binary" - amino "github.com/tendermint/go-amino" -) - -var codec = amino.NewCodec() - -type CommitID struct { - Hash binary.HexBytes - Version int64 -} - -func MarshalCommitID(hash []byte, version int64) ([]byte, error) { - commitID := CommitID{ - Version: version, - Hash: hash, - } - bs, err := codec.MarshalBinaryBare(commitID) - if err != nil { - return nil, fmt.Errorf("MarshalCommitID() could not encode CommitID %v: %v", commitID, err) - } - if bs == nil { - // Normalise zero value to non-nil so we can store it IAVL tree without panic - return []byte{}, nil - } - return bs, nil -} - -func UnmarshalCommitID(bs []byte) (*CommitID, error) { - commitID := new(CommitID) - err := codec.UnmarshalBinaryBare(bs, commitID) - if err != nil { - return nil, fmt.Errorf("could not unmarshal CommitID: %v", err) - } - return commitID, nil -} - -func (cid CommitID) String() string { - return fmt.Sprintf("Commit{Hash: %v, Version: %v}", cid.Hash, cid.Version) -} diff --git a/storage/forest.go b/storage/forest.go new file mode 100644 index 000000000..63823ede7 --- /dev/null +++ b/storage/forest.go @@ -0,0 +1,347 @@ +package storage + +import ( + "fmt" + + lru "github.com/hashicorp/golang-lru" + "github.com/hyperledger/burrow/binary" + "github.com/tendermint/go-amino" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/xlab/treeprint" +) + +const ( + commitsPrefix = "c" + treePrefix = "t" +) + +// Access the read path of a forest +type ForestReader interface { + Reader(prefix []byte) (KVCallbackIterableReader, error) +} + +// MutableForest is a collection of 
versioned lazily-loaded RWTrees organised by prefix. It maintains a global state hash +// by storing CommitIDs in a special commitsTree (you could think of it is a two-layer single tree rather than a forest). +// +// The trees (or sub-trees if you prefer) in the forest are RWTrees which wrap an IAVL MutableTree routing writes to the +// MutableTree and reads to the last saved ImmutableTree. In this way reads act only against committed state and can be +// lock free (this allows us to avoid blocking commits - particularly for long-running iterations). +// +// The trees in the forest are created lazily as required by new writes. There is a cache of most recently used trees +// and trees that may require a save are marked as such. New writes are only available to read after a Save(). +// +// Here is an example forest (the output is generated by the Dump() function): +// . +// ├── balances +// │   ├── Caitlin -> 2344 +// │   ├── Cora -> 654456 +// │   ├── Edward -> 34 +// │   └── Lindsay -> 654 +// └── names +// ├── Caitlin -> female +// ├── Cora -> female +// ├── Edward -> male +// └── Lindsay -> unisex +// +// Here there are two tree indexed by the prefixes 'balances' and 'names'. +// +// To perform reads of the forest we access it in the following way: +// +// tree, err := forest.Reader("names") +// gender := tree.Get("Cora") +// +// To perform writes: +// +// tree, err := forest.Writer("names") +// tree.Set("Cora", "unspecified") +// +// If there is no tree currently stored at the prefix passed then it will be created when the forest is saved: +// +// hash, version, err := forest.Save() +// +// where the global version for the forest is returned. + +type MutableForest struct { + // A tree containing a reference for all contained trees in the form of prefix -> CommitID + commitsTree *RWTree + // Much of the implementation of MutableForest is contained in ImmutableForest which is embedded here and used + // mutable via its private API. This embedded instance holds a reference to commitsTree above. + *ImmutableForest + // Map of prefix -> tree for trees that may require a save (but only will be if they have actually been updated) + dirty map[string]*RWTree + // List of dirty prefixes in deterministic order so we may loop over them on Save() and obtain a consistent commitTree hash + dirtyPrefixes []string +} + +// ImmutableForest contains much of the implementation for MutableForest yet it's external API is immutable +type ImmutableForest struct { + // Store of tree prefix -> last commitID (version + hash) - serves as a set of all known trees and provides a global hash + commitsTree KVCallbackIterableReader + treeDB dbm.DB + // Cache for frequently used trees + treeCache *lru.Cache + // Cache size is used in multiple places - for the LRU cache and node cache for any trees created - it probably + // makes sense for them to be roughly the same size + cacheSize int + // Determines whether we use LoadVersionForOverwriting on underlying MutableTrees - since ImmutableForest is used + // by MutableForest in a writing context sometimes we do need to load a version destructively + overwriting bool +} + +// This is the object that is stored in the leaves of the commitsTree - it captures the sub-tree hashes so that the +// commitsTree's hash becomes a mixture of the hashes of all the sub-trees. 
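Editor's note, for orientation while reading this new file: a minimal sketch of how the forest API documented above is driven. The main-package wrapper, MemDB backing store, cache size of 100 and the example prefix/keys are assumptions of the sketch, mirroring the forest tests elsewhere in this patch; they are not part of the change itself.

package main

import (
	"fmt"

	"github.com/hyperledger/burrow/storage"
	dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
	// Backing store and cache size are illustrative (the tests use a MemDB and 100)
	forest, err := storage.NewMutableForest(dbm.NewMemDB(), 100)
	if err != nil {
		panic(err)
	}
	// Writes go to a lazily created sub-tree addressed by prefix...
	tree, err := forest.Writer([]byte("names"))
	if err != nil {
		panic(err)
	}
	tree.Set([]byte("Cora"), []byte("unspecified"))
	// ...and only become readable after Save(), which folds each dirty sub-tree's
	// CommitID into the commits tree to produce the global hash and version
	hash, version, err := forest.Save()
	if err != nil {
		panic(err)
	}
	reader, err := forest.Reader([]byte("names"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("state %X @ version %d: Cora -> %s\n", hash, version, reader.Get([]byte("Cora")))
}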
+type CommitID struct { + Hash binary.HexBytes + Version int64 +} + +type ForestOption func(*ImmutableForest) + +var WithOverwriting ForestOption = func(imf *ImmutableForest) { imf.overwriting = true } + +func NewMutableForest(db dbm.DB, cacheSize int) (*MutableForest, error) { + // The tree whose state root hash is the global state hash + commitsTree := NewRWTree(NewPrefixDB(db, commitsPrefix), cacheSize) + forest, err := NewImmutableForest(commitsTree, NewPrefixDB(db, treePrefix), cacheSize, WithOverwriting) + if err != nil { + return nil, err + } + return &MutableForest{ + ImmutableForest: forest, + commitsTree: commitsTree, + dirty: make(map[string]*RWTree), + }, nil +} + +func NewImmutableForest(commitsTree KVCallbackIterableReader, treeDB dbm.DB, cacheSize int, + options ...ForestOption) (*ImmutableForest, error) { + cache, err := lru.New(cacheSize) + if err != nil { + return nil, fmt.Errorf("NewImmutableForest() could not create cache: %v", err) + } + imf := &ImmutableForest{ + commitsTree: commitsTree, + treeDB: treeDB, + treeCache: cache, + cacheSize: cacheSize, + } + for _, opt := range options { + opt(imf) + } + return imf, nil +} + +// Load mutable forest from database +func (muf *MutableForest) Load(version int64) error { + return muf.commitsTree.Load(version, true) +} + +func (muf *MutableForest) Save() (hash []byte, version int64, _ error) { + // Save each tree in forest that requires save + for _, prefix := range muf.dirtyPrefixes { + tree := muf.dirty[prefix] + if tree.Updated() { + err := muf.saveTree([]byte(prefix), tree) + if err != nil { + return nil, 0, err + } + } + } + // empty dirty cache + muf.dirty = make(map[string]*RWTree, len(muf.dirty)) + muf.dirtyPrefixes = muf.dirtyPrefixes[:0] + return muf.commitsTree.Save() +} + +func (muf *MutableForest) GetImmutable(version int64) (*ImmutableForest, error) { + commitsTree, err := muf.commitsTree.GetImmutable(version) + if err != nil { + return nil, fmt.Errorf("MutableForest.GetImmutable() could not get commits tree for version %d: %v", + version, err) + } + return NewImmutableForest(commitsTree, muf.treeDB, muf.cacheSize) +} + +// Calls to writer should be serialised as should writes to the tree +func (muf *MutableForest) Writer(prefix []byte) (*RWTree, error) { + // Try dirty cache first (if tree is new it may only be in this location) + prefixString := string(prefix) + if tree, ok := muf.dirty[prefixString]; ok { + return tree, nil + } + tree, err := muf.tree(prefix) + if err != nil { + return nil, err + } + // Mark tree as dirty + muf.dirty[prefixString] = tree + muf.dirtyPrefixes = append(muf.dirtyPrefixes, prefixString) + return tree, nil +} + +func (muf *MutableForest) IterateRWTree(start, end []byte, ascending bool, fn func(prefix []byte, tree *RWTree) error) error { + return muf.commitsTree.Iterate(start, end, ascending, func(prefix []byte, _ []byte) error { + rwt, err := muf.tree(prefix) + if err != nil { + return err + } + return fn(prefix, rwt) + }) +} + +// Delete a tree - if the tree exists will return the CommitID of the latest saved version +func (muf *MutableForest) Delete(prefix []byte) (*CommitID, error) { + bs, removed := muf.commitsTree.Delete(prefix) + if !removed { + return nil, nil + } + return unmarshalCommitID(bs) +} + +// Get the current global hash for all trees in this forest +func (muf *MutableForest) Hash() []byte { + return muf.commitsTree.Hash() +} + +// Get the current global version for all versions of all trees in this forest +func (muf *MutableForest) Version() int64 { + return 
muf.commitsTree.Version() +} + +func (muf *MutableForest) saveTree(prefix []byte, tree *RWTree) error { + hash, version, err := tree.Save() + if err != nil { + return fmt.Errorf("MutableForest.saveTree() could not save tree: %v", err) + } + return muf.setCommit(prefix, hash, version) +} + +func (muf *MutableForest) setCommit(prefix, hash []byte, version int64) error { + bs, err := marshalCommitID(hash, version) + if err != nil { + return fmt.Errorf("MutableForest.setCommit() could not marshal CommitID: %v", err) + } + muf.commitsTree.Set([]byte(prefix), bs) + return nil +} + +// ImmutableForest + +// Get the tree at prefix for making reads +func (imf *ImmutableForest) Reader(prefix []byte) (KVCallbackIterableReader, error) { + return imf.tree(prefix) +} + +func (imf *ImmutableForest) Iterate(start, end []byte, ascending bool, fn func(prefix []byte, tree KVCallbackIterableReader) error) error { + return imf.commitsTree.Iterate(start, end, ascending, func(prefix []byte, _ []byte) error { + rwt, err := imf.tree(prefix) + if err != nil { + return err + } + return fn(prefix, rwt) + }) +} + +func (imf *ImmutableForest) Dump() string { + dump := treeprint.New() + AddTreePrintTree("Commits", dump, imf.commitsTree) + err := imf.Iterate(nil, nil, true, func(prefix []byte, tree KVCallbackIterableReader) error { + AddTreePrintTree(string(prefix), dump, tree) + return nil + }) + if err != nil { + return fmt.Sprintf("ImmutableForest.Dump(): iteration error: %v", err) + } + return dump.String() +} + +// Shared implementation - these methods + +// Lazy load tree +func (imf *ImmutableForest) tree(prefix []byte) (*RWTree, error) { + // Try cache + if value, ok := imf.treeCache.Get(string(prefix)); ok { + return value.(*RWTree), nil + } + // Not in caches but non-negative version - we should be able to load into memory + return imf.loadOrCreateTree(prefix) +} + +func (imf *ImmutableForest) commitID(prefix []byte) (*CommitID, error) { + bs := imf.commitsTree.Get(prefix) + if bs == nil { + return new(CommitID), nil + } + commitID, err := unmarshalCommitID(bs) + if err != nil { + return nil, fmt.Errorf("could not get commitID for prefix %X: %v", prefix, err) + } + return commitID, nil +} + +func (imf *ImmutableForest) loadOrCreateTree(prefix []byte) (*RWTree, error) { + const errHeader = "ImmutableForest.loadOrCreateTree():" + tree := imf.newTree(prefix) + commitID, err := imf.commitID(prefix) + if err != nil { + return nil, fmt.Errorf("%s %v", errHeader, err) + } + if commitID.Version == 0 { + // This is the first time we have been asked to load this tree + return imf.newTree(prefix), nil + } + err = tree.Load(commitID.Version, imf.overwriting) + if err != nil { + return nil, fmt.Errorf("%s could not load tree: %v", errHeader, err) + } + return tree, nil +} + +// Create a new in-memory IAVL tree +func (imf *ImmutableForest) newTree(prefix []byte) *RWTree { + p := string(prefix) + tree := NewRWTree(NewPrefixDB(imf.treeDB, p), imf.cacheSize) + imf.treeCache.Add(p, tree) + return tree +} + +// CommitID serialisation + +var codec = amino.NewCodec() + +func (cid *CommitID) UnmarshalBinary(data []byte) error { + return codec.UnmarshalBinaryBare(data, cid) +} + +func (cid *CommitID) MarshalBinary() (data []byte, err error) { + return codec.MarshalBinaryBare(cid) +} + +func (cid CommitID) String() string { + return fmt.Sprintf("Commit{Hash: %v, Version: %v}", cid.Hash, cid.Version) +} + +func marshalCommitID(hash []byte, version int64) ([]byte, error) { + commitID := CommitID{ + Version: version, + Hash: hash, + } + 
bs, err := commitID.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("MarshalCommitID() could not encode CommitID %v: %v", commitID, err) + } + if bs == nil { + // Normalise zero value to non-nil so we can store it IAVL tree without panic + return []byte{}, nil + } + return bs, nil +} + +func unmarshalCommitID(bs []byte) (*CommitID, error) { + commitID := new(CommitID) + err := commitID.UnmarshalBinary(bs) + if err != nil { + return nil, fmt.Errorf("could not unmarshal CommitID: %v", err) + } + return commitID, nil +} diff --git a/storage/mutable_forest_test.go b/storage/forest_test.go similarity index 92% rename from storage/mutable_forest_test.go rename to storage/forest_test.go index e1766bb9e..97ccad8d4 100644 --- a/storage/mutable_forest_test.go +++ b/storage/forest_test.go @@ -14,11 +14,11 @@ import ( func TestMutableForest_Genesis(t *testing.T) { rwf, err := NewMutableForest(dbm.NewMemDB(), 100) require.NoError(t, err) - prefix := bz("fooos") + prefix := []byte("fooos") tree, err := rwf.Writer(prefix) require.NoError(t, err) - key1 := bz("bar") - val1 := bz("nog") + key1 := []byte("bar") + val1 := []byte("nog") tree.Set(key1, val1) _, _, err = rwf.Save() @@ -38,11 +38,11 @@ func TestMutableForest_Genesis(t *testing.T) { func TestMutableForest_Save(t *testing.T) { forest, err := NewMutableForest(dbm.NewMemDB(), 100) require.NoError(t, err) - prefix1 := bz("fooos") + prefix1 := []byte("fooos") tree, err := forest.Writer(prefix1) require.NoError(t, err) - key1 := bz("bar") - val1 := bz("nog") + key1 := []byte("bar") + val1 := []byte("nog") tree.Set(key1, val1) hash1, version1, err := forest.Save() @@ -55,9 +55,9 @@ func TestMutableForest_Save(t *testing.T) { └── "bar" -> "nog" `) - prefix2 := bz("prefixo") - key2 := bz("hogs") - val2 := bz("they are dogs") + prefix2 := []byte("prefixo") + key2 := []byte("hogs") + val2 := []byte("they are dogs") tree, err = forest.Writer(prefix2) require.NoError(t, err) tree.Set(key2, val2) @@ -83,11 +83,11 @@ func TestMutableForest_Load(t *testing.T) { db := dbm.NewMemDB() forest, err := NewMutableForest(db, 100) require.NoError(t, err) - prefix1 := bz("prefixes can be long if you want") + prefix1 := []byte("prefixes can be long if you want") tree, err := forest.Writer(prefix1) require.NoError(t, err) - key1 := bz("El Nubble") - val1 := bz("Diplodicus") + key1 := []byte("El Nubble") + val1 := []byte("Diplodicus") tree.Set(key1, val1) hash, version, err := forest.Save() @@ -121,8 +121,10 @@ func TestSorted(t *testing.T) { setForest(t, forest, "balances", "Lindsay", "654") setForest(t, forest, "balances", "Cora", "654456") _, _, err = forest.Save() - tree, err := forest.Writer(bz("age")) - tree.Get(bz("foo")) + require.NoError(t, err) + tree, err := forest.Writer([]byte("age")) + require.NoError(t, err) + tree.Get([]byte("foo")) setForest(t, forest, "age", "Lindsay", "34") setForest(t, forest, "age", "Cora", "1") _, _, err = forest.Save() diff --git a/storage/immutable_forest.go b/storage/immutable_forest.go deleted file mode 100644 index 1922bef55..000000000 --- a/storage/immutable_forest.go +++ /dev/null @@ -1,129 +0,0 @@ -package storage - -import ( - "fmt" - - "github.com/xlab/treeprint" - - lru "github.com/hashicorp/golang-lru" - dbm "github.com/tendermint/tendermint/libs/db" -) - -type ImmutableForest struct { - // Store of tree prefix -> last commitID (version + hash) - serves as a set of all known trees and provides a global hash - commitsTree KVCallbackIterableReader - treeDB dbm.DB - // Cache for frequently used trees - treeCache 
*lru.Cache - // Cache size is used in multiple places - for the LRU cache and node cache for any trees created - it probably - // makes sense for them to be roughly the same size - cacheSize int - // Determines whether we use LoadVersionForOverwriting on underlying MutableTrees - since ImmutableForest is used - // by MutableForest in a writing context sometimes we do need to load a version destructively - overwriting bool -} - -type ForestOption func(*ImmutableForest) - -var WithOverwriting ForestOption = func(imf *ImmutableForest) { imf.overwriting = true } - -func NewImmutableForest(commitsTree KVCallbackIterableReader, treeDB dbm.DB, cacheSize int, - options ...ForestOption) (*ImmutableForest, error) { - cache, err := lru.New(cacheSize) - if err != nil { - return nil, fmt.Errorf("NewImmutableForest() could not create cache: %v", err) - } - imf := &ImmutableForest{ - commitsTree: commitsTree, - treeDB: treeDB, - treeCache: cache, - cacheSize: cacheSize, - } - for _, opt := range options { - opt(imf) - } - return imf, nil -} - -func (imf *ImmutableForest) Iterate(start, end []byte, ascending bool, fn func(prefix []byte, tree KVCallbackIterableReader) error) error { - return imf.commitsTree.Iterate(start, end, ascending, func(prefix []byte, _ []byte) error { - rwt, err := imf.tree(prefix) - if err != nil { - return err - } - return fn(prefix, rwt) - }) -} - -func (imf *ImmutableForest) IterateRWTree(start, end []byte, ascending bool, fn func(prefix []byte, tree *RWTree) error) error { - return imf.commitsTree.Iterate(start, end, ascending, func(prefix []byte, _ []byte) error { - rwt, err := imf.tree(prefix) - if err != nil { - return err - } - return fn(prefix, rwt) - }) -} - -// Get the tree at prefix for making reads -func (imf *ImmutableForest) Reader(prefix []byte) (KVCallbackIterableReader, error) { - return imf.tree(prefix) -} - -// Lazy load tree -func (imf *ImmutableForest) tree(prefix []byte) (*RWTree, error) { - // Try cache - if value, ok := imf.treeCache.Get(string(prefix)); ok { - return value.(*RWTree), nil - } - // Not in caches but non-negative version - we should be able to load into memory - return imf.loadOrCreateTree(prefix) -} - -func (imf *ImmutableForest) commitID(prefix []byte) (*CommitID, error) { - bs := imf.commitsTree.Get(prefix) - if bs == nil { - return new(CommitID), nil - } - commitID, err := UnmarshalCommitID(bs) - if err != nil { - return nil, fmt.Errorf("could not get commitID for prefix %X: %v", prefix, err) - } - return commitID, nil -} - -func (imf *ImmutableForest) loadOrCreateTree(prefix []byte) (*RWTree, error) { - const errHeader = "ImmutableForest.loadOrCreateTree():" - tree := imf.newTree(prefix) - commitID, err := imf.commitID(prefix) - if err != nil { - return nil, fmt.Errorf("%s %v", errHeader, err) - } - if commitID.Version == 0 { - // This is the first time we have been asked to load this tree - return imf.newTree(prefix), nil - } - err = tree.Load(commitID.Version, imf.overwriting) - if err != nil { - return nil, fmt.Errorf("%s could not load tree: %v", errHeader, err) - } - return tree, nil -} - -// Create a new in-memory IAVL tree -func (imf *ImmutableForest) newTree(prefix []byte) *RWTree { - p := string(prefix) - tree := NewRWTree(NewPrefixDB(imf.treeDB, p), imf.cacheSize) - imf.treeCache.Add(p, tree) - return tree -} - -func (imf *ImmutableForest) Dump() string { - dump := treeprint.New() - AddTreePrintTree("Commits", dump, imf.commitsTree) - imf.Iterate(nil, nil, true, func(prefix []byte, tree KVCallbackIterableReader) error { - 
AddTreePrintTree(string(prefix), dump, tree) - return nil - }) - return dump.String() -} diff --git a/storage/immutable_tree.go b/storage/immutable_tree.go deleted file mode 100644 index c9aa347ee..000000000 --- a/storage/immutable_tree.go +++ /dev/null @@ -1,28 +0,0 @@ -package storage - -import "github.com/tendermint/iavl" - -// We wrap IAVL's tree types in order to provide iteration helpers and to harmonise other interface types with what we -// expect - -type ImmutableTree struct { - *iavl.ImmutableTree -} - -func (imt *ImmutableTree) Get(key []byte) []byte { - _, value := imt.ImmutableTree.Get(key) - return value -} - -func (imt *ImmutableTree) Iterate(start, end []byte, ascending bool, fn func(key []byte, value []byte) error) error { - var err error - imt.ImmutableTree.IterateRange(start, end, ascending, func(key, value []byte) bool { - err = fn(key, value) - if err != nil { - // stop - return true - } - return false - }) - return err -} diff --git a/storage/key_format.go b/storage/key_format.go index 3d0b55042..dc3201153 100644 --- a/storage/key_format.go +++ b/storage/key_format.go @@ -3,6 +3,7 @@ package storage import ( "encoding/binary" "fmt" + "reflect" "strings" ) @@ -60,6 +61,39 @@ func NewKeyFormat(prefix string, layout ...int) (*KeyFormat, error) { return kf, nil } +var expectedKeyFormatType = reflect.TypeOf(MustKeyFormat{}) + +// Checks that a struct containing KeyFormat fields has no collisions on prefix and so acts as a sane 'KeyFormatStore' +func EnsureKeyFormatStore(ks interface{}) error { + rv := reflect.ValueOf(ks) + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + rt := rv.Type() + + keyFormats := make(map[string]MustKeyFormat) + for i := 0; i < rt.NumField(); i++ { + fv := rv.Field(i) + if fv.Kind() == reflect.Ptr { + if fv.IsNil() { + return fmt.Errorf("key format field '%s' is nil", rt.Field(i).Name) + } + fv = fv.Elem() + } + ft := fv.Type() + if ft == expectedKeyFormatType { + kf := fv.Interface().(MustKeyFormat) + prefix := kf.Prefix().String() + if kfDuplicate, ok := keyFormats[prefix]; ok { + return fmt.Errorf("duplicate prefix %q between key format %v and %v", + prefix, kfDuplicate, kf) + } + keyFormats[prefix] = kf + } + } + return nil +} + // Format the byte segments into the key format - will panic if the segment lengths do not match the layout. 
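Editor's note: a quick illustration of the prefix-collision check added above, mirroring the tests moved into key_format_test.go further down. The struct name, field names and prefixes are hypothetical; the check only inspects fields whose type is MustKeyFormat (or a non-nil pointer to one) and requires their prefixes to be distinct.

package main

import (
	"fmt"

	"github.com/hyperledger/burrow/storage"
)

// Hypothetical key-format store for the sketch
type exampleKeyStore struct {
	Accounts *storage.MustKeyFormat
	Storage  *storage.MustKeyFormat
}

func main() {
	ks := exampleKeyStore{
		Accounts: storage.NewMustKeyFormat("a", 20),
		Storage:  storage.NewMustKeyFormat("s", 20, 32),
	}
	// nil error: prefixes "a" and "s" do not collide
	fmt.Println(storage.EnsureKeyFormatStore(ks))

	ks.Storage = storage.NewMustKeyFormat("a", 20, 32)
	// error: duplicate prefix "a" shared by Accounts and Storage
	fmt.Println(storage.EnsureKeyFormatStore(ks))
}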
func (kf *KeyFormat) KeyBytes(segments ...[]byte) ([]byte, error) { key := make([]byte, kf.length) diff --git a/storage/key_format_store.go b/storage/key_format_store.go deleted file mode 100644 index 99975afb1..000000000 --- a/storage/key_format_store.go +++ /dev/null @@ -1,38 +0,0 @@ -package storage - -import ( - "fmt" - "reflect" -) - -var expectedKeyFormatType = reflect.TypeOf(MustKeyFormat{}) - -func EnsureKeyFormatStore(ks interface{}) error { - rv := reflect.ValueOf(ks) - if rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - rt := rv.Type() - - keyFormats := make(map[string]MustKeyFormat) - for i := 0; i < rt.NumField(); i++ { - fv := rv.Field(i) - if fv.Kind() == reflect.Ptr { - if fv.IsNil() { - return fmt.Errorf("key format field '%s' is nil", rt.Field(i).Name) - } - fv = fv.Elem() - } - ft := fv.Type() - if ft == expectedKeyFormatType { - kf := fv.Interface().(MustKeyFormat) - prefix := kf.Prefix().String() - if kfDuplicate, ok := keyFormats[prefix]; ok { - return fmt.Errorf("duplicate prefix %q between key format %v and %v", - prefix, kfDuplicate, kf) - } - keyFormats[prefix] = kf - } - } - return nil -} diff --git a/storage/key_format_store_test.go b/storage/key_format_store_test.go deleted file mode 100644 index 1331c5068..000000000 --- a/storage/key_format_store_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package storage - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -type testKeyStore = struct { - Accounts *MustKeyFormat - Storage *MustKeyFormat - foo string -} - -func TestEnsureKeyStore(t *testing.T) { - keyStore := testKeyStore{ - Accounts: NewMustKeyFormat("foo", 4, 5, 6), - Storage: NewMustKeyFormat("foos", 4, 5, 6), - } - err := EnsureKeyFormatStore(keyStore) - require.NoError(t, err) - - err = EnsureKeyFormatStore(&keyStore) - require.NoError(t, err, "pointer to keystore should work") - - keyStore = testKeyStore{ - Accounts: NewMustKeyFormat("foo", 4, 5, 6), - Storage: NewMustKeyFormat("foo", 4, 5, 6), - } - err = EnsureKeyFormatStore(&keyStore) - require.Error(t, err, "duplicate prefixes should be detected") - - // Test missing formats - keyStore = testKeyStore{} - err = EnsureKeyFormatStore(&keyStore) - require.Error(t, err, "all formats should be set") - - keyStore = testKeyStore{ - Accounts: NewMustKeyFormat("foo", 4, 5, 6), - } - err = EnsureKeyFormatStore(&keyStore) - require.Error(t, err, "all formats should be set") - - keyStore2 := struct { - Accounts MustKeyFormat - Storage *MustKeyFormat - }{ - Accounts: *NewMustKeyFormat("foo", 56, 6), - Storage: NewMustKeyFormat("foo2", 1, 2), - } - - err = EnsureKeyFormatStore(keyStore2) - require.NoError(t, err) - - keyStore2 = struct { - Accounts MustKeyFormat - Storage *MustKeyFormat - }{ - Storage: NewMustKeyFormat("foo2", 1, 2), - } - err = EnsureKeyFormatStore(keyStore2) - require.NoError(t, err) - - err = EnsureKeyFormatStore(keyStore2) - require.NoError(t, err) - - keyStore2 = struct { - Accounts MustKeyFormat - Storage *MustKeyFormat - }{ - Accounts: *NewMustKeyFormat("foo", 56, 6), - Storage: NewMustKeyFormat("foo", 1, 2), - } - - err = EnsureKeyFormatStore(keyStore2) - require.Error(t, err, "duplicate prefixes should be detected") -} diff --git a/storage/key_format_test.go b/storage/key_format_test.go index 0a9c0f179..bc7d61ee1 100644 --- a/storage/key_format_test.go +++ b/storage/key_format_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestKeyFormatBytes(t *testing.T) { @@ -33,7 +34,7 @@ func TestKeyFormat(t 
*testing.T) { assert.Equal(t, b, *bo) assert.Equal(t, c, *co) - ao, bo, co = new(int64), new(int64), new(int64) + ao, bo, _ = new(int64), new(int64), new(int64) bs := new([]byte) kf.Scan(key, ao, bo, bs) assert.Equal(t, a, *ao) @@ -108,3 +109,73 @@ func TestKeyFormat_Layout(t *testing.T) { key := kf.KeyNoPrefix([]byte("Hi, "), "dinosaur") assert.Equal(t, "Hi, dinosaur", key.String()) } + +type testKeyStore = struct { + Accounts *MustKeyFormat + Storage *MustKeyFormat + foo string +} + +func TestEnsureKeyStore(t *testing.T) { + keyStore := testKeyStore{ + Accounts: NewMustKeyFormat("foo", 4, 5, 6), + Storage: NewMustKeyFormat("foos", 4, 5, 6), + } + err := EnsureKeyFormatStore(keyStore) + require.NoError(t, err) + + err = EnsureKeyFormatStore(&keyStore) + require.NoError(t, err, "pointer to keystore should work") + + keyStore = testKeyStore{ + Accounts: NewMustKeyFormat("foo", 4, 5, 6), + Storage: NewMustKeyFormat("foo", 4, 5, 6), + } + err = EnsureKeyFormatStore(&keyStore) + require.Error(t, err, "duplicate prefixes should be detected") + + // Test missing formats + keyStore = testKeyStore{} + err = EnsureKeyFormatStore(&keyStore) + require.Error(t, err, "all formats should be set") + + keyStore = testKeyStore{ + Accounts: NewMustKeyFormat("foo", 4, 5, 6), + } + err = EnsureKeyFormatStore(&keyStore) + require.Error(t, err, "all formats should be set") + + keyStore2 := struct { + Accounts MustKeyFormat + Storage *MustKeyFormat + }{ + Accounts: *NewMustKeyFormat("foo", 56, 6), + Storage: NewMustKeyFormat("foo2", 1, 2), + } + + err = EnsureKeyFormatStore(keyStore2) + require.NoError(t, err) + + keyStore2 = struct { + Accounts MustKeyFormat + Storage *MustKeyFormat + }{ + Storage: NewMustKeyFormat("foo2", 1, 2), + } + err = EnsureKeyFormatStore(keyStore2) + require.NoError(t, err) + + err = EnsureKeyFormatStore(keyStore2) + require.NoError(t, err) + + keyStore2 = struct { + Accounts MustKeyFormat + Storage *MustKeyFormat + }{ + Accounts: *NewMustKeyFormat("foo", 56, 6), + Storage: NewMustKeyFormat("foo", 1, 2), + } + + err = EnsureKeyFormatStore(keyStore2) + require.Error(t, err, "duplicate prefixes should be detected") +} diff --git a/storage/kvcascade.go b/storage/kvcascade.go deleted file mode 100644 index 581167f57..000000000 --- a/storage/kvcascade.go +++ /dev/null @@ -1,39 +0,0 @@ -package storage - -type KVCascade []KVIterableReader - -func (kvc KVCascade) Get(key []byte) []byte { - for _, kvs := range kvc { - value := kvs.Get(key) - if value != nil { - return value - } - } - return nil -} - -func (kvc KVCascade) Has(key []byte) bool { - for _, kvs := range kvc { - has := kvs.Has(key) - if has { - return true - } - } - return false -} - -func (kvc KVCascade) Iterator(low, high []byte) KVIterator { - iterators := make([]KVIterator, len(kvc)) - for i, kvs := range kvc { - iterators[i] = kvs.Iterator(low, high) - } - return NewMultiIterator(false, iterators...) -} - -func (kvc KVCascade) ReverseIterator(low, high []byte) KVIterator { - iterators := make([]KVIterator, len(kvc)) - for i, kvs := range kvc { - iterators[i] = kvs.ReverseIterator(low, high) - } - return NewMultiIterator(true, iterators...) 
-} diff --git a/storage/kvstore.go b/storage/kvstore.go index 2ac984528..ee02e6bd9 100644 --- a/storage/kvstore.go +++ b/storage/kvstore.go @@ -32,18 +32,6 @@ type KVCallbackIterable interface { Iterate(low, high []byte, ascending bool, fn func(key []byte, value []byte) error) error } -func KVCallbackIterator(rit KVCallbackIterable, ascending bool, low, high []byte) dbm.Iterator { - ch := make(chan KVPair) - go func() { - defer close(ch) - rit.Iterate(low, high, ascending, func(key, value []byte) (err error) { - ch <- KVPair{key, value} - return - }) - }() - return NewChannelIterator(ch, low, high) -} - type KVReader interface { // Get returns nil iff key doesn't exist. Panics on nil key. Get(key []byte) []byte @@ -79,14 +67,6 @@ type KVStore interface { KVIterable } -// NormaliseDomain encodes the assumption that when nil is used as a lower bound is interpreted as low rather than high -func NormaliseDomain(low, high []byte) ([]byte, []byte) { - if len(low) == 0 { - low = []byte{} - } - return low, high -} - // KeyOrder maps []byte{} -> -1, []byte(nil) -> 1, and everything else to 0. This encodes the assumptions of the // KVIterator domain endpoints func KeyOrder(key []byte) int { @@ -114,3 +94,11 @@ func CompareKeys(k1, k2 []byte) int { } return bytes.Compare(k1, k2) } + +// NormaliseDomain encodes the assumption that when nil is used as a lower bound is interpreted as low rather than high +func NormaliseDomain(low, high []byte) ([]byte, []byte) { + if len(low) == 0 { + low = []byte{} + } + return low, high +} diff --git a/storage/mutable_forest.go b/storage/mutable_forest.go deleted file mode 100644 index 10d473ebb..000000000 --- a/storage/mutable_forest.go +++ /dev/null @@ -1,165 +0,0 @@ -package storage - -import ( - "fmt" - - dbm "github.com/tendermint/tendermint/libs/db" -) - -const ( - commitsPrefix = "c" - treePrefix = "t" -) - -type ForestReader interface { - Reader(prefix []byte) (KVCallbackIterableReader, error) -} - -// MutableForest is a collection of versioned lazily-loaded RWTrees organised by prefix. It maintains a global state hash -// by storing CommitIDs in a special commitsTree (you could think of it is a two-layer single tree rather than a forest). -// -// The trees (or sub-trees if you prefer) in the forest are RWTrees which wrap an IAVL MutableTree routing writes to the -// MutableTree and reads to the last saved ImmutableTree. In this way reads act only against committed state and can be -// lock free (this allows us to avoid blocking commits - particularly for long-running iterations). -// -// The trees in the forest are created lazily as required by new writes. There is a cache of most recently used trees -// and trees that may require a save are marked as such. New writes are only available to read after a Save(). -// -// Here is an example forest (the output is generated by the Dump() function): -// . -// ├── balances -// │   ├── Caitlin -> 2344 -// │   ├── Cora -> 654456 -// │   ├── Edward -> 34 -// │   └── Lindsay -> 654 -// └── names -// ├── Caitlin -> female -// ├── Cora -> female -// ├── Edward -> male -// └── Lindsay -> unisex -// -// Here there are two tree indexed by the prefixes 'balances' and 'names'. 
-// -// To perform reads of the forest we access it in the following way: -// -// tree, err := forest.Reader("names") -// gender := tree.Get("Cora") -// -// To perform writes: -// -// tree, err := forest.Writer("names") -// tree.Set("Cora", "unspecified") -// -// If there is no tree currently stored at the prefix passed then it will be created when the forest is saved: -// -// hash, version, err := forest.Save() -// -// where the global version for the forest is returned. - -type MutableForest struct { - // A tree containing a reference for all contained trees in the form of prefix -> CommitID - commitsTree *RWTree - *ImmutableForest - // Map of prefix -> tree for trees that may require a save (but only will be if they have actually been updated) - dirty map[string]*RWTree - // List of dirty prefixes in deterministic order so we may loop over them on Save() and obtain a consistent commitTree hash - dirtyPrefixes []string -} - -func NewMutableForest(db dbm.DB, cacheSize int) (*MutableForest, error) { - tree := NewRWTree(NewPrefixDB(db, commitsPrefix), cacheSize) - forest, err := NewImmutableForest(tree, NewPrefixDB(db, treePrefix), cacheSize, WithOverwriting) - if err != nil { - return nil, err - } - return &MutableForest{ - ImmutableForest: forest, - commitsTree: tree, - dirty: make(map[string]*RWTree), - }, nil -} - -// Load mutable forest from database, pass overwriting = true if you wish to make writes to version version + 1. -// this will -func (muf *MutableForest) Load(version int64) error { - return muf.commitsTree.Load(version, true) -} - -func (muf *MutableForest) Save() ([]byte, int64, error) { - // Save each tree in forest that requires save - for _, prefix := range muf.dirtyPrefixes { - tree := muf.dirty[prefix] - if tree.Updated() { - err := muf.saveTree([]byte(prefix), tree) - if err != nil { - return nil, 0, err - } - } - } - // empty dirty cache - muf.dirty = make(map[string]*RWTree, len(muf.dirty)) - muf.dirtyPrefixes = muf.dirtyPrefixes[:0] - return muf.commitsTree.Save() -} - -func (muf *MutableForest) GetImmutable(version int64) (*ImmutableForest, error) { - commitsTree, err := muf.commitsTree.GetImmutable(version) - if err != nil { - return nil, fmt.Errorf("MutableForest.GetImmutable() could not get commits tree for version %d: %v", - version, err) - } - return NewImmutableForest(commitsTree, muf.treeDB, muf.cacheSize) -} - -// Calls to writer should be serialised as should writes to the tree -func (muf *MutableForest) Writer(prefix []byte) (*RWTree, error) { - // Try dirty cache first (if tree is new it may only be in this location) - prefixString := string(prefix) - if tree, ok := muf.dirty[prefixString]; ok { - return tree, nil - } - tree, err := muf.tree(prefix) - if err != nil { - return nil, err - } - // Mark tree as dirty - muf.dirty[prefixString] = tree - muf.dirtyPrefixes = append(muf.dirtyPrefixes, prefixString) - return tree, nil -} - -// Delete a tree - if the tree exists will return the CommitID of the latest saved version -func (muf *MutableForest) Delete(prefix []byte) (*CommitID, error) { - bs, removed := muf.commitsTree.Delete(prefix) - if !removed { - return nil, nil - } - return UnmarshalCommitID(bs) -} - -// Get the current global hash for all trees in this forest -func (muf *MutableForest) Hash() []byte { - return muf.commitsTree.Hash() -} - -// Get the current global version for all versions of all trees in this forest -func (muf *MutableForest) Version() int64 { - return muf.commitsTree.Version() -} - -func (muf *MutableForest) saveTree(prefix 
[]byte, tree *RWTree) error { - hash, version, err := tree.Save() - if err != nil { - return fmt.Errorf("MutableForest.saveTree() could not save tree: %v", err) - } - return muf.setCommit(prefix, hash, version) -} - -func (muf *MutableForest) setCommit(prefix, hash []byte, version int64) error { - bs, err := MarshalCommitID(hash, version) - if err != nil { - return fmt.Errorf("MutableForest.setCommit() could not marshal CommitID: %v", err) - } - muf.commitsTree.Set([]byte(prefix), bs) - return nil -} diff --git a/storage/prefix_db_test.go b/storage/prefix_db_test.go index 9469eba50..23b1771a2 100644 --- a/storage/prefix_db_test.go +++ b/storage/prefix_db_test.go @@ -13,15 +13,15 @@ import ( func mockDBWithStuff() dbm.DB { db := dbm.NewMemDB() // Under "key" prefix - db.Set(bz("key"), bz("value")) - db.Set(bz("key1"), bz("value1")) - db.Set(bz("key2"), bz("value2")) - db.Set(bz("key3"), bz("value3")) - db.Set(bz("something"), bz("else")) - db.Set(bz(""), bz("")) - db.Set(bz("k"), bz("val")) - db.Set(bz("ke"), bz("valu")) - db.Set(bz("kee"), bz("valuu")) + db.Set([]byte("key"), []byte("value")) + db.Set([]byte("key1"), []byte("value1")) + db.Set([]byte("key2"), []byte("value2")) + db.Set([]byte("key3"), []byte("value3")) + db.Set([]byte("something"), []byte("else")) + db.Set([]byte(""), []byte("")) + db.Set([]byte("k"), []byte("val")) + db.Set([]byte("ke"), []byte("valu")) + db.Set([]byte("kee"), []byte("valuu")) return db } @@ -29,18 +29,18 @@ func TestPrefixDBSimple(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - checkValue(t, pdb, bz("key"), nil) - checkValue(t, pdb, bz(""), bz("value")) - checkValue(t, pdb, bz("key1"), nil) - checkValue(t, pdb, bz("1"), bz("value1")) - checkValue(t, pdb, bz("key2"), nil) - checkValue(t, pdb, bz("2"), bz("value2")) - checkValue(t, pdb, bz("key3"), nil) - checkValue(t, pdb, bz("3"), bz("value3")) - checkValue(t, pdb, bz("something"), nil) - checkValue(t, pdb, bz("k"), nil) - checkValue(t, pdb, bz("ke"), nil) - checkValue(t, pdb, bz("kee"), nil) + checkValue(t, pdb, []byte("key"), nil) + checkValue(t, pdb, []byte(""), []byte("value")) + checkValue(t, pdb, []byte("key1"), nil) + checkValue(t, pdb, []byte("1"), []byte("value1")) + checkValue(t, pdb, []byte("key2"), nil) + checkValue(t, pdb, []byte("2"), []byte("value2")) + checkValue(t, pdb, []byte("key3"), nil) + checkValue(t, pdb, []byte("3"), []byte("value3")) + checkValue(t, pdb, []byte("something"), nil) + checkValue(t, pdb, []byte("k"), nil) + checkValue(t, pdb, []byte("ke"), nil) + checkValue(t, pdb, []byte("kee"), nil) } func TestPrefixDBIterator1(t *testing.T) { @@ -49,13 +49,13 @@ func TestPrefixDBIterator1(t *testing.T) { itr := pdb.Iterator(nil, nil) checkDomain(t, itr, nil, nil) - checkItem(t, itr, bz(""), bz("value")) + checkItem(t, itr, []byte(""), []byte("value")) checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) + checkItem(t, itr, []byte("1"), []byte("value1")) checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) + checkItem(t, itr, []byte("2"), []byte("value2")) checkNext(t, itr, true) - checkItem(t, itr, bz("3"), bz("value3")) + checkItem(t, itr, []byte("3"), []byte("value3")) checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() @@ -65,8 +65,8 @@ func TestPrefixDBIterator2(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.Iterator(nil, bz("")) - checkDomain(t, itr, nil, bz("")) + itr := pdb.Iterator(nil, []byte("")) + checkDomain(t, itr, nil, []byte("")) checkInvalid(t, itr) itr.Close() } @@ -75,15 
+75,15 @@ func TestPrefixDBIterator3(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.Iterator(bz(""), nil) - checkDomain(t, itr, bz(""), nil) - checkItem(t, itr, bz(""), bz("value")) + itr := pdb.Iterator([]byte(""), nil) + checkDomain(t, itr, []byte(""), nil) + checkItem(t, itr, []byte(""), []byte("value")) checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) + checkItem(t, itr, []byte("1"), []byte("value1")) checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) + checkItem(t, itr, []byte("2"), []byte("value2")) checkNext(t, itr, true) - checkItem(t, itr, bz("3"), bz("value3")) + checkItem(t, itr, []byte("3"), []byte("value3")) checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() @@ -93,8 +93,8 @@ func TestPrefixDBIterator4(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.Iterator(bz(""), bz("")) - checkDomain(t, itr, bz(""), bz("")) + itr := pdb.Iterator([]byte(""), []byte("")) + checkDomain(t, itr, []byte(""), []byte("")) checkInvalid(t, itr) itr.Close() } @@ -105,13 +105,13 @@ func TestPrefixDBReverseIterator1(t *testing.T) { itr := pdb.ReverseIterator(nil, nil) checkDomain(t, itr, nil, nil) - checkItem(t, itr, bz("3"), bz("value3")) + checkItem(t, itr, []byte("3"), []byte("value3")) checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) + checkItem(t, itr, []byte("2"), []byte("value2")) checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) + checkItem(t, itr, []byte("1"), []byte("value1")) checkNext(t, itr, true) - checkItem(t, itr, bz(""), bz("value")) + checkItem(t, itr, []byte(""), []byte("value")) checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() @@ -121,15 +121,15 @@ func TestPrefixDBReverseIterator2(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.ReverseIterator(bz(""), nil) - checkDomain(t, itr, bz(""), nil) - checkItem(t, itr, bz("3"), bz("value3")) + itr := pdb.ReverseIterator([]byte(""), nil) + checkDomain(t, itr, []byte(""), nil) + checkItem(t, itr, []byte("3"), []byte("value3")) checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) + checkItem(t, itr, []byte("2"), []byte("value2")) checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) + checkItem(t, itr, []byte("1"), []byte("value1")) checkNext(t, itr, true) - checkItem(t, itr, bz(""), bz("value")) + checkItem(t, itr, []byte(""), []byte("value")) checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() @@ -139,8 +139,8 @@ func TestPrefixDBReverseIterator3(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.ReverseIterator(nil, bz("")) - checkDomain(t, itr, nil, bz("")) + itr := pdb.ReverseIterator(nil, []byte("")) + checkDomain(t, itr, nil, []byte("")) checkInvalid(t, itr) itr.Close() } @@ -149,8 +149,8 @@ func TestPrefixDBReverseIterator4(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.ReverseIterator(bz(""), bz("")) - checkDomain(t, itr, bz(""), bz("")) + itr := pdb.ReverseIterator([]byte(""), []byte("")) + checkDomain(t, itr, []byte(""), []byte("")) checkInvalid(t, itr) itr.Close() } @@ -159,13 +159,13 @@ func TestPrefixDBReverseIterator5(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.ReverseIterator(bz("1"), nil) - checkDomain(t, itr, bz("1"), nil) - checkItem(t, itr, bz("3"), bz("value3")) + itr := pdb.ReverseIterator([]byte("1"), nil) + checkDomain(t, itr, []byte("1"), nil) + checkItem(t, itr, []byte("3"), []byte("value3")) 
checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) + checkItem(t, itr, []byte("2"), []byte("value2")) checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) + checkItem(t, itr, []byte("1"), []byte("value1")) checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() @@ -175,11 +175,11 @@ func TestPrefixDBReverseIterator6(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.ReverseIterator(bz("2"), nil) - checkDomain(t, itr, bz("2"), nil) - checkItem(t, itr, bz("3"), bz("value3")) + itr := pdb.ReverseIterator([]byte("2"), nil) + checkDomain(t, itr, []byte("2"), nil) + checkItem(t, itr, []byte("3"), []byte("value3")) checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) + checkItem(t, itr, []byte("2"), []byte("value2")) checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() @@ -189,11 +189,11 @@ func TestPrefixDBReverseIterator7(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, "key") - itr := pdb.ReverseIterator(nil, bz("2")) - checkDomain(t, itr, nil, bz("2")) - checkItem(t, itr, bz("1"), bz("value1")) + itr := pdb.ReverseIterator(nil, []byte("2")) + checkDomain(t, itr, nil, []byte("2")) + checkItem(t, itr, []byte("1"), []byte("value1")) checkNext(t, itr, true) - checkItem(t, itr, bz(""), bz("value")) + checkItem(t, itr, []byte(""), []byte("value")) checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() diff --git a/storage/rwtree.go b/storage/rwtree.go index ce7cb6666..7dce9f4fd 100644 --- a/storage/rwtree.go +++ b/storage/rwtree.go @@ -9,6 +9,9 @@ import ( "github.com/xlab/treeprint" ) +// RWTree provides an abstraction over IAVL that maintains separate read and write paths. Reads are routed to the most +// recently saved version of the tree - which provides immutable access. Writes are routed to a working tree that is +// mutable. On save the working tree is saved to DB, frozen, and replaces the previous immutable read tree. 
type RWTree struct { // Working tree accumulating writes tree *MutableTree diff --git a/storage/rwtree_test.go b/storage/rwtree_test.go index 2de7292dd..dd5afc2d5 100644 --- a/storage/rwtree_test.go +++ b/storage/rwtree_test.go @@ -13,14 +13,15 @@ import ( func TestSave(t *testing.T) { db := dbm.NewMemDB() rwt := NewRWTree(db, 100) - foo := bz("foo") - gaa := bz("gaa") - dam := bz("dam") + foo := []byte("foo") + gaa := []byte("gaa") + dam := []byte("dam") rwt.Set(foo, gaa) - rwt.Save() + _, _, err := rwt.Save() + require.NoError(t, err) assert.Equal(t, gaa, rwt.Get(foo)) rwt.Set(foo, dam) - _, _, err := rwt.Save() + _, _, err = rwt.Save() require.NoError(t, err) assert.Equal(t, dam, rwt.Get(foo)) } @@ -34,12 +35,13 @@ func TestEmptyTree(t *testing.T) { func TestRollback(t *testing.T) { db := dbm.NewMemDB() rwt := NewRWTree(db, 100) - rwt.Set(bz("Raffle"), bz("Topper")) + rwt.Set([]byte("Raffle"), []byte("Topper")) _, _, err := rwt.Save() + require.NoError(t, err) - foo := bz("foo") - gaa := bz("gaa") - dam := bz("dam") + foo := []byte("foo") + gaa := []byte("gaa") + dam := []byte("dam") rwt.Set(foo, gaa) hash1, version1, err := rwt.Save() require.NoError(t, err) @@ -48,7 +50,8 @@ func TestRollback(t *testing.T) { rwt.Set(foo, gaa) rwt.Set(gaa, dam) hash2, version2, err := rwt.Save() - rwt.Iterate(nil, nil, true, func(key, value []byte) error { + require.NoError(t, err) + err = rwt.Iterate(nil, nil, true, func(key, value []byte) error { fmt.Println(string(key), " => ", string(value)) return nil }) @@ -66,10 +69,11 @@ func TestRollback(t *testing.T) { rwt.Set(gaa, dam) hash3, version3, err := rwt.Save() require.NoError(t, err) - rwt.Iterate(nil, nil, true, func(key, value []byte) error { + err = rwt.Iterate(nil, nil, true, func(key, value []byte) error { fmt.Println(string(key), " => ", string(value)) return nil }) + require.NoError(t, err) // Expect the same hashes assert.Equal(t, hash2, hash3) @@ -79,18 +83,18 @@ func TestRollback(t *testing.T) { func TestVersionDivergence(t *testing.T) { // This test serves as a reminder that IAVL nodes contain the version and a new node is created for every write rwt1 := NewRWTree(dbm.NewMemDB(), 100) - rwt1.Set(bz("Raffle"), bz("Topper")) + rwt1.Set([]byte("Raffle"), []byte("Topper")) hash11, _, err := rwt1.Save() require.NoError(t, err) rwt2 := NewRWTree(dbm.NewMemDB(), 100) - rwt2.Set(bz("Raffle"), bz("Topper")) + rwt2.Set([]byte("Raffle"), []byte("Topper")) hash21, _, err := rwt2.Save() require.NoError(t, err) // The following 'ought' to be idempotent but isn't since it replaces the previous node with an identical one, but // with an incremented version number - rwt2.Set(bz("Raffle"), bz("Topper")) + rwt2.Set([]byte("Raffle"), []byte("Topper")) hash22, _, err := rwt2.Save() require.NoError(t, err) @@ -100,24 +104,24 @@ func TestVersionDivergence(t *testing.T) { func TestMutableTree_Iterate(t *testing.T) { mut := NewMutableTree(dbm.NewMemDB(), 100) - mut.Set(bz("aa"), bz("1")) - mut.Set(bz("aab"), bz("2")) - mut.Set(bz("aac"), bz("3")) - mut.Set(bz("aad"), bz("4")) - mut.Set(bz("ab"), bz("5")) + mut.Set([]byte("aa"), []byte("1")) + mut.Set([]byte("aab"), []byte("2")) + mut.Set([]byte("aac"), []byte("3")) + mut.Set([]byte("aad"), []byte("4")) + mut.Set([]byte("ab"), []byte("5")) _, _, err := mut.SaveVersion() require.NoError(t, err) - mut.IterateRange(bz("aab"), bz("aad"), true, func(key []byte, value []byte) bool { + mut.IterateRange([]byte("aab"), []byte("aad"), true, func(key []byte, value []byte) bool { fmt.Printf("%q -> %q\n", key, value) return 
false }) fmt.Println("foo") - mut.IterateRange(bz("aab"), bz("aad"), false, func(key []byte, value []byte) bool { + mut.IterateRange([]byte("aab"), []byte("aad"), false, func(key []byte, value []byte) bool { fmt.Printf("%q -> %q\n", key, value) return false }) fmt.Println("foo") - mut.IterateRange(bz("aad"), bz("aab"), true, func(key []byte, value []byte) bool { + mut.IterateRange([]byte("aad"), []byte("aab"), true, func(key []byte, value []byte) bool { fmt.Printf("%q -> %q\n", key, value) return false }) diff --git a/storage/mutable_tree.go b/storage/tree.go similarity index 76% rename from storage/mutable_tree.go rename to storage/tree.go index 045790c23..a216d3814 100644 --- a/storage/mutable_tree.go +++ b/storage/tree.go @@ -7,10 +7,15 @@ import ( dbm "github.com/tendermint/tendermint/libs/db" ) +// We wrap IAVL's tree types in order to implement standard DB interface and iteration helpers type MutableTree struct { *iavl.MutableTree } +type ImmutableTree struct { + *iavl.ImmutableTree +} + func NewMutableTree(db dbm.DB, cacheSize int) *MutableTree { tree := iavl.NewMutableTree(db, cacheSize) return &MutableTree{ @@ -39,6 +44,19 @@ func (mut *MutableTree) Load(version int64, overwriting bool) error { return nil } +func (mut *MutableTree) Iterate(start, end []byte, ascending bool, fn func(key []byte, value []byte) error) error { + return mut.asImmutable().Iterate(start, end, ascending, fn) +} + +func (mut *MutableTree) IterateWriteTree(start, end []byte, ascending bool, fn func(key []byte, value []byte) error) error { + var err error + mut.MutableTree.IterateRange(start, end, ascending, func(key, value []byte) (stop bool) { + err = fn(key, value) + return err != nil + }) + return err +} + func (mut *MutableTree) Get(key []byte) []byte { _, bs := mut.MutableTree.Get(key) return bs @@ -52,24 +70,21 @@ func (mut *MutableTree) GetImmutable(version int64) (*ImmutableTree, error) { return &ImmutableTree{tree}, nil } -// Get the current working tree as an ImmutableTree (for the methods - not immutable!) -func (mut *MutableTree) asImmutable() *ImmutableTree { - return &ImmutableTree{mut.MutableTree.ImmutableTree} -} - -func (mut *MutableTree) Iterate(start, end []byte, ascending bool, fn func(key []byte, value []byte) error) error { - return mut.asImmutable().Iterate(start, end, ascending, fn) +func (imt *ImmutableTree) Get(key []byte) []byte { + _, value := imt.ImmutableTree.Get(key) + return value } -func (mut *MutableTree) IterateWriteTree(start, end []byte, ascending bool, fn func(key []byte, value []byte) error) error { +func (imt *ImmutableTree) Iterate(start, end []byte, ascending bool, fn func(key []byte, value []byte) error) error { var err error - mut.MutableTree.IterateRange(start, end, ascending, func(key, value []byte) bool { + imt.ImmutableTree.IterateRange(start, end, ascending, func(key, value []byte) bool { err = fn(key, value) - if err != nil { - // stop - return true - } - return false + return err != nil }) return err } + +// Get the current working tree as an ImmutableTree (for the methods - not immutable!) 
+func (mut *MutableTree) asImmutable() *ImmutableTree { + return &ImmutableTree{mut.MutableTree.ImmutableTree} +} diff --git a/sync/ring_mutex.go b/sync/ring_mutex.go index 514bc3d39..d722687cb 100644 --- a/sync/ring_mutex.go +++ b/sync/ring_mutex.go @@ -45,8 +45,8 @@ func NewRingMutex(mutexCount int, hashMaker func() hash.Hash64) *RingMutex { ringMutex := &RingMutex{ mutexCount: uint64(mutexCount), // max slice length is bounded by max(int) thus the argument type - mutexes: make([]sync.RWMutex, mutexCount, mutexCount), - values: make([]Value, mutexCount, mutexCount), + mutexes: make([]sync.RWMutex, mutexCount), + values: make([]Value, mutexCount), hash: func(address []byte) uint64 { buf := make([]byte, 8) copy(buf, address) diff --git a/txs/payload/bond_tx.go b/txs/payload/bond_tx.go index d33f8b6ac..1aeb83f38 100644 --- a/txs/payload/bond_tx.go +++ b/txs/payload/bond_tx.go @@ -33,7 +33,7 @@ func (tx *BondTx) AddInput(st acmstate.AccountGetter, pubkey crypto.PublicKey, a return err } if acc == nil { - return fmt.Errorf("Invalid address %s from pubkey %s", addr, pubkey) + return fmt.Errorf("invalid address %s from pubkey %s", addr, pubkey) } return tx.AddInputWithSequence(pubkey, amt, acc.Sequence+uint64(1)) } diff --git a/txs/tx.go b/txs/tx.go index 0e9e4eadb..1e9e17a7c 100644 --- a/txs/tx.go +++ b/txs/tx.go @@ -109,6 +109,9 @@ func (tx *Tx) UnmarshalJSON(data []byte) error { tx.ChainID = w.ChainID // Now we know the Type we can deserialise the Payload tx.Payload, err = payload.New(w.Type) + if err != nil { + return err + } return json.Unmarshal(w.Payload, tx.Payload) } diff --git a/util/fs.go b/util/fs.go index 7979db7e5..f9eea8576 100644 --- a/util/fs.go +++ b/util/fs.go @@ -24,12 +24,12 @@ import ( func EnsureDir(dir string, mode os.FileMode) error { if fileOptions, err := os.Stat(dir); os.IsNotExist(err) { if errMake := os.MkdirAll(dir, mode); errMake != nil { - return fmt.Errorf("Could not create directory %s. %v", dir, err) + return fmt.Errorf("could not create directory %s. %v", dir, err) } } else if err != nil { - return fmt.Errorf("Error asserting directory %s: %v", dir, err) + return fmt.Errorf("error asserting directory %s: %v", dir, err) } else if !fileOptions.IsDir() { - return fmt.Errorf("Path already exists as a file: %s", dir) + return fmt.Errorf("path already exists as a file: %s", dir) } return nil } diff --git a/util/slice/slice.go b/util/slice/slice.go index 485510ff7..945bee4e4 100644 --- a/util/slice/slice.go +++ b/util/slice/slice.go @@ -28,12 +28,8 @@ func EmptySlice() []interface{} { func CopyAppend(slice []interface{}, elements ...interface{}) []interface{} { sliceLength := len(slice) newSlice := make([]interface{}, sliceLength+len(elements)) - for i, e := range slice { - newSlice[i] = e - } - for i, e := range elements { - newSlice[sliceLength+i] = e - } + copy(newSlice, slice) + copy(newSlice[sliceLength:], elements) return newSlice } @@ -41,12 +37,8 @@ func CopyAppend(slice []interface{}, elements ...interface{}) []interface{} { func CopyPrepend(slice []interface{}, elements ...interface{}) []interface{} { elementsLength := len(elements) newSlice := make([]interface{}, len(slice)+elementsLength) - for i, e := range elements { - newSlice[i] = e - } - for i, e := range slice { - newSlice[elementsLength+i] = e - } + copy(newSlice, elements) + copy(newSlice[elementsLength:], slice) return newSlice } @@ -73,11 +65,6 @@ func Delete(slice []interface{}, i int, n int) []interface{} { return append(slice[:i], slice[i+n:]...) 
} -// Delete an element at a specific index and return the contracted list -func DeleteAt(slice []interface{}, i int) []interface{} { - return Delete(slice, i, 1) -} - // Flatten a slice by a list by splicing any elements of the list that are // themselves lists into the slice elements to the list in place of slice itself func Flatten(slice []interface{}) []interface{} { diff --git a/vent/service/decoder.go b/vent/service/decoder.go index 547aeee90..28c462e5b 100644 --- a/vent/service/decoder.go +++ b/vent/service/decoder.go @@ -21,7 +21,7 @@ func decodeEvent(header *exec.Header, log *exec.LogEvent, origin *exec.Origin, a evAbi, ok := abiSpec.EventsById[eventID] if !ok { - return nil, fmt.Errorf("Abi spec not found for event %x", eventID) + return nil, fmt.Errorf("abi spec not found for event %x", eventID) } // decode header to get context data for each event @@ -36,7 +36,7 @@ func decodeEvent(header *exec.Header, log *exec.LogEvent, origin *exec.Origin, a // unpack event data (topics & data part) if err := abi.UnpackEvent(&evAbi, log.Topics, log.Data, unpackedData...); err != nil { - return nil, errors.Wrap(err, "Could not unpack event data") + return nil, errors.Wrap(err, "could not unpack event data") } // for each decoded item value, stores it in given item name diff --git a/vent/service/rowbuilder.go b/vent/service/rowbuilder.go index 4d4b17bf4..72b8a255d 100644 --- a/vent/service/rowbuilder.go +++ b/vent/service/rowbuilder.go @@ -75,7 +75,7 @@ func buildBlkData(tbls types.EventTables, block *exec.BlockExecution) (types.Eve if _, ok := tbls[tables.Block]; ok { blockHeader, err := json.Marshal(block.Header) if err != nil { - return types.EventDataRow{}, fmt.Errorf("Couldn't marshal BlockHeader in block %v", block) + return types.EventDataRow{}, fmt.Errorf("could not marshal BlockHeader in block %v", block) } row[columns.Height] = fmt.Sprintf("%v", block.Height) diff --git a/vent/sqldb/adapters/postgres_adapter.go b/vent/sqldb/adapters/postgres_adapter.go index 1fc434231..a0e99a6f7 100644 --- a/vent/sqldb/adapters/postgres_adapter.go +++ b/vent/sqldb/adapters/postgres_adapter.go @@ -57,6 +57,9 @@ func (pa *PostgresAdapter) Open(dbURL string) (*sqlx.DB, error) { if pa.Schema != "" { err = ensureSchema(db, pa.Schema, pa.Log) + if err != nil { + return nil, err + } } else { return nil, fmt.Errorf("no schema supplied") } diff --git a/vent/sqlsol/projection.go b/vent/sqlsol/projection.go index 8dca7e4a0..d8b2a6d7a 100644 --- a/vent/sqlsol/projection.go +++ b/vent/sqlsol/projection.go @@ -256,7 +256,7 @@ func getSQLType(evmSignature string, bytesToString bool) (types.SQLColumnType, i return types.SQLColumnTypeNumeric, 0, nil } default: - return -1, 0, fmt.Errorf("Don't know how to map evmSignature: %s ", evmSignature) + return -1, 0, fmt.Errorf("do not know how to map evmSignature: %s ", evmSignature) } } diff --git a/vent/sqlsol/projection_test.go b/vent/sqlsol/projection_test.go index ca5b3ca11..40a5c2530 100644 --- a/vent/sqlsol/projection_test.go +++ b/vent/sqlsol/projection_test.go @@ -222,6 +222,6 @@ func TestNewProjectionFromEventSpec(t *testing.T) { // Create a column conflict between burn and unreliable fields (both map to burnt so the SQL column def must be identical) field = eventSpec[1].GetFieldMapping("unreliable") field.Primary = !field.Primary - projection, err = sqlsol.NewProjectionFromEventSpec(eventSpec) + _, err = sqlsol.NewProjectionFromEventSpec(eventSpec) require.Error(t, err) }
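
Note for reviewers: as a minimal sketch of how the retained RWTree is driven after this restructure, the snippet below uses only calls already exercised in the patch (NewRWTree, Set, Save, Get, Iterate from storage/rwtree_test.go and storage/tree.go). The hyperledger/burrow import path and the wrapping main function are assumptions added for illustration and are not part of the patch itself.

```go
package main

import (
	"fmt"

	dbm "github.com/tendermint/tendermint/libs/db"

	"github.com/hyperledger/burrow/storage" // assumed module path for the storage package patched above
)

func main() {
	// Writes accumulate in the working (mutable) tree; reads are served from the
	// most recently saved (immutable) version, so a Set is not visible via Get
	// or Iterate until Save() commits it.
	rwt := storage.NewRWTree(dbm.NewMemDB(), 100)
	rwt.Set([]byte("Cora"), []byte("654456"))

	hash, version, err := rwt.Save()
	if err != nil {
		panic(err)
	}
	fmt.Printf("saved version %d with hash %X\n", version, hash)

	// Reads now hit the committed read tree.
	fmt.Printf("Cora => %s\n", rwt.Get([]byte("Cora")))

	// Iterate walks the committed tree; nil bounds span the whole key domain.
	err = rwt.Iterate(nil, nil, true, func(key, value []byte) error {
		fmt.Printf("%q -> %q\n", key, value)
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```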