From 3752507a11c8977a6f3d5ff4c2b8d97030ce168c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 4 May 2016 19:45:50 +0300 Subject: [PATCH 01/44] [release/1.4.6] travis: run CI builds against multiple Go versions (cherry picked from commit bc3b406bffc9c6f38420f1504cb7eba0b92cad3b) --- .travis.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index c1d545c548daa..24486d4a0a127 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,10 +1,12 @@ language: go go: - 1.4.2 + - 1.5.4 + - 1.6.2 install: # - go get code.google.com/p/go.tools/cmd/goimports # - go get github.com/golang/lint/golint - # - go get golang.org/x/tools/cmd/vet + # - go get golang.org/x/tools/cmd/vet - go get golang.org/x/tools/cmd/cover before_script: # - gofmt -l -w . @@ -24,6 +26,6 @@ notifications: webhooks: urls: - https://webhooks.gitter.im/e/e09ccdce1048c5e03445 - on_success: change + on_success: change on_failure: always - on_start: false + on_start: false From e3b3c298df45131d8856724da6b5b0e70558a6e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 11 May 2016 11:49:44 +0300 Subject: [PATCH 02/44] [release/1.4.6] cmd/geth, internal/web3ext, rpc: surface rpc module, fix shh, fix miner (cherry picked from commit bc6fdad786706019fc5aeb9605f1d23b3c5b7522) --- cmd/geth/js.go | 2 +- internal/web3ext/web3ext.go | 177 ++++++++++++++++++------------------ rpc/server.go | 5 +- rpc/utils.go | 2 +- 4 files changed, 94 insertions(+), 92 deletions(-) diff --git a/cmd/geth/js.go b/cmd/geth/js.go index 729cc2fd711ee..5f455d7a33d23 100644 --- a/cmd/geth/js.go +++ b/cmd/geth/js.go @@ -198,7 +198,7 @@ func (js *jsre) apiBindings() error { // load only supported API's in javascript runtime shortcuts := "var eth = web3.eth; var personal = web3.personal; " for _, apiName := range apiNames { - if apiName == "web3" || apiName == "rpc" { + if apiName == "web3" { continue // manually mapped or ignore } diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 1928913deaacb..8d5d1500fff71 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -18,44 +18,17 @@ package web3ext var Modules = map[string]string{ - "txpool": TxPool_JS, "admin": Admin_JS, - "personal": Personal_JS, + "debug": Debug_JS, "eth": Eth_JS, "miner": Miner_JS, - "debug": Debug_JS, "net": Net_JS, + "personal": Personal_JS, + "rpc": RPC_JS, + "shh": Shh_JS, + "txpool": TxPool_JS, } -const TxPool_JS = ` -web3._extend({ - property: 'txpool', - methods: - [ - ], - properties: - [ - new web3._extend.Property({ - name: 'content', - getter: 'txpool_content' - }), - new web3._extend.Property({ - name: 'inspect', - getter: 'txpool_inspect' - }), - new web3._extend.Property({ - name: 'status', - getter: 'txpool_status', - outputFormatter: function(status) { - status.pending = web3._extend.utils.toDecimal(status.pending); - status.queued = web3._extend.utils.toDecimal(status.queued); - return status; - } - }) - ] -}); -` - const Admin_JS = ` web3._extend({ property: 'admin', @@ -176,60 +149,6 @@ web3._extend({ }); ` -const Eth_JS = ` -web3._extend({ - property: 'eth', - methods: - [ - new web3._extend.Method({ - name: 'sign', - call: 'eth_sign', - params: 2, - inputFormatter: [web3._extend.formatters.inputAddressFormatter, null] - }), - new web3._extend.Method({ - name: 'resend', - call: 'eth_resend', - params: 3, - inputFormatter: [web3._extend.formatters.inputTransactionFormatter, web3._extend.utils.fromDecimal, 
web3._extend.utils.fromDecimal] - }), - new web3._extend.Method({ - name: 'getNatSpec', - call: 'eth_getNatSpec', - params: 1, - inputFormatter: [web3._extend.formatters.inputTransactionFormatter] - }), - new web3._extend.Method({ - name: 'signTransaction', - call: 'eth_signTransaction', - params: 1, - inputFormatter: [web3._extend.formatters.inputTransactionFormatter] - }), - new web3._extend.Method({ - name: 'submitTransaction', - call: 'eth_submitTransaction', - params: 1, - inputFormatter: [web3._extend.formatters.inputTransactionFormatter] - }) - ], - properties: - [ - new web3._extend.Property({ - name: 'pendingTransactions', - getter: 'eth_pendingTransactions', - outputFormatter: function(txs) { - var formatted = []; - for (var i = 0; i < txs.length; i++) { - formatted.push(web3._extend.formatters.outputTransactionFormatter(txs[i])); - formatted[i].blockHash = null; - } - return formatted; - } - }) - ] -}); -` - const Debug_JS = ` web3._extend({ property: 'debug', @@ -382,6 +301,60 @@ web3._extend({ }); ` +const Eth_JS = ` +web3._extend({ + property: 'eth', + methods: + [ + new web3._extend.Method({ + name: 'sign', + call: 'eth_sign', + params: 2, + inputFormatter: [web3._extend.formatters.inputAddressFormatter, null] + }), + new web3._extend.Method({ + name: 'resend', + call: 'eth_resend', + params: 3, + inputFormatter: [web3._extend.formatters.inputTransactionFormatter, web3._extend.utils.fromDecimal, web3._extend.utils.fromDecimal] + }), + new web3._extend.Method({ + name: 'getNatSpec', + call: 'eth_getNatSpec', + params: 1, + inputFormatter: [web3._extend.formatters.inputTransactionFormatter] + }), + new web3._extend.Method({ + name: 'signTransaction', + call: 'eth_signTransaction', + params: 1, + inputFormatter: [web3._extend.formatters.inputTransactionFormatter] + }), + new web3._extend.Method({ + name: 'submitTransaction', + call: 'eth_submitTransaction', + params: 1, + inputFormatter: [web3._extend.formatters.inputTransactionFormatter] + }) + ], + properties: + [ + new web3._extend.Property({ + name: 'pendingTransactions', + getter: 'eth_pendingTransactions', + outputFormatter: function(txs) { + var formatted = []; + for (var i = 0; i < txs.length; i++) { + formatted.push(web3._extend.formatters.outputTransactionFormatter(txs[i])); + formatted[i].blockHash = null; + } + return formatted; + } + }) + ] +}); +` + const Miner_JS = ` web3._extend({ property: 'miner', @@ -412,7 +385,7 @@ web3._extend({ name: 'setGasPrice', call: 'miner_setGasPrice', params: 1, - inputFormatter: [web3._extend.utils.fromDecial] + inputFormatter: [web3._extend.utils.fromDecimal] }), new web3._extend.Method({ name: 'startAutoDAG', @@ -491,7 +464,35 @@ web3._extend({ [ new web3._extend.Property({ name: 'version', - getter: 'shh_version' + getter: 'shh_version', + outputFormatter: web3._extend.utils.toDecimal + }) + ] +}); +` + +const TxPool_JS = ` +web3._extend({ + property: 'txpool', + methods: [], + properties: + [ + new web3._extend.Property({ + name: 'content', + getter: 'txpool_content' + }), + new web3._extend.Property({ + name: 'inspect', + getter: 'txpool_inspect' + }), + new web3._extend.Property({ + name: 'status', + getter: 'txpool_status', + outputFormatter: function(status) { + status.pending = web3._extend.utils.toDecimal(status.pending); + status.queued = web3._extend.utils.toDecimal(status.queued); + return status; + } }) ] }); diff --git a/rpc/server.go b/rpc/server.go index 001107a1b7359..69f3271e8ecce 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -34,7 +34,8 @@ const ( 
notificationBufferSize = 10000 // max buffered notifications before codec is closed - DefaultIPCApis = "admin,eth,debug,miner,net,shh,txpool,personal,web3" + MetadataApi = "rpc" + DefaultIPCApis = "admin,debug,eth,miner,net,personal,shh,txpool,web3" DefaultHTTPApis = "eth,net,web3" ) @@ -61,7 +62,7 @@ func NewServer() *Server { // register a default service which will provide meta information about the RPC service such as the services and // methods it offers. rpcService := &RPCService{server} - server.RegisterName("rpc", rpcService) + server.RegisterName(MetadataApi, rpcService) return server } diff --git a/rpc/utils.go b/rpc/utils.go index 86938e9b37572..fe482e19dd0df 100644 --- a/rpc/utils.go +++ b/rpc/utils.go @@ -234,7 +234,7 @@ func SupportedModules(client Client) (map[string]string, error) { req := JSONRequest{ Id: []byte("1"), Version: "2.0", - Method: "rpc_modules", + Method: MetadataApi + "_modules", } if err := client.Send(req); err != nil { return nil, err From efa2b3da7ede771d2c80d27abb2aabd140028d66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 11 May 2016 11:58:48 +0300 Subject: [PATCH 03/44] [release/1.4.6] cmd/geth: use text/templates in the tester, not html (cherry picked from commit 284f1d6beb589d240a18e9b63560266041fa9798) --- cmd/geth/run_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/run_test.go b/cmd/geth/run_test.go index a82eb9d68aabc..ba4ce0c60f24a 100644 --- a/cmd/geth/run_test.go +++ b/cmd/geth/run_test.go @@ -20,7 +20,6 @@ import ( "bufio" "bytes" "fmt" - "html/template" "io" "io/ioutil" "os" @@ -28,6 +27,7 @@ import ( "regexp" "sync" "testing" + "text/template" "time" ) From 90beb6112ee61dc3943748eab66f0dad8daf7d20 Mon Sep 17 00:00:00 2001 From: Changhoon Lee <6londe@gmail.com> Date: Thu, 12 May 2016 22:40:47 +0900 Subject: [PATCH 04/44] [release/1.4.6] README: fix typos README: fix typos (cherry picked from commit 2348f8e2a8f2203e8ce4058cb23135579556be53) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index aaf44997d6e40..bdd85b3fa46b6 100644 --- a/README.md +++ b/README.md @@ -58,14 +58,14 @@ anyone on the internet, and are grateful for even the smallest of fixes! If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request for the maintainers to review and merge into the main code base. If you wish to submit more complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) -to ensure those changes are in line with the general philosopy of the project and/or get some +to ensure those changes are in line with the general philosophy of the project and/or get some early feedback which can make both your efforts much lighter as well as our review and merge procedures quick and simple. -Please make sure your contributions adhere to our coding guidlines: +Please make sure your contributions adhere to our coding guidelines: * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). - * Code must be documented adherign to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines. + * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines. * Pull requests need to be based on and opened against the `develop` branch. 
* Commit messages should be prefixed with the package(s) they modify. * E.g. "eth, rpc: make trace configs optional" From 5fb29fd45fee413fc9269189a79b570aa9a93d1a Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 18 May 2016 11:31:00 +0200 Subject: [PATCH 05/44] [release/1.4.6] node, p2p: move network config out of Server This silences a go vet message about copying p2p.Server in package node. (cherry picked from commit 542b839ec74ff0ce94ab0d34e25531f5819d95d1) --- node/node.go | 8 +++----- p2p/dial_test.go | 3 ++- p2p/server.go | 14 ++++++++------ p2p/server_test.go | 39 +++++++++++++++++++++++---------------- 4 files changed, 36 insertions(+), 28 deletions(-) diff --git a/node/node.go b/node/node.go index 06a1b7aed76df..1f517a027e5f9 100644 --- a/node/node.go +++ b/node/node.go @@ -49,7 +49,7 @@ type Node struct { datadir string // Path to the currently used data directory eventmux *event.TypeMux // Event multiplexer used between the services of a stack - serverConfig *p2p.Server // Configuration of the underlying P2P networking layer + serverConfig p2p.Config server *p2p.Server // Currently running P2P networking layer serviceFuncs []ServiceConstructor // Service constructors (in dependency order) @@ -97,7 +97,7 @@ func New(conf *Config) (*Node, error) { } return &Node{ datadir: conf.DataDir, - serverConfig: &p2p.Server{ + serverConfig: p2p.Config{ PrivateKey: conf.NodeKey(), Name: conf.Name, Discovery: !conf.NoDiscovery, @@ -151,9 +151,7 @@ func (n *Node) Start() error { return ErrNodeRunning } // Otherwise copy and specialize the P2P configuration - running := new(p2p.Server) - *running = *n.serverConfig - + running := &p2p.Server{Config: n.serverConfig} services := make(map[reflect.Type]Service) for _, constructor := range n.serviceFuncs { // Create a new context for the particular service diff --git a/p2p/dial_test.go b/p2p/dial_test.go index 3447660a3f060..05d9b75626ff4 100644 --- a/p2p/dial_test.go +++ b/p2p/dial_test.go @@ -478,7 +478,8 @@ func TestDialResolve(t *testing.T) { } // Now run the task, it should resolve the ID once. - srv := &Server{ntab: table, Dialer: &net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}} + config := Config{Dialer: &net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}} + srv := &Server{ntab: table, Config: config} tasks[0].Do(srv) if !reflect.DeepEqual(table.resolveCalls, []discover.NodeID{dest.ID}) { t.Fatalf("wrong resolve calls, got %v", table.resolveCalls) diff --git a/p2p/server.go b/p2p/server.go index 3b2f2b0786e20..880aa7cf1fb1c 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -54,12 +54,8 @@ var errServerStopped = errors.New("server stopped") var srvjslog = logger.NewJsonLogger() -// Server manages all peer connections. -// -// The fields of Server are used as configuration parameters. -// You should set them before starting the Server. Fields may not be -// modified while the server is running. -type Server struct { +// Config holds Server options. +type Config struct { // This field must be set to a valid secp256k1 private key. PrivateKey *ecdsa.PrivateKey @@ -120,6 +116,12 @@ type Server struct { // If NoDial is true, the server will not dial any peers. NoDial bool +} + +// Server manages all peer connections. +type Server struct { + // Config fields may not be modified while the server is running. + Config // Hooks for testing. These are useful because we can inhibit // the whole protocol stack. 
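Why this split silences go vet: p2p.Server carries runtime state such as mutexes and wait groups, so the old code path that copied a whole Server value (*running = *n.serverConfig) tripped vet's lock-copy check. With the option fields pulled out into a plain-data Config that Server embeds, the node only ever copies configuration, never locks. A minimal sketch of the pattern follows, using made-up field names rather than the real ones:

package main

import "sync"

// Config holds only plain option data, so copying it by value is harmless.
type Config struct {
	Name     string
	MaxPeers int
}

// Server embeds Config and adds runtime state that must not be copied.
type Server struct {
	Config             // option fields, frozen once the server is running
	lock    sync.Mutex // runtime state: copying this is what vet complains about
	running bool
}

func main() {
	cfg := Config{Name: "test", MaxPeers: 10}
	// Only the plain config value is copied into the fresh server;
	// the mutex is zero-initialised in place rather than copied.
	srv := &Server{Config: cfg}
	_ = srv
}
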
diff --git a/p2p/server_test.go b/p2p/server_test.go index b437ac3676867..deb34f5bb1841 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -67,11 +67,14 @@ func (c *testTransport) close(err error) { } func startTestServer(t *testing.T, id discover.NodeID, pf func(*Peer)) *Server { + config := Config{ + Name: "test", + MaxPeers: 10, + ListenAddr: "127.0.0.1:0", + PrivateKey: newkey(), + } server := &Server{ - Name: "test", - MaxPeers: 10, - ListenAddr: "127.0.0.1:0", - PrivateKey: newkey(), + Config: config, newPeerHook: pf, newTransport: func(fd net.Conn) transport { return newTestTransport(id, fd) }, } @@ -200,10 +203,10 @@ func TestServerTaskScheduling(t *testing.T) { // The Server in this test isn't actually running // because we're only interested in what run does. srv := &Server{ - MaxPeers: 10, - quit: make(chan struct{}), - ntab: fakeTable{}, - running: true, + Config: Config{MaxPeers: 10}, + quit: make(chan struct{}), + ntab: fakeTable{}, + running: true, } srv.loopWG.Add(1) go func() { @@ -314,10 +317,12 @@ func (t *testTask) Do(srv *Server) { func TestServerAtCap(t *testing.T) { trustedID := randomID() srv := &Server{ - PrivateKey: newkey(), - MaxPeers: 10, - NoDial: true, - TrustedNodes: []*discover.Node{{ID: trustedID}}, + Config: Config{ + PrivateKey: newkey(), + MaxPeers: 10, + NoDial: true, + TrustedNodes: []*discover.Node{{ID: trustedID}}, + }, } if err := srv.Start(); err != nil { t.Fatalf("could not start: %v", err) @@ -415,10 +420,12 @@ func TestServerSetupConn(t *testing.T) { for i, test := range tests { srv := &Server{ - PrivateKey: srvkey, - MaxPeers: 10, - NoDial: true, - Protocols: []Protocol{discard}, + Config: Config{ + PrivateKey: srvkey, + MaxPeers: 10, + NoDial: true, + Protocols: []Protocol{discard}, + }, newTransport: func(fd net.Conn) transport { return test.tt }, } if !test.dontstart { From 7df36e5ec1d099f843258ca42b2c5490d5e55a8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 25 Feb 2016 18:36:42 +0200 Subject: [PATCH 06/44] [release/1.4.6] eth/downloader: implement concurrent header downloads (cherry picked from commit b40dc8a1daf4bd1f293cf322274b470ad91517fb) --- eth/downloader/downloader.go | 480 +++++++++++++++++++----------- eth/downloader/downloader_test.go | 47 +-- eth/downloader/peer.go | 66 +++- eth/downloader/queue.go | 202 ++++++++++++- 4 files changed, 584 insertions(+), 211 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 0f76357cba2f6..2b2de1b5f6479 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -42,6 +42,7 @@ var ( MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request + MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request MaxStateFetch = 384 // Amount of node state values to allow fetching per request @@ -52,7 +53,8 @@ var ( blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired - headerTTL = 3 * time.Second // [eth/62] Time it takes for a header request to time out + headerTargetRTT = time.Second // 
[eth/62] Target time for completing a header retrieval request (only for measurements for now) + headerTTL = 2 * time.Second // [eth/62] Time it takes for a header request to time out bodyTargetRTT = 3 * time.Second / 2 // [eth/62] Target time for completing a block body retrieval request bodyTTL = 3 * bodyTargetRTT // [eth/62] Maximum time allowance before a block body request is considered expired receiptTargetRTT = 3 * time.Second / 2 // [eth/63] Target time for completing a receipt retrieval request @@ -60,9 +62,10 @@ var ( stateTargetRTT = 2 * time.Second / 2 // [eth/63] Target time for completing a state trie retrieval request stateTTL = 3 * stateTargetRTT // [eth/63] Maximum time allowance before a node data request is considered expired - maxQueuedHashes = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection) - maxQueuedHeaders = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) - maxResultsProcess = 256 // Number of download results to import at once into the chain + maxQueuedHashes = 32 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection) + maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) + maxHeadersProcess = 2048 // Number of header download results to import at once into the chain + maxResultsProcess = 4096 // Number of content download results to import at once into the chain fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected @@ -72,29 +75,30 @@ var ( ) var ( - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer is unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errStallingPeer = errors.New("peer is stalling") - errNoPeers = errors.New("no peers to keep download active") - errTimeout = errors.New("timeout") - errEmptyHashSet = errors.New("empty hash set by peer") - errEmptyHeaderSet = errors.New("empty header set by peer") - errPeersUnavailable = errors.New("no peers available or all tried for download") - errAlreadyInPool = errors.New("hash already in pool") - errInvalidAncestor = errors.New("retrieved ancestor is invalid") - errInvalidChain = errors.New("retrieved hash chain is invalid") - errInvalidBlock = errors.New("retrieved block is invalid") - errInvalidBody = errors.New("retrieved block body is invalid") - errInvalidReceipt = errors.New("retrieved receipt is invalid") - errCancelHashFetch = errors.New("hash download canceled (requested)") - errCancelBlockFetch = errors.New("block download canceled (requested)") - errCancelHeaderFetch = errors.New("block header download canceled (requested)") - errCancelBodyFetch = errors.New("block body download canceled (requested)") - errCancelReceiptFetch = errors.New("receipt download canceled (requested)") - errCancelStateFetch = errors.New("state data download canceled (requested)") - errCancelProcessing = errors.New("processing canceled (requested)") - errNoSyncActive = errors.New("no sync active") + errBusy = errors.New("busy") + errUnknownPeer = errors.New("peer is unknown or unhealthy") + errBadPeer = errors.New("action from bad peer ignored") + errStallingPeer = errors.New("peer is stalling") + errNoPeers = errors.New("no peers to keep download active") + errTimeout = errors.New("timeout") + errEmptyHashSet = errors.New("empty hash set by peer") + errEmptyHeaderSet = errors.New("empty 
header set by peer") + errPeersUnavailable = errors.New("no peers available or all tried for download") + errAlreadyInPool = errors.New("hash already in pool") + errInvalidAncestor = errors.New("retrieved ancestor is invalid") + errInvalidChain = errors.New("retrieved hash chain is invalid") + errInvalidBlock = errors.New("retrieved block is invalid") + errInvalidBody = errors.New("retrieved block body is invalid") + errInvalidReceipt = errors.New("retrieved receipt is invalid") + errCancelHashFetch = errors.New("hash download canceled (requested)") + errCancelBlockFetch = errors.New("block download canceled (requested)") + errCancelHeaderFetch = errors.New("block header download canceled (requested)") + errCancelBodyFetch = errors.New("block body download canceled (requested)") + errCancelReceiptFetch = errors.New("receipt download canceled (requested)") + errCancelStateFetch = errors.New("state data download canceled (requested)") + errCancelHeaderProcessing = errors.New("header processing canceled (requested)") + errCancelContentProcessing = errors.New("content processing canceled (requested)") + errNoSyncActive = errors.New("no sync active") ) type Downloader struct { @@ -137,16 +141,17 @@ type Downloader struct { // Channels newPeerCh chan *peer - hashCh chan dataPack // [eth/61] Channel receiving inbound hashes - blockCh chan dataPack // [eth/61] Channel receiving inbound blocks - headerCh chan dataPack // [eth/62] Channel receiving inbound block headers - bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies - receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts - stateCh chan dataPack // [eth/63] Channel receiving inbound node state data - blockWakeCh chan bool // [eth/61] Channel to signal the block fetcher of new tasks - bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks - receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks - stateWakeCh chan bool // [eth/63] Channel to signal the state fetcher of new tasks + hashCh chan dataPack // [eth/61] Channel receiving inbound hashes + blockCh chan dataPack // [eth/61] Channel receiving inbound blocks + headerCh chan dataPack // [eth/62] Channel receiving inbound block headers + bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies + receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts + stateCh chan dataPack // [eth/63] Channel receiving inbound node state data + blockWakeCh chan bool // [eth/61] Channel to signal the block fetcher of new tasks + bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks + receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks + stateWakeCh chan bool // [eth/63] Channel to signal the state fetcher of new tasks + headerProcCh chan []*types.Header // [eth/62] Channel to feed the header processor new tasks cancelCh chan struct{} // Channel to cancel mid-flight syncs cancelLock sync.RWMutex // Lock to protect the cancel channel in delivers @@ -194,6 +199,7 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, ha bodyWakeCh: make(chan bool, 1), receiptWakeCh: make(chan bool, 1), stateWakeCh: make(chan bool, 1), + headerProcCh: make(chan []*types.Header, 1), } } @@ -308,6 +314,13 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode default: } } + for empty := false; !empty; { + select { + case <-d.headerProcCh: + default: + empty = true + } + } // Reset 
any ephemeral sync statistics d.syncStatsLock.Lock() d.syncStatsStateTotal = 0 @@ -373,7 +386,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e if d.syncInitHook != nil { d.syncInitHook(origin, latest) } - return d.spawnSync( + return d.spawnSync(origin+1, func() error { return d.fetchHashes61(p, td, origin+1) }, func() error { return d.fetchBlocks61(origin + 1) }, ) @@ -423,11 +436,12 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e if d.syncInitHook != nil { d.syncInitHook(origin, latest) } - return d.spawnSync( - func() error { return d.fetchHeaders(p, td, origin+1) }, // Headers are always retrieved - func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync - func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync - func() error { return d.fetchNodeData() }, // Node state data is retrieved during fast sync + return d.spawnSync(origin+1, + func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved + func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved + func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync + func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync + func() error { return d.fetchNodeData() }, // Node state data is retrieved during fast sync ) default: @@ -439,11 +453,11 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e // spawnSync runs d.process and all given fetcher functions to completion in // separate goroutines, returning the first error that appears. -func (d *Downloader) spawnSync(fetchers ...func() error) error { +func (d *Downloader) spawnSync(origin uint64, fetchers ...func() error) error { var wg sync.WaitGroup errc := make(chan error, len(fetchers)+1) wg.Add(len(fetchers) + 1) - go func() { defer wg.Done(); errc <- d.process() }() + go func() { defer wg.Done(); errc <- d.processContent() }() for _, fn := range fetchers { fn := fn go func() { defer wg.Done(); errc <- fn() }() @@ -1149,55 +1163,38 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) { return start, nil } -// fetchHeaders keeps retrieving headers from the requested number, until no more -// are returned, potentially throttling on the way. -// -// The queue parameter can be used to switch between queuing headers for block -// body download too, or directly import as pure header chains. -func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { - glog.V(logger.Debug).Infof("%v: downloading headers from #%d", p, from) +// fetchHeaders keeps retrieving headers concurrently from the number +// requested, until no more are returned, potentially throttling on the way. To +// facilitate concurrency but still protect against malicious nodes sending bad +// headers, we construct a header chain skeleton using the "origin" peer we are +// syncing with, and fill in the missing headers using anyone else. Headers from +// other peers are only accepted if they map cleanly to the skeleton. If noone +// can fill in the skeleton - not even the origin peer - it's assumed invalid and +// the origin is dropped. 
+func (d *Downloader) fetchHeaders(p *peer, from uint64) error { + glog.V(logger.Debug).Infof("%v: directing header downloads from #%d", p, from) defer glog.V(logger.Debug).Infof("%v: header download terminated", p) - // Calculate the pivoting point for switching from fast to slow sync - pivot := d.queue.FastSyncPivot() - - // Keep a count of uncertain headers to roll back - rollback := []*types.Header{} - defer func() { - if len(rollback) > 0 { - // Flatten the headers and roll them back - hashes := make([]common.Hash, len(rollback)) - for i, header := range rollback { - hashes[i] = header.Hash() - } - lh, lfb, lb := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number() - d.rollback(hashes) - glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)", - len(hashes), lh, d.headHeader().Number, lfb, d.headFastBlock().Number(), lb, d.headBlock().Number()) - - // If we're already past the pivot point, this could be an attack, disable fast sync - if rollback[len(rollback)-1].Number.Uint64() > pivot { - d.noFast = true - } - } - }() - - // Create a timeout timer, and the associated hash fetcher - request := time.Now() // time of the last fetch request + // Create a timeout timer, and the associated header fetcher + skeleton := true // Skeleton assembly phase or finishing up + request := time.Now() // time of the last skeleton fetch request timeout := time.NewTimer(0) // timer to dump a non-responsive active peer <-timeout.C // timeout channel should be initially empty defer timeout.Stop() getHeaders := func(from uint64) { - glog.V(logger.Detail).Infof("%v: fetching %d headers from #%d", p, MaxHeaderFetch, from) - - go p.getAbsHeaders(from, MaxHeaderFetch, 0, false) + if skeleton { + glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from) + go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) + } else { + glog.V(logger.Detail).Infof("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from) + go p.getAbsHeaders(from, MaxHeaderFetch, 0, false) + } request = time.Now() timeout.Reset(headerTTL) } - // Start pulling headers, until all are exhausted + // Start pulling the header chain skeleton until all is done getHeaders(from) - gotHeaders := false for { select { @@ -1205,115 +1202,44 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { return errCancelHeaderFetch case packet := <-d.headerCh: - // Make sure the active peer is giving us the headers + // Make sure the active peer is giving us the skeleton headers if packet.PeerId() != p.id { - glog.V(logger.Debug).Infof("Received headers from incorrect peer (%s)", packet.PeerId()) + glog.V(logger.Debug).Infof("Received skeleton headers from incorrect peer (%s)", packet.PeerId()) break } headerReqTimer.UpdateSince(request) timeout.Stop() + // If the skeleton's finished, pull any remaining head headers directly from the origin + if packet.Items() == 0 && skeleton { + skeleton = false + getHeaders(from) + continue + } // If no more headers are inbound, notify the content fetchers and return if packet.Items() == 0 { glog.V(logger.Debug).Infof("%v: no available headers", p) - - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: - } - } - // If no headers were retrieved at all, the peer violated it's TD promise that it had a - // better chain compared to ours. 
The only exception is if it's promised blocks were - // already imported by other means (e.g. fetcher): - // - // R , L : Both at block 10 - // R: Mine block 11, and propagate it to L - // L: Queue block 11 for import - // L: Notice that R's head and TD increased compared to ours, start sync - // L: Import of block 11 finishes - // L: Sync begins, and finds common ancestor at 11 - // L: Request new headers up from 11 (R's TD was higher, it must have something) - // R: Nothing to give - if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 { - return errStallingPeer - } - // If fast or light syncing, ensure promised headers are indeed delivered. This is - // needed to detect scenarios where an attacker feeds a bad pivot and then bails out - // of delivering the post-pivot blocks that would flag the invalid content. - // - // This check cannot be executed "as is" for full imports, since blocks may still be - // queued for processing when the header download completes. However, as long as the - // peer gave us something useful, we're already happy/progressed (above check). - if d.mode == FastSync || d.mode == LightSync { - if td.Cmp(d.getTd(d.headHeader().Hash())) > 0 { - return errStallingPeer - } - } - rollback = nil + d.headerProcCh <- nil return nil } - gotHeaders = true headers := packet.(*headerPack).headers - // Otherwise insert all the new headers, aborting in case of junk - glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) - - if d.mode == FastSync || d.mode == LightSync { - // Collect the yet unknown headers to mark them as uncertain - unknown := make([]*types.Header, 0, len(headers)) - for _, header := range headers { - if !d.hasHeader(header.Hash()) { - unknown = append(unknown, header) - } - } - // If we're importing pure headers, verify based on their recentness - frequency := fsHeaderCheckFrequency - if headers[len(headers)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { - frequency = 1 - } - if n, err := d.insertHeaders(headers, frequency); err != nil { - // If some headers were inserted, add them too to the rollback list - if n > 0 { - rollback = append(rollback, headers[:n]...) - } - glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headers[n].Number, headers[n].Hash().Bytes()[:4], err) + // If we received a skeleton batch, resolve internals concurrently + if skeleton { + filled, err := d.fillHeaderSkeleton(from, headers) + if err != nil { + glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err) return errInvalidChain } - // All verifications passed, store newly found uncertain headers - rollback = append(rollback, unknown...) - if len(rollback) > fsHeaderSafetyNet { - rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 
- } - } - if d.mode == FullSync || d.mode == FastSync { - inserts := d.queue.Schedule(headers, from) - if len(inserts) != len(headers) { - glog.V(logger.Debug).Infof("%v: stale headers", p) - return errBadPeer - } - } - // Notify the content fetchers of new headers, but stop if queue is full - cont := d.queue.PendingBlocks() < maxQueuedHeaders && d.queue.PendingReceipts() < maxQueuedHeaders - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { - if cont { - // We still have headers to fetch, send continuation wake signal (potential) - select { - case ch <- true: - default: - } - } else { - // Header limit reached, send a termination wake signal (enforced) - select { - case ch <- false: - case <-d.cancelCh: - } - } + headers = filled } - if !cont { - return nil + // Insert all the new headers and fetch the next batch + glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) + select { + case d.headerProcCh <- headers: + case <-d.cancelCh: + return errCancelHeaderFetch } - // Queue not yet full, fetch the next batch from += uint64(len(headers)) getHeaders(from) @@ -1330,7 +1256,11 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { case <-d.cancelCh: } } - return nil + select { + case d.headerProcCh <- nil: + case <-d.cancelCh: + } + return errBadPeer case <-d.hashCh: case <-d.blockCh: @@ -1340,6 +1270,34 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { } } +// fillHeaderSkeleton concurrently retrieves headers from all our available peers +// and maps them to the provided skeleton header chain. +func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, error) { + glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from) + d.queue.ScheduleSkeleton(from, skeleton) + + var ( + deliver = func(packet dataPack) (int, error) { + pack := packet.(*headerPack) + return d.queue.DeliverHeaders(pack.peerId, pack.headers) + } + expire = func() map[string]int { return d.queue.ExpireHeaders(headerTTL) } + throttle = func() bool { return false } + reserve = func(p *peer, count int) (*fetchRequest, bool, error) { + return d.queue.ReserveHeaders(p, count), false, nil + } + fetch = func(p *peer, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } + capacity = func(p *peer) int { return p.HeaderCapacity() } + setIdle = func(p *peer, accepted int) { p.SetHeadersIdle(accepted) } + ) + err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire, + d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve, + nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header") + + glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err) + return d.queue.RetrieveHeaders(), err +} + // fetchBodies iteratively downloads the scheduled block bodies, taking any // available peers, reserving a chunk of blocks for each, waiting for delivery // and also periodically checking for timeouts. 
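To make the skeleton scheme concrete: with MaxHeaderFetch = 192 and MaxSkeletonSize = 128, the origin peer is asked for one header per 192-block gap (numbers from+191, from+383, ...), and every gap is then handed to an idle peer whose fill must end exactly on the corresponding skeleton header. A rough illustration of the index arithmetic and the acceptance rule, using a simplified stand-in for the real header type:

package main

import "fmt"

const (
	MaxHeaderFetch  = 192 // headers per gap / per fill request
	MaxSkeletonSize = 128 // skeleton anchors requested from the origin peer
)

// header is a simplified stand-in for the real block header type.
type header struct {
	Number     uint64
	Hash       string
	ParentHash string
}

// skeletonAnchors returns the header numbers the origin peer is asked for:
// from+MaxHeaderFetch-1, then every MaxHeaderFetch-th number after that.
func skeletonAnchors(from uint64) []uint64 {
	anchors := make([]uint64, 0, MaxSkeletonSize)
	for i := 0; i < MaxSkeletonSize; i++ {
		anchors = append(anchors, from+uint64(MaxHeaderFetch)-1+uint64(i*MaxHeaderFetch))
	}
	return anchors
}

// acceptFill mirrors the spirit of the DeliverHeaders check: a fill batch is
// only taken if it is exactly one gap long, starts at the requested number,
// ends on the skeleton anchor, and chains cleanly number-by-number.
func acceptFill(requestFrom uint64, anchor header, batch []header) bool {
	if len(batch) != MaxHeaderFetch {
		return false
	}
	if batch[0].Number != requestFrom || batch[len(batch)-1].Hash != anchor.Hash {
		return false
	}
	for i := 1; i < len(batch); i++ {
		if batch[i].Number != requestFrom+uint64(i) || batch[i].ParentHash != batch[i-1].Hash {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(skeletonAnchors(1)[:3]) // [192 384 576]
}

A peer that cannot produce a clean gap is recorded in headerPeerMiss for that batch, so ReserveHeaders will not hand it the same range again while other peers retry it.
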
@@ -1398,6 +1356,11 @@ func (d *Downloader) fetchNodeData() error { deliver = func(packet dataPack) (int, error) { start := time.Now() return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(err error, delivered int) { + // If the peer gave us nothing, stalling fast sync, drop + if delivered == 0 { + glog.V(logger.Debug).Infof("peer %s: stalling state delivery, dropping", packet.PeerId()) + d.dropPeer(packet.PeerId()) + } if err != nil { // If the node data processing failed, the root hash is very wrong, abort glog.V(logger.Error).Infof("peer %d: state processing failed: %v", packet.PeerId(), err) @@ -1554,7 +1517,9 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv continue } if glog.V(logger.Detail) { - if len(request.Headers) > 0 { + if request.From > 0 { + glog.Infof("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From) + } else if len(request.Headers) > 0 { glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number) } else { glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind)) @@ -1588,9 +1553,162 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv } } -// process takes fetch results from the queue and tries to import them into the -// chain. The type of import operation will depend on the result contents. -func (d *Downloader) process() error { +// processHeaders takes batches of retrieved headers from an input channel and +// keeps processing and scheduling them into the header chain and downloader's +// queue until the stream ends or a failure occurs. +func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { + // Calculate the pivoting point for switching from fast to slow sync + pivot := d.queue.FastSyncPivot() + + // Keep a count of uncertain headers to roll back + rollback := []*types.Header{} + defer func() { + if len(rollback) > 0 { + // Flatten the headers and roll them back + hashes := make([]common.Hash, len(rollback)) + for i, header := range rollback { + hashes[i] = header.Hash() + } + lh, lfb, lb := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number() + d.rollback(hashes) + glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)", + len(hashes), lh, d.headHeader().Number, lfb, d.headFastBlock().Number(), lb, d.headBlock().Number()) + + // If we're already past the pivot point, this could be an attack, disable fast sync + if rollback[len(rollback)-1].Number.Uint64() > pivot { + d.noFast = true + } + } + }() + + // Wait for batches of headers to process + gotHeaders := false + + for { + select { + case <-d.cancelCh: + return errCancelHeaderProcessing + + case headers := <-d.headerProcCh: + // Terminate header processing if we synced up + if len(headers) == 0 { + // Notify everyone that headers are fully processed + for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { + select { + case ch <- false: + case <-d.cancelCh: + } + } + // If no headers were retrieved at all, the peer violated it's TD promise that it had a + // better chain compared to ours. The only exception is if it's promised blocks were + // already imported by other means (e.g. 
fecher): + // + // R , L : Both at block 10 + // R: Mine block 11, and propagate it to L + // L: Queue block 11 for import + // L: Notice that R's head and TD increased compared to ours, start sync + // L: Import of block 11 finishes + // L: Sync begins, and finds common ancestor at 11 + // L: Request new headers up from 11 (R's TD was higher, it must have something) + // R: Nothing to give + if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 { + return errStallingPeer + } + // If fast or light syncing, ensure promised headers are indeed delivered. This is + // needed to detect scenarios where an attacker feeds a bad pivot and then bails out + // of delivering the post-pivot blocks that would flag the invalid content. + // + // This check cannot be executed "as is" for full imports, since blocks may still be + // queued for processing when the header download completes. However, as long as the + // peer gave us something useful, we're already happy/progressed (above check). + if d.mode == FastSync || d.mode == LightSync { + if td.Cmp(d.getTd(d.headHeader().Hash())) > 0 { + return errStallingPeer + } + } + // Disable any rollback and return + rollback = nil + return nil + } + // Otherwise split the chunk of headers into batches and process them + gotHeaders = true + + for len(headers) > 0 { + // Terminate if something failed in between processing chunks + select { + case <-d.cancelCh: + return errCancelHeaderProcessing + default: + } + // Select the next chunk of headers to import + limit := maxHeadersProcess + if limit > len(headers) { + limit = len(headers) + } + chunk := headers[:limit] + + // In case of header only syncing, validate the chunk immediately + if d.mode == FastSync || d.mode == LightSync { + // Collect the yet unknown headers to mark them as uncertain + unknown := make([]*types.Header, 0, len(headers)) + for _, header := range chunk { + if !d.hasHeader(header.Hash()) { + unknown = append(unknown, header) + } + } + // If we're importing pure headers, verify based on their recentness + frequency := fsHeaderCheckFrequency + if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { + frequency = 1 + } + if n, err := d.insertHeaders(chunk, frequency); err != nil { + // If some headers were inserted, add them too to the rollback list + if n > 0 { + rollback = append(rollback, chunk[:n]...) + } + glog.V(logger.Debug).Infof("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err) + return errInvalidChain + } + // All verifications passed, store newly found uncertain headers + rollback = append(rollback, unknown...) + if len(rollback) > fsHeaderSafetyNet { + rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 
+ } + } + // Unless we're doing light chains, schedule the headers for associated content retrieval + if d.mode == FullSync || d.mode == FastSync { + // If we've reached the allowed number of pending headers, stall a bit + for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { + select { + case <-d.cancelCh: + return errCancelHeaderProcessing + case <-time.After(time.Second): + } + } + // Otherwise insert the headers for content retrieval + inserts := d.queue.Schedule(chunk, origin) + if len(inserts) != len(chunk) { + glog.V(logger.Debug).Infof("stale headers") + return errBadPeer + } + } + headers = headers[limit:] + origin += uint64(limit) + } + // Signal the content downloaders of the availablility of new tasks + for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { + select { + case ch <- true: + default: + } + } + } + } +} + +// processContent takes fetch results from the queue and tries to import them +// into the chain. The type of import operation will depend on the result contents. +func (d *Downloader) processContent() error { pivot := d.queue.FastSyncPivot() for { results := d.queue.WaitResults() @@ -1608,7 +1726,7 @@ func (d *Downloader) process() error { for len(results) != 0 { // Check for any termination requests if atomic.LoadInt32(&d.interrupt) == 1 { - return errCancelProcessing + return errCancelContentProcessing } // Retrieve the a batch of results to import var ( diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index b0b0c2bd32132..c013f3d2cc430 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -560,8 +560,8 @@ func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) fu hashes := dl.peerHashes[id] headers := dl.peerHeaders[id] result := make([]*types.Header, 0, amount) - for i := 0; i < amount && len(hashes)-int(origin)-1-i >= 0; i++ { - if header, ok := headers[hashes[len(hashes)-int(origin)-1-i]]; ok { + for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ { + if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok { result = append(result, header) } } @@ -1348,27 +1348,28 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { result error drop bool }{ - {nil, false}, // Sync succeeded, all is well - {errBusy, false}, // Sync is already in progress, no problem - {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop - {errBadPeer, true}, // Peer was deemed bad for some reason, drop it - {errStallingPeer, true}, // Peer was detected to be stalling, drop it - {errNoPeers, false}, // No peers to download from, soft race, no issue - {errTimeout, true}, // No hashes received in due time, drop the peer - {errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end - {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end - {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser - {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter - {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop - {errInvalidBlock, false}, // A bad peer was detected, but not the sync origin - {errInvalidBody, false}, // A bad peer was detected, but not the sync origin - {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin - {errCancelHashFetch, false}, // 
Synchronisation was canceled, origin may be innocent, don't drop - {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {nil, false}, // Sync succeeded, all is well + {errBusy, false}, // Sync is already in progress, no problem + {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop + {errBadPeer, true}, // Peer was deemed bad for some reason, drop it + {errStallingPeer, true}, // Peer was detected to be stalling, drop it + {errNoPeers, false}, // No peers to download from, soft race, no issue + {errTimeout, true}, // No hashes received in due time, drop the peer + {errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end + {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end + {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser + {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter + {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop + {errInvalidBlock, false}, // A bad peer was detected, but not the sync origin + {errInvalidBody, false}, // A bad peer was detected, but not the sync origin + {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin + {errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelHeaderProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop } // Run the tests and check disconnection status tester := newTester() diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index c4846194ba854..6aab907d7e135 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -58,15 +58,18 @@ type peer struct { id string // Unique identifier of the peer head common.Hash // Hash of the peers latest known block + headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1) blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1) receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1) + headerThroughput float64 // Number of headers measured to be retrievable per second blockThroughput float64 // Number of blocks (bodies) measured to be retrievable per second receiptThroughput float64 // Number of receipts measured to be retrievable per second stateThroughput float64 // Number of node data pieces measured to be retrievable per second - blockStarted 
time.Time // Time instance when the last block (body)fetch was started + headerStarted time.Time // Time instance when the last header fetch was started + blockStarted time.Time // Time instance when the last block (body) fetch was started receiptStarted time.Time // Time instance when the last receipt fetch was started stateStarted time.Time // Time instance when the last node data fetch was started @@ -118,10 +121,12 @@ func (p *peer) Reset() { p.lock.Lock() defer p.lock.Unlock() + atomic.StoreInt32(&p.headerIdle, 0) atomic.StoreInt32(&p.blockIdle, 0) atomic.StoreInt32(&p.receiptIdle, 0) atomic.StoreInt32(&p.stateIdle, 0) + p.headerThroughput = 0 p.blockThroughput = 0 p.receiptThroughput = 0 p.stateThroughput = 0 @@ -151,6 +156,24 @@ func (p *peer) Fetch61(request *fetchRequest) error { return nil } +// FetchHeaders sends a header retrieval request to the remote peer. +func (p *peer) FetchHeaders(from uint64, count int) error { + // Sanity check the protocol version + if p.version < 62 { + panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version)) + } + // Short circuit if the peer is already fetching + if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { + return errAlreadyFetching + } + p.headerStarted = time.Now() + + // Issue the header retrieval request (absolut upwards without gaps) + go p.getAbsHeaders(from, count, 0, false) + + return nil +} + // FetchBodies sends a block body retrieval request to the remote peer. func (p *peer) FetchBodies(request *fetchRequest) error { // Sanity check the protocol version @@ -217,6 +240,13 @@ func (p *peer) FetchNodeData(request *fetchRequest) error { return nil } +// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval +// requests. Its estimated header retrieval throughput is updated with that measured +// just now. +func (p *peer) SetHeadersIdle(delivered int) { + p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle) +} + // SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval // requests. Its estimated block retrieval throughput is updated with that measured // just now. @@ -264,6 +294,15 @@ func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, id *throughput = (1-throughputImpact)*(*throughput) + throughputImpact*measured } +// HeaderCapacity retrieves the peers header download allowance based on its +// previously discovered throughput. +func (p *peer) HeaderCapacity() int { + p.lock.RLock() + defer p.lock.RUnlock() + + return int(math.Max(1, math.Min(p.headerThroughput*float64(headerTargetRTT)/float64(time.Second), float64(MaxHeaderFetch)))) +} + // BlockCapacity retrieves the peers block download allowance based on its // previously discovered throughput. func (p *peer) BlockCapacity() int { @@ -323,14 +362,15 @@ func (p *peer) String() string { defer p.lock.RUnlock() return fmt.Sprintf("Peer %s [%s]", p.id, - fmt.Sprintf("blocks %3.2f/s, ", p.blockThroughput)+ + fmt.Sprintf("headers %3.2f/s, ", p.headerThroughput)+ + fmt.Sprintf("blocks %3.2f/s, ", p.blockThroughput)+ fmt.Sprintf("receipts %3.2f/s, ", p.receiptThroughput)+ fmt.Sprintf("states %3.2f/s, ", p.stateThroughput)+ fmt.Sprintf("lacking %4d", len(p.lacking)), ) } -// peerSet represents the collection of active peer participating in the block +// peerSet represents the collection of active peer participating in the chain // download procedure. type peerSet struct { peers map[string]*peer @@ -359,7 +399,7 @@ func (ps *peerSet) Reset() { // peer is already known. 
// // The method also sets the starting throughput values of the new peer to the -// average of all existing peers, to give it a realistic change of being used +// average of all existing peers, to give it a realistic chance of being used // for data retrievals. func (ps *peerSet) Register(p *peer) error { ps.lock.Lock() @@ -369,15 +409,17 @@ func (ps *peerSet) Register(p *peer) error { return errAlreadyRegistered } if len(ps.peers) > 0 { - p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0 + p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0 for _, peer := range ps.peers { peer.lock.RLock() + p.headerThroughput += peer.headerThroughput p.blockThroughput += peer.blockThroughput p.receiptThroughput += peer.receiptThroughput p.stateThroughput += peer.stateThroughput peer.lock.RUnlock() } + p.headerThroughput /= float64(len(ps.peers)) p.blockThroughput /= float64(len(ps.peers)) p.receiptThroughput /= float64(len(ps.peers)) p.stateThroughput /= float64(len(ps.peers)) @@ -441,6 +483,20 @@ func (ps *peerSet) BlockIdlePeers() ([]*peer, int) { return ps.idlePeers(61, 61, idle, throughput) } +// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers +// within the active peer set, ordered by their reputation. +func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) { + idle := func(p *peer) bool { + return atomic.LoadInt32(&p.headerIdle) == 0 + } + throughput := func(p *peer) float64 { + p.lock.RLock() + defer p.lock.RUnlock() + return p.headerThroughput + } + return ps.idlePeers(62, 64, idle, throughput) +} + // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within // the active peer set, ordered by their reputation. func (ps *peerSet) BodyIdlePeers() ([]*peer, int) { diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index d8d1bddceefff..1f46d0a4a3be2 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -39,8 +39,8 @@ import ( ) var ( - blockCacheLimit = 8192 // Maximum number of blocks to cache before throttling the download - maxInFlightStates = 4096 // Maximum number of state downloads to allow concurrently + blockCacheLimit = 16384 // Maximum number of blocks to cache before throttling the download + maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently ) var ( @@ -52,6 +52,7 @@ var ( // fetchRequest is a currently running data retrieval operation. 
type fetchRequest struct { Peer *peer // Peer to which the request was sent + From uint64 // [eth/62] Requested chain element index (used for skeleton fills only) Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority) Headers []*types.Header // [eth/62] Requested headers, sorted by request order Time time.Time // Time when the request was made @@ -79,6 +80,17 @@ type queue struct { headerHead common.Hash // [eth/62] Hash of the last queued header to verify order + // Headers are "special", they download in batches, supported by a skeleton chain + headerTaskPool map[uint64]*types.Header // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers + headerTaskQueue *prque.Prque // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for + headerPeerMiss map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable + headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations + headerDonePool map[uint64]struct{} // [eth/62] Set of the completed header fetches + headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers + headerOffset uint64 // [eth/62] Number of the first header in the result cache + headerContCh chan bool // [eth/62] Channel to notify when header download finishes + + // All data retrievals below are based on an already assembles header chain blockTaskPool map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers blockTaskQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for blockPendPool map[string]*fetchRequest // [eth/62] Currently pending block (body) retrieval operations @@ -113,6 +125,8 @@ func newQueue(stateDb ethdb.Database) *queue { return &queue{ hashPool: make(map[common.Hash]int), hashQueue: prque.New(), + headerPendPool: make(map[string]*fetchRequest), + headerContCh: make(chan bool), blockTaskPool: make(map[common.Hash]*types.Header), blockTaskQueue: prque.New(), blockPendPool: make(map[string]*fetchRequest), @@ -149,6 +163,8 @@ func (q *queue) Reset() { q.headerHead = common.Hash{} + q.headerPendPool = make(map[string]*fetchRequest) + q.blockTaskPool = make(map[common.Hash]*types.Header) q.blockTaskQueue.Reset() q.blockPendPool = make(map[string]*fetchRequest) @@ -178,6 +194,14 @@ func (q *queue) Close() { q.active.Broadcast() } +// PendingHeaders retrieves the number of header requests pending for retrieval. +func (q *queue) PendingHeaders() int { + q.lock.Lock() + defer q.lock.Unlock() + + return q.headerTaskQueue.Size() +} + // PendingBlocks retrieves the number of block (body) requests pending for retrieval. func (q *queue) PendingBlocks() int { q.lock.Lock() @@ -205,6 +229,15 @@ func (q *queue) PendingNodeData() int { return 0 } +// InFlightHeaders retrieves whether there are header fetch requests currently +// in flight. +func (q *queue) InFlightHeaders() bool { + q.lock.Lock() + defer q.lock.Unlock() + + return len(q.headerPendPool) > 0 +} + // InFlightBlocks retrieves whether there are block fetch requests currently in // flight. func (q *queue) InFlightBlocks() bool { @@ -317,6 +350,44 @@ func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash { return inserts } +// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill +// up an already retrieved header skeleton. 
+func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { + q.lock.Lock() + defer q.lock.Unlock() + + // No skeleton retrieval can be in progress, fail hard if so (huge implementation bug) + if q.headerResults != nil { + panic("skeleton assembly already in progress") + } + // Shedule all the header retrieval tasks for the skeleton assembly + q.headerTaskPool = make(map[uint64]*types.Header) + q.headerTaskQueue = prque.New() + q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains + q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch) + q.headerOffset = from + q.headerContCh = make(chan bool, 1) + + for i, header := range skeleton { + index := from + uint64(i*MaxHeaderFetch) + + q.headerTaskPool[index] = header + q.headerTaskQueue.Push(index, -float32(index)) + } +} + +// RetrieveHeaders retrieves the header chain assemble based on the scheduled +// skeleton. +func (q *queue) RetrieveHeaders() []*types.Header { + q.lock.Lock() + defer q.lock.Unlock() + + headers := q.headerResults + q.headerResults = nil + + return headers +} + // Schedule adds a set of headers for the download queue for scheduling, returning // the new headers encountered. func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { @@ -437,6 +508,46 @@ func (q *queue) countProcessableItems() int { return len(q.resultCache) } +// ReserveHeaders reserves a set of headers for the given peer, skipping any +// previously failed batches. +func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest { + q.lock.Lock() + defer q.lock.Unlock() + + // Short circuit if the peer's already downloading something (sanity check to + // not corrupt state) + if _, ok := q.headerPendPool[p.id]; ok { + return nil + } + // Retrieve a batch of hashes, skipping previously failed ones + send, skip := uint64(0), []uint64{} + for send == 0 && !q.headerTaskQueue.Empty() { + from, _ := q.headerTaskQueue.Pop() + if q.headerPeerMiss[p.id] != nil { + if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok { + skip = append(skip, from.(uint64)) + continue + } + } + send = from.(uint64) + } + // Merge all the skipped batches back + for _, from := range skip { + q.headerTaskQueue.Push(from, -float32(from)) + } + // Assemble and return the block download request + if send == 0 { + return nil + } + request := &fetchRequest{ + Peer: p, + From: send, + Time: time.Now(), + } + q.headerPendPool[p.id] = request + return request +} + // ReserveBlocks reserves a set of block hashes for the given peer, skipping any // previously failed download. func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest { @@ -635,6 +746,11 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ return request, progress, nil } +// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue. +func (q *queue) CancelHeaders(request *fetchRequest) { + q.cancel(request, q.headerTaskQueue, q.headerPendPool) +} + // CancelBlocks aborts a fetch request, returning all pending hashes to the queue. 
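ReserveHeaders above skips batches a peer has already failed to deliver by consulting headerPeerMiss, a map from peer id to the set of unavailable start indexes. A tiny standalone sketch of that bookkeeping pattern (hypothetical names, not the queue's API):

    package main

    import "fmt"

    // missTracker remembers which header batches (keyed by their starting index)
    // each peer has already failed to deliver, so they are not asked again.
    type missTracker map[string]map[uint64]struct{}

    func (m missTracker) markMiss(peer string, from uint64) {
        if m[peer] == nil {
            m[peer] = make(map[uint64]struct{})
        }
        m[peer][from] = struct{}{}
    }

    func (m missTracker) hasMissed(peer string, from uint64) bool {
        _, ok := m[peer][from]
        return ok
    }

    func main() {
        misses := make(missTracker)
        misses.markMiss("peer-1", 1000)

        fmt.Println(misses.hasMissed("peer-1", 1000)) // true: don't hand this batch to peer-1 again
        fmt.Println(misses.hasMissed("peer-2", 1000)) // false: another peer may still try it
    }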
func (q *queue) CancelBlocks(request *fetchRequest) { q.cancel(request, q.hashQueue, q.blockPendPool) @@ -663,6 +779,9 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m q.lock.Lock() defer q.lock.Unlock() + if request.From > 0 { + taskQueue.Push(request.From, -float32(request.From)) + } for hash, index := range request.Hashes { taskQueue.Push(hash, float32(index)) } @@ -702,6 +821,15 @@ func (q *queue) Revoke(peerId string) { } } +// ExpireHeaders checks for in flight requests that exceeded a timeout allowance, +// canceling them and returning the responsible peers for penalisation. +func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int { + q.lock.Lock() + defer q.lock.Unlock() + + return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter) +} + // ExpireBlocks checks for in flight requests that exceeded a timeout allowance, // canceling them and returning the responsible peers for penalisation. func (q *queue) ExpireBlocks(timeout time.Duration) map[string]int { @@ -753,6 +881,9 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, timeoutMeter.Mark(1) // Return any non satisfied requests to the pool + if request.From > 0 { + taskQueue.Push(request.From, -float32(request.From)) + } for hash, index := range request.Hashes { taskQueue.Push(hash, float32(index)) } @@ -842,6 +973,73 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) { } } +// DeliverHeaders injects a header retrieval response into the header results +// cache. This method either accepts all headers it received, or none of them +// if they do not map correctly to the skeleton. +func (q *queue) DeliverHeaders(id string, headers []*types.Header) (int, error) { + q.lock.Lock() + defer q.lock.Unlock() + + // Short circuit if the data was never requested + request := q.headerPendPool[id] + if request == nil { + return 0, errNoFetchesPending + } + headerReqTimer.UpdateSince(request.Time) + delete(q.headerPendPool, id) + + // Ensure headers can be mapped onto the skeleton chain + target := q.headerTaskPool[request.From].Hash() + + accepted := len(headers) == MaxHeaderFetch + if accepted { + if headers[0].Number.Uint64() != request.From { + glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From) + accepted = false + } else if headers[len(headers)-1].Hash() != target { + glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4]) + accepted = false + } + } + if accepted { + for i, header := range headers[1:] { + hash := header.Hash() + if want := request.From + 1 + uint64(i); header.Number.Uint64() != want { + glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ordering, expected %d", id, header.Number, hash[:4], want) + accepted = false + break + } + if headers[i].Hash() != header.ParentHash { + glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ancestry", id, header.Number, hash[:4]) + accepted = false + break + } + } + } + // If the batch of headers wasn't accepted, mark as unavailable + if !accepted { + glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From) + + miss := q.headerPeerMiss[id] + if miss == nil { + q.headerPeerMiss[id] = make(map[uint64]struct{}) + miss = q.headerPeerMiss[id] + } + 
miss[request.From] = struct{}{} + + q.headerTaskQueue.Push(request.From, -float32(request.From)) + return 0, errors.New("delivery not accepted") + } + // Clean up a successful fetch, check for termination and return + copy(q.headerResults[request.From-q.headerOffset:], headers) + delete(q.headerTaskPool, request.From) + + if len(q.headerTaskPool) == 0 { + q.headerContCh <- false + } + return len(headers), nil +} + // DeliverBodies injects a block body retrieval response into the results queue. // The method returns the number of blocks bodies accepted from the delivery and // also wakes any threads waiting for data delivery. From 8ebbd9b7c747e6cb97b02e07d50a03cd316a77f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 19 Apr 2016 12:27:37 +0300 Subject: [PATCH 07/44] [release/1.4.6] eth/downloader: stream partial skeleton filling to processor (cherry picked from commit e86619e75d1bd1209818ab4df2fac52e3c43b5e1) --- eth/downloader/downloader.go | 38 ++++++++++++++++++++----------- eth/downloader/downloader_test.go | 1 + eth/downloader/queue.go | 35 +++++++++++++++++++++++----- 3 files changed, 55 insertions(+), 19 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 2b2de1b5f6479..2f79c2dfdefab 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -54,7 +54,7 @@ var ( blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired headerTargetRTT = time.Second // [eth/62] Target time for completing a header retrieval request (only for measurements for now) - headerTTL = 2 * time.Second // [eth/62] Time it takes for a header request to time out + headerTTL = 3 * time.Second // [eth/62] Time it takes for a header request to time out bodyTargetRTT = 3 * time.Second / 2 // [eth/62] Target time for completing a block body retrieval request bodyTTL = 3 * bodyTargetRTT // [eth/62] Maximum time allowance before a block body request is considered expired receiptTargetRTT = 3 * time.Second / 2 // [eth/63] Target time for completing a receipt retrieval request @@ -1064,7 +1064,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) { continue } // Otherwise check if we already know the header or not - if (d.mode != LightSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) { + if (d.mode == FullSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) { number, hash = headers[i].Number.Uint64(), headers[i].Hash() break } @@ -1226,21 +1226,24 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error { // If we received a skeleton batch, resolve internals concurrently if skeleton { - filled, err := d.fillHeaderSkeleton(from, headers) + filled, proced, err := d.fillHeaderSkeleton(from, headers) if err != nil { glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err) return errInvalidChain } - headers = filled + headers = filled[proced:] + from += uint64(proced) } // Insert all the new headers and fetch the next batch - glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) - select { - case d.headerProcCh <- headers: - case <-d.cancelCh: - return errCancelHeaderFetch + if len(headers) > 0 { + glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) + select { + case d.headerProcCh <- headers: + case <-d.cancelCh: + return errCancelHeaderFetch + } + from += 
uint64(len(headers)) } - from += uint64(len(headers)) getHeaders(from) case <-timeout.C: @@ -1272,14 +1275,21 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error { // fillHeaderSkeleton concurrently retrieves headers from all our available peers // and maps them to the provided skeleton header chain. -func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, error) { +// +// Any partial results from the beginning of the skeleton is (if possible) forwarded +// immediately to the header processor to keep the rest of the pipeline full even +// in the case of header stalls. +// +// The method returs the entire filled skeleton and also the number of headers +// already forwarded for processing. +func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from) d.queue.ScheduleSkeleton(from, skeleton) var ( deliver = func(packet dataPack) (int, error) { pack := packet.(*headerPack) - return d.queue.DeliverHeaders(pack.peerId, pack.headers) + return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh) } expire = func() map[string]int { return d.queue.ExpireHeaders(headerTTL) } throttle = func() bool { return false } @@ -1295,7 +1305,9 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ( nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header") glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err) - return d.queue.RetrieveHeaders(), err + + filled, proced := d.queue.RetrieveHeaders() + return filled, proced, err } // fetchBodies iteratively downloads the scheduled block bodies, taking any diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index c013f3d2cc430..4ea8a8abe3a81 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -1258,6 +1258,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { // rolled back, and also the pivot point being reverted to a non-block status. 
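The DeliverHeaders acceptance rules a few hunks up boil down to: the response must be exactly one full batch, start at the requested index, end on the skeleton anchor hash, and be internally parent-linked. A standalone sketch of those checks against a simplified header type; the hash used here is a stand-in digest, not the real header hashing:

    package main

    import (
        "crypto/sha256"
        "encoding/binary"
        "fmt"
    )

    // header is a simplified stand-in for types.Header: a number and a parent link.
    type header struct {
        Number     uint64
        ParentHash [32]byte
    }

    // hash is a placeholder digest over the header fields (assumption: any
    // collision-resistant hash works for the illustration).
    func (h header) hash() [32]byte {
        var buf [40]byte
        binary.BigEndian.PutUint64(buf[:8], h.Number)
        copy(buf[8:], h.ParentHash[:])
        return sha256.Sum256(buf[:])
    }

    // acceptBatch mirrors the acceptance rules: correct length, correct first
    // number, last hash matching the skeleton target, unbroken parent chain.
    func acceptBatch(headers []header, from uint64, target [32]byte, batchSize int) bool {
        if len(headers) != batchSize {
            return false
        }
        if headers[0].Number != from {
            return false
        }
        if headers[len(headers)-1].hash() != target {
            return false
        }
        for i := 1; i < len(headers); i++ {
            if headers[i].Number != from+uint64(i) || headers[i].ParentHash != headers[i-1].hash() {
                return false
            }
        }
        return true
    }

    func main() {
        // Build a well-formed chain of 4 headers starting at #1000.
        chain := make([]header, 4)
        chain[0] = header{Number: 1000}
        for i := 1; i < len(chain); i++ {
            chain[i] = header{Number: 1000 + uint64(i), ParentHash: chain[i-1].hash()}
        }
        target := chain[len(chain)-1].hash() // the skeleton anchor the batch must end on

        fmt.Println(acceptBatch(chain, 1000, target, 4)) // true
        chain[2].Number = 9999                           // corrupt the ordering
        fmt.Println(acceptBatch(chain, 1000, target, 4)) // false
    }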
tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts) missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 + delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing]) if err := tester.sync("block-attack", nil, mode); err == nil { diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 1f46d0a4a3be2..dd839de1963e7 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -87,6 +87,7 @@ type queue struct { headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations headerDonePool map[uint64]struct{} // [eth/62] Set of the completed header fetches headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers + headerProced int // [eth/62] Number of headers already processed from the results headerOffset uint64 // [eth/62] Number of the first header in the result cache headerContCh chan bool // [eth/62] Channel to notify when header download finishes @@ -365,6 +366,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { q.headerTaskQueue = prque.New() q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch) + q.headerProced = 0 q.headerOffset = from q.headerContCh = make(chan bool, 1) @@ -378,14 +380,14 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { // RetrieveHeaders retrieves the header chain assemble based on the scheduled // skeleton. -func (q *queue) RetrieveHeaders() []*types.Header { +func (q *queue) RetrieveHeaders() ([]*types.Header, int) { q.lock.Lock() defer q.lock.Unlock() - headers := q.headerResults - q.headerResults = nil + headers, proced := q.headerResults, q.headerProced + q.headerResults, q.headerProced = nil, 0 - return headers + return headers, proced } // Schedule adds a set of headers for the download queue for scheduling, returning @@ -976,7 +978,11 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) { // DeliverHeaders injects a header retrieval response into the header results // cache. This method either accepts all headers it received, or none of them // if they do not map correctly to the skeleton. -func (q *queue) DeliverHeaders(id string, headers []*types.Header) (int, error) { +// +// If the headers are accepted, the method makes an attempt to deliver the set +// of ready headers to the processor to keep the pipeline full. However it will +// not block to prevent stalling other pending deliveries. 
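The headerProced counter added above records how much of the result cache has already been handed to the processor, and the handoff itself is deliberately non-blocking: if the processor is busy, the queue keeps the headers and simply retries on a later delivery instead of stalling the delivering peer. A minimal sketch of that non-blocking handoff, independent of the downloader types:

    package main

    import "fmt"

    // tryForward hands a completed batch to the processor if it is ready to take
    // it, and reports whether the handoff happened. It never blocks the caller.
    func tryForward(proc chan []uint64, batch []uint64) bool {
        select {
        case proc <- batch:
            return true
        default:
            return false // processor busy; keep the batch queued and retry later
        }
    }

    func main() {
        proc := make(chan []uint64, 1)

        fmt.Println(tryForward(proc, []uint64{1, 2, 3})) // true: buffer had room
        fmt.Println(tryForward(proc, []uint64{4, 5, 6})) // false: processor hasn't drained yet

        <-proc                                           // processor catches up
        fmt.Println(tryForward(proc, []uint64{4, 5, 6})) // true on retry
    }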
+func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) { q.lock.Lock() defer q.lock.Unlock() @@ -1030,10 +1036,27 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header) (int, error) q.headerTaskQueue.Push(request.From, -float32(request.From)) return 0, errors.New("delivery not accepted") } - // Clean up a successful fetch, check for termination and return + // Clean up a successful fetch and try to deliver any sub-results copy(q.headerResults[request.From-q.headerOffset:], headers) delete(q.headerTaskPool, request.From) + ready := 0 + for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil { + ready += MaxHeaderFetch + } + if ready > 0 { + // Headers are ready for delivery, gather them and push forward (non blocking) + process := make([]*types.Header, ready) + copy(process, q.headerResults[q.headerProced:q.headerProced+ready]) + + select { + case headerProcCh <- process: + glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number) + q.headerProced += len(process) + default: + } + } + // Check for termination and return if len(q.headerTaskPool) == 0 { q.headerContCh <- false } From 170036289bcede5d09083c2caee5bce2cd6f2eb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 17 May 2016 11:12:57 +0300 Subject: [PATCH 08/44] [release/1.4.6] eth/downloader: fix reviewer comments (cherry picked from commit 8906b2fe0934c67ebb1db5d4d77acdf1a7e988f0) --- eth/downloader/downloader.go | 37 +++++++++++++++++++++++++++++------- eth/downloader/queue.go | 4 ++-- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 2f79c2dfdefab..74bff2b66f79f 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -65,7 +65,7 @@ var ( maxQueuedHashes = 32 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection) maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) maxHeadersProcess = 2048 // Number of header download results to import at once into the chain - maxResultsProcess = 4096 // Number of content download results to import at once into the chain + maxResultsProcess = 2048 // Number of content download results to import at once into the chain fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected @@ -716,9 +716,9 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error { getHashes := func(from uint64) { glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from) - go p.getAbsHashes(from, MaxHashFetch) request = time.Now() timeout.Reset(hashTTL) + go p.getAbsHashes(from, MaxHashFetch) } // Start pulling hashes, until all are exhausted getHashes(from) @@ -1168,7 +1168,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) { // facilitate concurrency but still protect against malicious nodes sending bad // headers, we construct a header chain skeleton using the "origin" peer we are // syncing with, and fill in the missing headers using anyone else. Headers from -// other peers are only accepted if they map cleanly to the skeleton. If noone +// other peers are only accepted if they map cleanly to the skeleton. 
If no one // can fill in the skeleton - not even the origin peer - it's assumed invalid and // the origin is dropped. func (d *Downloader) fetchHeaders(p *peer, from uint64) error { @@ -1183,6 +1183,9 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error { defer timeout.Stop() getHeaders := func(from uint64) { + request = time.Now() + timeout.Reset(headerTTL) + if skeleton { glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from) go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) @@ -1190,8 +1193,6 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error { glog.V(logger.Detail).Infof("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from) go p.getAbsHeaders(from, MaxHeaderFetch, 0, false) } - request = time.Now() - timeout.Reset(headerTTL) } // Start pulling the header chain skeleton until all is done getHeaders(from) @@ -1413,6 +1414,28 @@ func (d *Downloader) fetchNodeData() error { // fetchParts iteratively downloads scheduled block parts, taking any available // peers, reserving a chunk of fetch requests for each, waiting for delivery and // also periodically checking for timeouts. +// +// As the scheduling/timeout logic mostly is the same for all downloaded data +// types, this method is used by each for data gathering and is instrumented with +// various callbacks to handle the slight differences between processing them. +// +// The instrumentation parameters: +// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) +// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) +// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) +// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) +// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) +// - pending: task callback for the number of requests still needing download (detect completion/non-completability) +// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) +// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) +// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) +// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) +// - fetch: network callback to actually send a particular download request to a physical remote peer +// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) +// - capacity: network callback to retreive the estimated type-specific bandwidth capacity of a peer (traffic shaping) +// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks +// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) +// - kind: textual label of the type being downloaded to display in log mesages func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error), fetchHook 
func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, @@ -1581,10 +1604,10 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { for i, header := range rollback { hashes[i] = header.Hash() } - lh, lfb, lb := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number() + lastHeader, lastFastBlock, lastBlock := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number() d.rollback(hashes) glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)", - len(hashes), lh, d.headHeader().Number, lfb, d.headFastBlock().Number(), lb, d.headBlock().Number()) + len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, d.headFastBlock().Number(), lastBlock, d.headBlock().Number()) // If we're already past the pivot point, this could be an attack, disable fast sync if rollback[len(rollback)-1].Number.Uint64() > pivot { diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index dd839de1963e7..195eae4ff9fdc 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -39,8 +39,8 @@ import ( ) var ( - blockCacheLimit = 16384 // Maximum number of blocks to cache before throttling the download - maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently + blockCacheLimit = 8192 // Maximum number of blocks to cache before throttling the download + maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently ) var ( From 8d2775e3d736d90327e817655e24ed91f922b161 Mon Sep 17 00:00:00 2001 From: Fabio Berger Date: Sun, 22 May 2016 16:42:46 -0700 Subject: [PATCH 09/44] [release/1.4.6] core: Simplify bloom9 tests with available convenience method `TestBytes` (cherry picked from commit faf663133b7d784406a6bfc029404b2718abc66d) --- core/types/bloom9_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/types/bloom9_test.go b/core/types/bloom9_test.go index 58e8f707316f6..a28ac0e7afbaa 100644 --- a/core/types/bloom9_test.go +++ b/core/types/bloom9_test.go @@ -39,12 +39,12 @@ func TestBloom(t *testing.T) { } for _, data := range positive { - if !bloom.Test(new(big.Int).SetBytes([]byte(data))) { + if !bloom.TestBytes([]byte(data)) { t.Error("expected", data, "to test true") } } for _, data := range negative { - if bloom.Test(new(big.Int).SetBytes([]byte(data))) { + if bloom.TestBytes([]byte(data)) { t.Error("did not expect", data, "to test true") } } From c45c4240739c6c8acc9c704e78c05a56d3cda715 Mon Sep 17 00:00:00 2001 From: Gianfranco Cecconi Date: Mon, 23 May 2016 14:19:17 +0100 Subject: [PATCH 10/44] [release/1.4.6] Just to make it clear how to build all executables (cherry picked from commit 2e530f48892dd97fab362da7123319dc8c407444) --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index bdd85b3fa46b6..1dcfbf97f6cf0 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,10 @@ Once the dependencies are installed, run make geth +or, to build the full suite of utilities: + + make all + ## Executables The go-ethereum project comes with several wrappers/executables found in the `cmd` directory. 
From ab522d3bc75639a871d30ce0e2d1b95681178088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Roy?= Date: Tue, 24 May 2016 17:02:00 -0400 Subject: [PATCH 11/44] [release/1.4.6] common/compiler: support relative path to solc (cherry picked from commit 5eb60a6da23296f54081c92a6223935bfce81859) --- common/compiler/solidity.go | 1 - 1 file changed, 1 deletion(-) diff --git a/common/compiler/solidity.go b/common/compiler/solidity.go index ddf7a1ac9630e..6a5bfecd8614a 100644 --- a/common/compiler/solidity.go +++ b/common/compiler/solidity.go @@ -149,7 +149,6 @@ func (sol *Solidity) Compile(source string) (map[string]*Contract, error) { compilerOptions := strings.Join(params, " ") cmd := exec.Command(sol.solcPath, params...) - cmd.Dir = wd cmd.Stdin = strings.NewReader(source) cmd.Stderr = stderr From 7a4073a75883165834edb7a28b52728581f58170 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Roy?= Date: Tue, 24 May 2016 16:44:33 -0400 Subject: [PATCH 12/44] [release/1.4.6] eth/api: fixed GetCompilers when there is no error creating Solc (cherry picked from commit f86ea9aad5b14051d5d311591148387b2e6da725) --- eth/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/api.go b/eth/api.go index d048904f3621a..f5f942c27d62a 100644 --- a/eth/api.go +++ b/eth/api.go @@ -113,7 +113,7 @@ func (s *PublicEthereumAPI) GasPrice() *big.Int { // GetCompilers returns the collection of available smart contract compilers func (s *PublicEthereumAPI) GetCompilers() ([]string, error) { solc, err := s.e.Solc() - if err != nil && solc != nil { + if err == nil && solc != nil { return []string{"Solidity"}, nil } From 8fefee7132e2f3296cb316a8338181d10c6c7cdf Mon Sep 17 00:00:00 2001 From: Obulapathi N Challa Date: Tue, 24 May 2016 22:13:05 -0500 Subject: [PATCH 13/44] [release/1.4.6] misc: fix spelling mistake (cherry picked from commit f3769a97d5b4fa899f0bf673ff4c33a21d12ec38) --- .github/CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 829bf5d43b20f..87801c29f56e6 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -8,7 +8,7 @@ and help. ## Contributing If you'd like to contribute to go-ethereum please fork, fix, commit and -send a pull request. Commits who do not comply with the coding standards +send a pull request. Commits which do not comply with the coding standards are ignored (use gofmt!). If you send pull requests make absolute sure that you commit on the `develop` branch and that you do not merge to master. Commits that are directly based on master are simply ignored. From 9ccb70da7b9128403521a49aba0bf0ed3799aa99 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 24 May 2016 18:49:54 +0200 Subject: [PATCH 14/44] [release/1.4.6] eth: enable bad block reports We used to have reporting of bad blocks, but it was disabled before the Frontier release. We need it back because users are usually unable to provide the full RLP data of a bad block when it occurs. A shortcoming of this particular implementation is that the origin peer is not tracked for blocks received during eth/63 sync. No origin peer info is still better than no report at all though. 
(cherry picked from commit ca18202eb9a94de1d4b51c1572fa74edfa2773bf) --- cmd/geth/main.go | 6 ++++ core/bad_block.go | 72 ------------------------------------------- core/blockchain.go | 5 +-- core/types/block.go | 6 ++-- eth/bad_block.go | 74 +++++++++++++++++++++++++++++++++++++++++++++ eth/handler.go | 21 +++++++++++-- tests/init.go | 7 ----- 7 files changed, 104 insertions(+), 87 deletions(-) delete mode 100644 core/bad_block.go create mode 100644 eth/bad_block.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index e94b76594d512..1047a2bbd0908 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -244,6 +244,12 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso // Start system runtime metrics collection go metrics.CollectProcessMetrics(3 * time.Second) + // This should be the only place where reporting is enabled + // because it is not intended to run while testing. + // In addition to this check, bad block reports are sent only + // for chains with the main network genesis block and network id 1. + eth.EnableBadBlockReporting = true + utils.SetupNetwork(ctx) // Deprecation warning. diff --git a/core/bad_block.go b/core/bad_block.go deleted file mode 100644 index cd3fb575a8917..0000000000000 --- a/core/bad_block.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/rlp" -) - -// DisabledBadBlockReporting can be set to prevent blocks being reported. 
-var DisableBadBlockReporting = true - -// ReportBlock reports the block to the block reporting tool found at -// badblocks.ethdev.com -func ReportBlock(block *types.Block, err error) { - if DisableBadBlockReporting { - return - } - - const url = "https://badblocks.ethdev.com" - - blockRlp, _ := rlp.EncodeToBytes(block) - data := map[string]interface{}{ - "block": common.Bytes2Hex(blockRlp), - "errortype": err.Error(), - "hints": map[string]interface{}{ - "receipts": "NYI", - "vmtrace": "NYI", - }, - } - jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "params": []interface{}{data}, "id": "1", "jsonrpc": "2.0"}) - - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr)) - req.Header.Set("Content-Type", "application/json") - - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - glog.V(logger.Error).Infoln("POST err:", err) - return - } - defer resp.Body.Close() - - if glog.V(logger.Debug) { - glog.Infoln("response Status:", resp.Status) - glog.Infoln("response Headers:", resp.Header) - body, _ := ioutil.ReadAll(resp.Body) - glog.Infoln("response Body:", string(body)) - } -} diff --git a/core/blockchain.go b/core/blockchain.go index 4598800d54cb0..171a49e53d54a 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1117,15 +1117,12 @@ func (self *BlockChain) update() { } } -// reportBlock reports the given block and error using the canonical block -// reporting tool. Reporting the block to the service is handled in a separate -// goroutine. +// reportBlock logs a bad block error. func reportBlock(block *types.Block, err error) { if glog.V(logger.Error) { glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex()) glog.Errorf(" %v", err) } - go ReportBlock(block, err) } // InsertHeaderChain attempts to insert the given header chain in to the local diff --git a/core/types/block.go b/core/types/block.go index 387a063aebdcb..37b6f3ec17e86 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -141,8 +141,10 @@ type Block struct { // of the chain up to and including the block. td *big.Int - // ReceivedAt is used by package eth to track block propagation time. - ReceivedAt time.Time + // These fields are used by package eth to track + // inter-peer block relay. + ReceivedAt time.Time + ReceivedFrom interface{} } // DeprecatedTd is an old relic for extracting the TD of a block. It is in the diff --git a/eth/bad_block.go b/eth/bad_block.go new file mode 100644 index 0000000000000..3a6c3d85cb883 --- /dev/null +++ b/eth/bad_block.go @@ -0,0 +1,74 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package eth + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rlp" +) + +const ( + // The Ethereum main network genesis block. + defaultGenesisHash = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" + badBlocksURL = "https://badblocks.ethdev.com" +) + +var EnableBadBlockReporting = false + +func sendBadBlockReport(block *types.Block, err error) { + if !EnableBadBlockReporting { + return + } + + var ( + blockRLP, _ = rlp.EncodeToBytes(block) + params = map[string]interface{}{ + "block": common.Bytes2Hex(blockRLP), + "blockHash": block.Hash().Hex(), + "errortype": err.Error(), + "client": "go", + } + ) + if !block.ReceivedAt.IsZero() { + params["receivedAt"] = block.ReceivedAt.UTC().String() + } + if p, ok := block.ReceivedFrom.(*peer); ok { + params["receivedFrom"] = map[string]interface{}{ + "enode": fmt.Sprintf("enode://%x@%v", p.ID(), p.RemoteAddr()), + "name": p.Name(), + "protocolVersion": p.version, + } + } + jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "id": "1", "jsonrpc": "2.0", "params": []interface{}{params}}) + client := http.Client{Timeout: 8 * time.Second} + resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr)) + if err != nil { + glog.V(logger.Debug).Infoln(err) + return + } + glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode) + resp.Body.Close() +} diff --git a/eth/handler.go b/eth/handler.go index 202acdc78a8ae..58869a2eec2f8 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -83,6 +83,8 @@ type ProtocolManager struct { // wait group is used for graceful shutdowns during downloading // and processing wg sync.WaitGroup + + badBlockReportingEnabled bool } // NewProtocolManager returns a new ethereum sub protocol manager. 
The Ethereum sub protocol manages peers capable @@ -150,7 +152,7 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, // Construct the different synchronisation mechanisms manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeader, blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, - blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, + blockchain.GetTd, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { @@ -159,11 +161,24 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, heighter := func() uint64 { return blockchain.CurrentBlock().NumberU64() } - manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, blockchain.InsertChain, manager.removePeer) + manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, manager.insertChain, manager.removePeer) + + if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 { + glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled") + manager.badBlockReportingEnabled = true + } return manager, nil } +func (pm *ProtocolManager) insertChain(blocks types.Blocks) (i int, err error) { + i, err = pm.blockchain.InsertChain(blocks) + if pm.badBlockReportingEnabled && core.IsValidationErr(err) && i < len(blocks) { + go sendBadBlockReport(blocks[i], err) + } + return i, err +} + func (pm *ProtocolManager) removePeer(id string) { // Short circuit if the peer was already removed peer := pm.peers.Peer(id) @@ -378,6 +393,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Update the receive timestamp of each block for _, block := range blocks { block.ReceivedAt = msg.ReceivedAt + block.ReceivedFrom = p } // Filter out any explicitly requested blocks, deliver the rest to the downloader if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 { @@ -664,6 +680,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "block validation %v: %v", msg, err) } request.Block.ReceivedAt = msg.ReceivedAt + request.Block.ReceivedFrom = p // Mark the peer as owning the block and schedule it for import p.MarkBlock(request.Block.Hash()) diff --git a/tests/init.go b/tests/init.go index 5112b274d0cd1..0c07f8b237dd1 100644 --- a/tests/init.go +++ b/tests/init.go @@ -25,8 +25,6 @@ import ( "net/http" "os" "path/filepath" - - "github.com/ethereum/go-ethereum/core" ) var ( @@ -59,11 +57,6 @@ var ( VmSkipTests = []string{} ) -// Disable reporting bad blocks for the tests -func init() { - core.DisableBadBlockReporting = true -} - func readJson(reader io.Reader, value interface{}) error { data, err := ioutil.ReadAll(reader) if err != nil { From 7632acf6b432795128eb02990fe2cd2b7cea008a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 27 May 2016 10:42:11 +0300 Subject: [PATCH 15/44] [release/1.4.6] core/state: return the starting nonce for non-existent accs (testnet) (cherry picked from commit 8ee84584a407464511b453eebaa31854979aa593) --- core/state/statedb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 
22ffa36a062f2..27bc38373345c 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -127,7 +127,7 @@ func (self *StateDB) GetNonce(addr common.Address) uint64 { return stateObject.nonce } - return 0 + return StartingNonce } func (self *StateDB) GetCode(addr common.Address) []byte { From af53767e162e59e45e18aad61fa79b85d74f0aeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 19 May 2016 13:24:14 +0300 Subject: [PATCH 16/44] [release/1.4.6] core, core/state, trie: enterprise hand-tuned multi-level caching (cherry picked from commit 748d1c171d74fbf6b6051fd629d3c2204dd930e3) --- core/blockchain.go | 7 +- core/state/statedb.go | 22 ++++ trie/iterator.go | 24 +++-- trie/node.go | 60 +++++++---- trie/proof.go | 8 +- trie/secure_trie.go | 4 +- trie/sync.go | 13 +-- trie/trie.go | 237 ++++++++++++++++++++++++++---------------- trie/trie_test.go | 2 +- 9 files changed, 244 insertions(+), 133 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 171a49e53d54a..bd84adfe9ab07 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -819,6 +819,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { tstart = time.Now() nonceChecked = make([]bool, len(chain)) + statedb *state.StateDB ) // Start the parallel nonce verifier. @@ -885,7 +886,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { // Create a new statedb using the parent block and report an // error if it fails. - statedb, err := state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb) + if statedb == nil { + statedb, err = state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb) + } else { + err = statedb.Reset(chain[i-1].Root()) + } if err != nil { reportBlock(block, err) return i, err diff --git a/core/state/statedb.go b/core/state/statedb.go index 27bc38373345c..70673799ed24b 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -68,6 +68,28 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) { }, nil } +// Reset clears out all emphemeral state objects from the state db, but keeps +// the underlying state trie to avoid reloading data for the next operations. +func (self *StateDB) Reset(root common.Hash) error { + var ( + err error + tr = self.trie + ) + if self.trie.Hash() != root { + if tr, err = trie.NewSecure(root, self.db); err != nil { + return err + } + } + *self = StateDB{ + db: self.db, + trie: tr, + stateObjects: make(map[string]*StateObject), + refund: new(big.Int), + logs: make(map[common.Hash]vm.Logs), + } + return nil +} + func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) { self.thash = thash self.bhash = bhash diff --git a/trie/iterator.go b/trie/iterator.go index ceef52ec8d040..88c4cee7faa7e 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -62,7 +62,7 @@ func (self *Iterator) next(node interface{}, key []byte, isIterStart bool) []byt switch node := node.(type) { case fullNode: if len(key) > 0 { - k := self.next(node[key[0]], key[1:], isIterStart) + k := self.next(node.Children[key[0]], key[1:], isIterStart) if k != nil { return append([]byte{key[0]}, k...) } @@ -74,7 +74,7 @@ func (self *Iterator) next(node interface{}, key []byte, isIterStart bool) []byt } for i := r; i < 16; i++ { - k := self.key(node[i]) + k := self.key(node.Children[i]) if k != nil { return append([]byte{i}, k...) } @@ -130,12 +130,12 @@ func (self *Iterator) key(node interface{}) []byte { } return append(k, self.key(node.Val)...) 
case fullNode: - if node[16] != nil { - self.Value = node[16].(valueNode) + if node.Children[16] != nil { + self.Value = node.Children[16].(valueNode) return []byte{16} } for i := 0; i < 16; i++ { - k := self.key(node[i]) + k := self.key(node.Children[i]) if k != nil { return append([]byte{byte(i)}, k...) } @@ -175,7 +175,7 @@ type NodeIterator struct { // NewNodeIterator creates an post-order trie iterator. func NewNodeIterator(trie *Trie) *NodeIterator { - if bytes.Compare(trie.Root(), emptyRoot.Bytes()) == 0 { + if trie.Hash() == emptyState { return new(NodeIterator) } return &NodeIterator{trie: trie} @@ -205,9 +205,11 @@ func (it *NodeIterator) step() error { } // Initialize the iterator if we've just started, or pop off the old node otherwise if len(it.stack) == 0 { - it.stack = append(it.stack, &nodeIteratorState{node: it.trie.root, child: -1}) + // Always start with a collapsed root + root := it.trie.Hash() + it.stack = append(it.stack, &nodeIteratorState{node: hashNode(root[:]), child: -1}) if it.stack[0].node == nil { - return fmt.Errorf("root node missing: %x", it.trie.Root()) + return fmt.Errorf("root node missing: %x", it.trie.Hash()) } } else { it.stack = it.stack[:len(it.stack)-1] @@ -225,11 +227,11 @@ func (it *NodeIterator) step() error { } if node, ok := parent.node.(fullNode); ok { // Full node, traverse all children, then the node itself - if parent.child >= len(node) { + if parent.child >= len(node.Children) { break } - for parent.child++; parent.child < len(node); parent.child++ { - if current := node[parent.child]; current != nil { + for parent.child++; parent.child < len(node.Children); parent.child++ { + if current := node.Children[parent.child]; current != nil { it.stack = append(it.stack, &nodeIteratorState{node: current, parent: ancestor, child: -1}) break } diff --git a/trie/node.go b/trie/node.go index 0bfa21dc4362f..b97d370be4ebf 100644 --- a/trie/node.go +++ b/trie/node.go @@ -29,18 +29,36 @@ var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b type node interface { fstring(string) string + cache() (hashNode, bool) } type ( - fullNode [17]node + fullNode struct { + Children [17]node // Actual trie node data to encode/decode (needs custom encoder) + hash hashNode // Cached hash of the node to prevent rehashing (may be nil) + dirty bool // Cached flag whether the node's new or already stored + } shortNode struct { - Key []byte - Val node + Key []byte + Val node + hash hashNode // Cached hash of the node to prevent rehashing (may be nil) + dirty bool // Cached flag whether the node's new or already stored } hashNode []byte valueNode []byte ) +// EncodeRLP encodes a full node into the consensus RLP format. +func (n fullNode) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, n.Children) +} + +// Cache accessors to retrieve precalculated values (avoid lengthy type switches). +func (n fullNode) cache() (hashNode, bool) { return n.hash, n.dirty } +func (n shortNode) cache() (hashNode, bool) { return n.hash, n.dirty } +func (n hashNode) cache() (hashNode, bool) { return nil, true } +func (n valueNode) cache() (hashNode, bool) { return nil, true } + // Pretty printing. 
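fullNode and shortNode above gain cache-only fields (hash, dirty), and the custom EncodeRLP is what keeps the consensus encoding identical to the old 17-element array: only Children is serialized. A small sketch of the same trick, assuming the go-ethereum rlp package that the diff already imports is available:

    package main

    import (
        "fmt"
        "io"

        "github.com/ethereum/go-ethereum/rlp"
    )

    // cachedNode mimics the reworked fullNode: the consensus payload plus
    // in-memory cache fields that must never leak into the encoding.
    type cachedNode struct {
        Children [3]string
        hash     []byte // cached hash, in-memory only
        dirty    bool   // whether the node differs from what is stored
    }

    // EncodeRLP encodes only the payload, like fullNode.EncodeRLP above, so the
    // cache fields cannot change the node's hash.
    func (n cachedNode) EncodeRLP(w io.Writer) error {
        return rlp.Encode(w, n.Children)
    }

    func main() {
        clean := cachedNode{Children: [3]string{"a", "b", "c"}}
        cached := cachedNode{Children: [3]string{"a", "b", "c"}, hash: []byte{1}, dirty: true}

        b1, _ := rlp.EncodeToBytes(clean)
        b2, _ := rlp.EncodeToBytes(cached)
        fmt.Println("same encoding despite cache fields:", string(b1) == string(b2))
    }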
func (n fullNode) String() string { return n.fstring("") } func (n shortNode) String() string { return n.fstring("") } @@ -49,7 +67,7 @@ func (n valueNode) String() string { return n.fstring("") } func (n fullNode) fstring(ind string) string { resp := fmt.Sprintf("[\n%s ", ind) - for i, node := range n { + for i, node := range n.Children { if node == nil { resp += fmt.Sprintf("%s: ", indices[i]) } else { @@ -68,16 +86,16 @@ func (n valueNode) fstring(ind string) string { return fmt.Sprintf("%x ", []byte(n)) } -func mustDecodeNode(dbkey, buf []byte) node { - n, err := decodeNode(buf) +func mustDecodeNode(hash, buf []byte) node { + n, err := decodeNode(hash, buf) if err != nil { - panic(fmt.Sprintf("node %x: %v", dbkey, err)) + panic(fmt.Sprintf("node %x: %v", hash, err)) } return n } // decodeNode parses the RLP encoding of a trie node. -func decodeNode(buf []byte) (node, error) { +func decodeNode(hash, buf []byte) (node, error) { if len(buf) == 0 { return nil, io.ErrUnexpectedEOF } @@ -87,18 +105,18 @@ func decodeNode(buf []byte) (node, error) { } switch c, _ := rlp.CountValues(elems); c { case 2: - n, err := decodeShort(elems) + n, err := decodeShort(hash, buf, elems) return n, wrapError(err, "short") case 17: - n, err := decodeFull(elems) + n, err := decodeFull(hash, buf, elems) return n, wrapError(err, "full") default: return nil, fmt.Errorf("invalid number of list elements: %v", c) } } -func decodeShort(buf []byte) (node, error) { - kbuf, rest, err := rlp.SplitString(buf) +func decodeShort(hash, buf, elems []byte) (node, error) { + kbuf, rest, err := rlp.SplitString(elems) if err != nil { return nil, err } @@ -109,30 +127,30 @@ func decodeShort(buf []byte) (node, error) { if err != nil { return nil, fmt.Errorf("invalid value node: %v", err) } - return shortNode{key, valueNode(val)}, nil + return shortNode{key, valueNode(val), hash, false}, nil } r, _, err := decodeRef(rest) if err != nil { return nil, wrapError(err, "val") } - return shortNode{key, r}, nil + return shortNode{key, r, hash, false}, nil } -func decodeFull(buf []byte) (fullNode, error) { - var n fullNode +func decodeFull(hash, buf, elems []byte) (fullNode, error) { + n := fullNode{hash: hash} for i := 0; i < 16; i++ { - cld, rest, err := decodeRef(buf) + cld, rest, err := decodeRef(elems) if err != nil { return n, wrapError(err, fmt.Sprintf("[%d]", i)) } - n[i], buf = cld, rest + n.Children[i], elems = cld, rest } - val, _, err := rlp.SplitString(buf) + val, _, err := rlp.SplitString(elems) if err != nil { return n, err } if len(val) > 0 { - n[16] = valueNode(val) + n.Children[16] = valueNode(val) } return n, nil } @@ -152,7 +170,7 @@ func decodeRef(buf []byte) (node, []byte, error) { err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen) return nil, buf, err } - n, err := decodeNode(buf) + n, err := decodeNode(nil, buf) return n, rest, err case kind == rlp.String && len(val) == 0: // empty node diff --git a/trie/proof.go b/trie/proof.go index 37a70fb34d291..5135de0473159 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -54,7 +54,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue { } nodes = append(nodes, n) case fullNode: - tn = n[key[0]] + tn = n.Children[key[0]] key = key[1:] nodes = append(nodes, n) case hashNode: @@ -77,7 +77,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue { for i, n := range nodes { // Don't bother checking for errors here since hasher panics // if encoding doesn't work and we're not writing to any database. 
- n, _ = t.hasher.replaceChildren(n, nil) + n, _, _ = t.hasher.hashChildren(n, nil) hn, _ := t.hasher.store(n, nil, false) if _, ok := hn.(hashNode); ok || i == 0 { // If the node's database encoding is a hash (or is the @@ -103,7 +103,7 @@ func VerifyProof(rootHash common.Hash, key []byte, proof []rlp.RawValue) (value if !bytes.Equal(sha.Sum(nil), wantHash) { return nil, fmt.Errorf("bad proof node %d: hash mismatch", i) } - n, err := decodeNode(buf) + n, err := decodeNode(wantHash, buf) if err != nil { return nil, fmt.Errorf("bad proof node %d: %v", i, err) } @@ -139,7 +139,7 @@ func get(tn node, key []byte) ([]byte, node) { tn = n.Val key = key[len(n.Key):] case fullNode: - tn = n[key[0]] + tn = n.Children[key[0]] key = key[1:] case hashNode: return key, n diff --git a/trie/secure_trie.go b/trie/secure_trie.go index be7defe83bc9b..1d027c10274f8 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -162,11 +162,11 @@ func (t *SecureTrie) CommitTo(db DatabaseWriter) (root common.Hash, err error) { } t.secKeyCache = make(map[string][]byte) } - n, err := t.hashRoot(db) + n, clean, err := t.hashRoot(db) if err != nil { return (common.Hash{}), err } - t.root = n + t.root = clean return common.BytesToHash(n.(hashNode)), nil } diff --git a/trie/sync.go b/trie/sync.go index d55399d06b8e8..a35478f837d50 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -75,8 +75,9 @@ func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, c if root == emptyRoot { return } - blob, _ := s.database.Get(root.Bytes()) - if local, err := decodeNode(blob); local != nil && err == nil { + key := root.Bytes() + blob, _ := s.database.Get(key) + if local, err := decodeNode(key, blob); local != nil && err == nil { return } // Assemble the new sub-trie sync request @@ -152,7 +153,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) { continue } // Decode the node data content and update the request - node, err := decodeNode(item.Data) + node, err := decodeNode(item.Hash[:], item.Data) if err != nil { return i, err } @@ -213,9 +214,9 @@ func (s *TrieSync) children(req *request) ([]*request, error) { }} case fullNode: for i := 0; i < 17; i++ { - if node[i] != nil { + if node.Children[i] != nil { children = append(children, child{ - node: &node[i], + node: &node.Children[i], depth: req.depth + 1, }) } @@ -238,7 +239,7 @@ func (s *TrieSync) children(req *request) ([]*request, error) { if node, ok := (*child.node).(hashNode); ok { // Try to resolve the node from the local database blob, _ := s.database.Get(node) - if local, err := decodeNode(blob); local != nil && err == nil { + if local, err := decodeNode(node[:], blob); local != nil && err == nil { *child.node = local continue } diff --git a/trie/trie.go b/trie/trie.go index cc5dcf2a65856..a530e7b2a3bbb 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -129,7 +129,7 @@ func (t *Trie) TryGet(key []byte) ([]byte, error) { tn = n.Val pos += len(n.Key) case fullNode: - tn = n[key[pos]] + tn = n.Children[key[pos]] pos++ case nil: return nil, nil @@ -169,13 +169,13 @@ func (t *Trie) Update(key, value []byte) { func (t *Trie) TryUpdate(key, value []byte) error { k := compactHexDecode(key) if len(value) != 0 { - n, err := t.insert(t.root, nil, k, valueNode(value)) + _, n, err := t.insert(t.root, nil, k, valueNode(value)) if err != nil { return err } t.root = n } else { - n, err := t.delete(t.root, nil, k) + _, n, err := t.delete(t.root, nil, k) if err != nil { return err } @@ -184,9 +184,12 @@ func (t *Trie) TryUpdate(key, value []byte) error { 
return nil } -func (t *Trie) insert(n node, prefix, key []byte, value node) (node, error) { +func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error) { if len(key) == 0 { - return value, nil + if v, ok := n.(valueNode); ok { + return !bytes.Equal(v, value.(valueNode)), value, nil + } + return true, value, nil } switch n := n.(type) { case shortNode: @@ -194,53 +197,63 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (node, error) { // If the whole key matches, keep this short node as is // and only update the value. if matchlen == len(n.Key) { - nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value) + dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value) if err != nil { - return nil, err + return false, nil, err + } + if !dirty { + return false, n, nil } - return shortNode{n.Key, nn}, nil + return true, shortNode{n.Key, nn, nil, true}, nil } // Otherwise branch out at the index where they differ. - var branch fullNode + branch := fullNode{dirty: true} var err error - branch[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val) + _, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val) if err != nil { - return nil, err + return false, nil, err } - branch[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value) + _, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value) if err != nil { - return nil, err + return false, nil, err } // Replace this shortNode with the branch if it occurs at index 0. if matchlen == 0 { - return branch, nil + return true, branch, nil } // Otherwise, replace it with a short node leading up to the branch. - return shortNode{key[:matchlen], branch}, nil + return true, shortNode{key[:matchlen], branch, nil, true}, nil case fullNode: - nn, err := t.insert(n[key[0]], append(prefix, key[0]), key[1:], value) + dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value) if err != nil { - return nil, err + return false, nil, err } - n[key[0]] = nn - return n, nil + if !dirty { + return false, n, nil + } + n.Children[key[0]], n.hash, n.dirty = nn, nil, true + return true, n, nil case nil: - return shortNode{key, value}, nil + return true, shortNode{key, value, nil, true}, nil case hashNode: // We've hit a part of the trie that isn't loaded yet. Load // the node and insert into it. This leaves all child nodes on // the path to the value in the trie. - // - // TODO: track whether insertion changed the value and keep - // n as a hash node if it didn't. rn, err := t.resolveHash(n, prefix, key) if err != nil { - return nil, err + return false, nil, err + } + dirty, nn, err := t.insert(rn, prefix, key, value) + if err != nil { + return false, nil, err } - return t.insert(rn, prefix, key, value) + if !dirty { + return false, rn, nil + } + return true, nn, nil default: panic(fmt.Sprintf("%T: invalid node: %v", n, n)) @@ -258,7 +271,7 @@ func (t *Trie) Delete(key []byte) { // If a node was not found in the database, a MissingNodeError is returned. 
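insert now threads a dirty flag back up the recursion: a write that changes nothing returns the existing node untouched, so the cached hashes along that path stay valid. A self-contained sketch of the propagation on a toy two-way trie (hypothetical structure, not the real trie nodes):

    package main

    import "fmt"

    // node is a toy two-way trie whose cached state (dirty flag) is only
    // invalidated when an insert really changes something underneath it.
    type node struct {
        children [2]*node
        value    string
        dirty    bool
    }

    // insert reports whether anything actually changed; a write of an identical
    // value leaves the whole path clean, mirroring the dirty bool threaded
    // through Trie.insert above.
    func insert(n *node, bits []byte, value string) (bool, *node) {
        if n == nil {
            n = &node{}
        }
        if len(bits) == 0 {
            if n.value == value {
                return false, n // no-op write: keep the cached state
            }
            n.value, n.dirty = value, true
            return true, n
        }
        changed, child := insert(n.children[bits[0]], bits[1:], value)
        if !changed {
            return false, n // untouched subtree: don't invalidate this node either
        }
        n.children[bits[0]] = child
        n.dirty = true
        return true, n
    }

    func main() {
        var root *node
        _, root = insert(root, []byte{0, 1}, "hello")
        root.dirty = false // pretend the cached hash was recomputed here

        changed, _ := insert(root, []byte{0, 1}, "hello") // identical write
        fmt.Println(changed, root.dirty)                  // false false: cache stays valid

        changed, _ = insert(root, []byte{0, 1}, "world") // real update
        fmt.Println(changed, root.dirty)                  // true true: cache invalidated
    }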
func (t *Trie) TryDelete(key []byte) error { k := compactHexDecode(key) - n, err := t.delete(t.root, nil, k) + _, n, err := t.delete(t.root, nil, k) if err != nil { return err } @@ -269,23 +282,26 @@ func (t *Trie) TryDelete(key []byte) error { // delete returns the new root of the trie with key deleted. // It reduces the trie to minimal form by simplifying // nodes on the way up after deleting recursively. -func (t *Trie) delete(n node, prefix, key []byte) (node, error) { +func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { switch n := n.(type) { case shortNode: matchlen := prefixLen(key, n.Key) if matchlen < len(n.Key) { - return n, nil // don't replace n on mismatch + return false, n, nil // don't replace n on mismatch } if matchlen == len(key) { - return nil, nil // remove n entirely for whole matches + return true, nil, nil // remove n entirely for whole matches } // The key is longer than n.Key. Remove the remaining suffix // from the subtrie. Child can never be nil here since the // subtrie must contain at least two other values with keys // longer than n.Key. - child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):]) + dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):]) if err != nil { - return nil, err + return false, nil, err + } + if !dirty { + return false, n, nil } switch child := child.(type) { case shortNode: @@ -295,17 +311,21 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) { // always creates a new slice) instead of append to // avoid modifying n.Key since it might be shared with // other nodes. - return shortNode{concat(n.Key, child.Key...), child.Val}, nil + return true, shortNode{concat(n.Key, child.Key...), child.Val, nil, true}, nil default: - return shortNode{n.Key, child}, nil + return true, shortNode{n.Key, child, nil, true}, nil } case fullNode: - nn, err := t.delete(n[key[0]], append(prefix, key[0]), key[1:]) + dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:]) if err != nil { - return nil, err + return false, nil, err } - n[key[0]] = nn + if !dirty { + return false, n, nil + } + n.Children[key[0]], n.hash, n.dirty = nn, nil, true + // Check how many non-nil entries are left after deleting and // reduce the full node to a short node if only one entry is // left. Since n must've contained at least two children @@ -316,7 +336,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) { // value that is left in n or -2 if n contains at least two // values. pos := -1 - for i, cld := range n { + for i, cld := range n.Children { if cld != nil { if pos == -1 { pos = i @@ -334,37 +354,41 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) { // shortNode{..., shortNode{...}}. Since the entry // might not be loaded yet, resolve it just for this // check. - cnode, err := t.resolve(n[pos], prefix, []byte{byte(pos)}) + cnode, err := t.resolve(n.Children[pos], prefix, []byte{byte(pos)}) if err != nil { - return nil, err + return false, nil, err } if cnode, ok := cnode.(shortNode); ok { k := append([]byte{byte(pos)}, cnode.Key...) - return shortNode{k, cnode.Val}, nil + return true, shortNode{k, cnode.Val, nil, true}, nil } } // Otherwise, n is replaced by a one-nibble short node // containing the child. - return shortNode{[]byte{byte(pos)}, n[pos]}, nil + return true, shortNode{[]byte{byte(pos)}, n.Children[pos], nil, true}, nil } // n still contains at least two values and cannot be reduced. 
-		return n, nil
+		return true, n, nil
 
 	case nil:
-		return nil, nil
+		return false, nil, nil
 
 	case hashNode:
 		// We've hit a part of the trie that isn't loaded yet. Load
 		// the node and delete from it. This leaves all child nodes on
 		// the path to the value in the trie.
-		//
-		// TODO: track whether deletion actually hit a key and keep
-		// n as a hash node if it didn't.
 		rn, err := t.resolveHash(n, prefix, key)
 		if err != nil {
-			return nil, err
+			return false, nil, err
 		}
-		return t.delete(rn, prefix, key)
+		dirty, nn, err := t.delete(rn, prefix, key)
+		if err != nil {
+			return false, nil, err
+		}
+		if !dirty {
+			return false, rn, nil
+		}
+		return true, nn, nil
 
 	default:
 		panic(fmt.Sprintf("%T: invalid node: %v (%v)", n, n, key))
@@ -413,8 +437,9 @@ func (t *Trie) Root() []byte { return t.Hash().Bytes() }
 
 // Hash returns the root hash of the trie. It does not write to the
 // database and can be used even if the trie doesn't have one.
 func (t *Trie) Hash() common.Hash {
-	root, _ := t.hashRoot(nil)
-	return common.BytesToHash(root.(hashNode))
+	hash, cached, _ := t.hashRoot(nil)
+	t.root = cached
+	return common.BytesToHash(hash.(hashNode))
 }
 
 // Commit writes all nodes to the trie's database.
@@ -437,17 +462,17 @@ func (t *Trie) Commit() (root common.Hash, err error) {
 // the changes made to db are written back to the trie's attached
 // database before using the trie.
 func (t *Trie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
-	n, err := t.hashRoot(db)
+	hash, cached, err := t.hashRoot(db)
 	if err != nil {
 		return (common.Hash{}), err
 	}
-	t.root = n
-	return common.BytesToHash(n.(hashNode)), nil
+	t.root = cached
+	return common.BytesToHash(hash.(hashNode)), nil
 }
 
-func (t *Trie) hashRoot(db DatabaseWriter) (node, error) {
+func (t *Trie) hashRoot(db DatabaseWriter) (node, node, error) {
 	if t.root == nil {
-		return hashNode(emptyRoot.Bytes()), nil
+		return hashNode(emptyRoot.Bytes()), nil, nil
 	}
 	if t.hasher == nil {
 		t.hasher = newHasher()
@@ -464,51 +489,87 @@ func newHasher() *hasher {
 	return &hasher{tmp: new(bytes.Buffer), sha: sha3.NewKeccak256()}
 }
 
-func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, error) {
-	hashed, err := h.replaceChildren(n, db)
+// hash collapses a node down into a hash node, also returning a copy of the
+// original node initialized with the computed hash to replace the original one.
+func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) {
+	// If we're not storing the node, just hashing, use available cached data
+	if hash, dirty := n.cache(); hash != nil && (db == nil || !dirty) {
+		return hash, n, nil
+	}
+	// Trie not processed yet or needs storage, walk the children
+	collapsed, cached, err := h.hashChildren(n, db)
 	if err != nil {
-		return hashNode{}, err
+		return hashNode{}, n, err
 	}
-	if n, err = h.store(hashed, db, force); err != nil {
-		return hashNode{}, err
+	hashed, err := h.store(collapsed, db, force)
+	if err != nil {
+		return hashNode{}, n, err
 	}
-	return n, nil
+	// Cache the hash and RLP blob of the node for later reuse
+	if hash, ok := hashed.(hashNode); ok && !force {
+		switch cached := cached.(type) {
+		case shortNode:
+			cached.hash = hash
+			if db != nil {
+				cached.dirty = false
+			}
+			return hashed, cached, nil
+		case fullNode:
+			cached.hash = hash
+			if db != nil {
+				cached.dirty = false
+			}
+			return hashed, cached, nil
+		}
+	}
+	return hashed, cached, nil
 }
 
-// hashChildren replaces child nodes of n with their hashes if the encoded
-// size of the child is larger than a hash.
-func (h *hasher) replaceChildren(n node, db DatabaseWriter) (node, error) { +// hashChildren replaces the children of a node with their hashes if the encoded +// size of the child is larger than a hash, returning the collapsed node as well +// as a replacement for the original node with the child hashes cached in. +func (h *hasher) hashChildren(original node, db DatabaseWriter) (node, node, error) { var err error - switch n := n.(type) { + + switch n := original.(type) { case shortNode: + // Hash the short node's child, caching the newly hashed subtree + cached := n + cached.Key = common.CopyBytes(cached.Key) + n.Key = compactEncode(n.Key) if _, ok := n.Val.(valueNode); !ok { - if n.Val, err = h.hash(n.Val, db, false); err != nil { - return n, err + if n.Val, cached.Val, err = h.hash(n.Val, db, false); err != nil { + return n, original, err } } if n.Val == nil { - // Ensure that nil children are encoded as empty strings. - n.Val = valueNode(nil) + n.Val = valueNode(nil) // Ensure that nil children are encoded as empty strings. } - return n, nil + return n, cached, nil + case fullNode: + // Hash the full node's children, caching the newly hashed subtrees + cached := fullNode{dirty: n.dirty} + for i := 0; i < 16; i++ { - if n[i] != nil { - if n[i], err = h.hash(n[i], db, false); err != nil { - return n, err + if n.Children[i] != nil { + if n.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, false); err != nil { + return n, original, err } } else { - // Ensure that nil children are encoded as empty strings. - n[i] = valueNode(nil) + n.Children[i] = valueNode(nil) // Ensure that nil children are encoded as empty strings. } } - if n[16] == nil { - n[16] = valueNode(nil) + cached.Children[16] = n.Children[16] + if n.Children[16] == nil { + n.Children[16] = valueNode(nil) } - return n, nil + return n, cached, nil + default: - return n, nil + // Value and hash nodes don't have children so they're left as were + return n, original, nil } } @@ -517,21 +578,23 @@ func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) { if _, isHash := n.(hashNode); n == nil || isHash { return n, nil } + // Generate the RLP encoding of the node h.tmp.Reset() if err := rlp.Encode(h.tmp, n); err != nil { panic("encode error: " + err.Error()) } if h.tmp.Len() < 32 && !force { - // Nodes smaller than 32 bytes are stored inside their parent. - return n, nil + return n, nil // Nodes smaller than 32 bytes are stored inside their parent } // Larger nodes are replaced by their hash and stored in the database. - h.sha.Reset() - h.sha.Write(h.tmp.Bytes()) - key := hashNode(h.sha.Sum(nil)) + hash, _ := n.cache() + if hash == nil { + h.sha.Reset() + h.sha.Write(h.tmp.Bytes()) + hash = hashNode(h.sha.Sum(nil)) + } if db != nil { - err := db.Put(key, h.tmp.Bytes()) - return key, err + return hash, db.Put(hash, h.tmp.Bytes()) } - return key, nil + return hash, nil } diff --git a/trie/trie_test.go b/trie/trie_test.go index bb761b55519cf..121ba24c1e1d2 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -295,7 +295,7 @@ func TestReplication(t *testing.T) { for _, val := range vals2 { updateString(trie2, val.k, val.v) } - if trie2.Hash() != exp { + if hash := trie2.Hash(); hash != exp { t.Errorf("root failure. 
expected %x got %x", exp, hash) } } From c39de61a0a3f2e5e9427f842d48e8b325b2afec3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 6 May 2016 12:40:23 +0300 Subject: [PATCH 17/44] [release/1.4.6] cmd, console: split off the console into a reusable package (cherry picked from commit ffaf58f0a98bd987bbe76e8669bb22c405dcd62a) --- cmd/geth/accountcmd.go | 5 +- cmd/geth/chaincmd.go | 3 +- cmd/geth/consolecmd.go | 167 +++++++ cmd/geth/consolecmd_test.go | 152 +++++++ cmd/geth/js.go | 424 ----------------- cmd/geth/js_test.go | 500 --------------------- cmd/geth/main.go | 140 +----- cmd/geth/run_test.go | 9 +- cmd/geth/usage.go | 2 +- cmd/utils/flags.go | 19 +- cmd/utils/input.go | 98 ---- cmd/utils/jeth.go | 301 ------------- console/bridge.go | 317 +++++++++++++ console/console.go | 369 +++++++++++++++ console/console_test.go | 283 ++++++++++++ console/prompter.go | 156 +++++++ console/testdata/exec.js | 1 + console/testdata/preload.js | 1 + {jsre => internal/jsre}/bignumber_js.go | 0 {jsre => internal/jsre}/completion.go | 0 {jsre => internal/jsre}/completion_test.go | 3 +- {jsre => internal/jsre}/ethereum_js.go | 0 {jsre => internal/jsre}/jsre.go | 25 +- {jsre => internal/jsre}/jsre_test.go | 4 +- {jsre => internal/jsre}/pretty.go | 74 +-- rpc/json.go | 14 +- 26 files changed, 1548 insertions(+), 1519 deletions(-) create mode 100644 cmd/geth/consolecmd.go create mode 100644 cmd/geth/consolecmd_test.go delete mode 100644 cmd/geth/js.go delete mode 100644 cmd/geth/js_test.go delete mode 100644 cmd/utils/input.go delete mode 100644 cmd/utils/jeth.go create mode 100644 console/bridge.go create mode 100644 console/console.go create mode 100644 console/console_test.go create mode 100644 console/prompter.go create mode 100644 console/testdata/exec.js create mode 100644 console/testdata/preload.js rename {jsre => internal/jsre}/bignumber_js.go (100%) rename {jsre => internal/jsre}/completion.go (100%) rename {jsre => internal/jsre}/completion_test.go (98%) rename {jsre => internal/jsre}/ethereum_js.go (100%) rename {jsre => internal/jsre}/jsre.go (95%) rename {jsre => internal/jsre}/jsre_test.go (98%) rename {jsre => internal/jsre}/pretty.go (77%) diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go index bf754c72f172a..a9cee20eedc8d 100644 --- a/cmd/geth/accountcmd.go +++ b/cmd/geth/accountcmd.go @@ -23,6 +23,7 @@ import ( "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" @@ -215,12 +216,12 @@ func getPassPhrase(prompt string, confirmation bool, i int, passwords []string) if prompt != "" { fmt.Println(prompt) } - password, err := utils.Stdin.PasswordPrompt("Passphrase: ") + password, err := console.TerminalPrompter.PromptPassword("Passphrase: ") if err != nil { utils.Fatalf("Failed to read passphrase: %v", err) } if confirmation { - confirm, err := utils.Stdin.PasswordPrompt("Repeat passphrase: ") + confirm, err := console.TerminalPrompter.PromptPassword("Repeat passphrase: ") if err != nil { utils.Fatalf("Failed to read passphrase confirmation: %v", err) } diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 32eacc99eea77..457dbcfff518f 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -26,6 +26,7 @@ import ( "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/cmd/utils" 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -116,7 +117,7 @@ func exportChain(ctx *cli.Context) { } func removeDB(ctx *cli.Context) { - confirm, err := utils.Stdin.ConfirmPrompt("Remove local database?") + confirm, err := console.TerminalPrompter.PromptConfirm("Remove local database?") if err != nil { utils.Fatalf("%v", err) } diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go new file mode 100644 index 0000000000000..8bfe27fef38f4 --- /dev/null +++ b/cmd/geth/consolecmd.go @@ -0,0 +1,167 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "os" + "os/signal" + + "github.com/codegangsta/cli" + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/console" +) + +var ( + consoleCommand = cli.Command{ + Action: localConsole, + Name: "console", + Usage: `Geth Console: interactive JavaScript environment`, + Description: ` +The Geth console is an interactive shell for the JavaScript runtime environment +which exposes a node admin interface as well as the Ðapp JavaScript API. +See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console +`, + } + attachCommand = cli.Command{ + Action: remoteConsole, + Name: "attach", + Usage: `Geth Console: interactive JavaScript environment (connect to node)`, + Description: ` +The Geth console is an interactive shell for the JavaScript runtime environment +which exposes a node admin interface as well as the Ðapp JavaScript API. +See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console. +This command allows to open a console on a running geth node. + `, + } + javascriptCommand = cli.Command{ + Action: ephemeralConsole, + Name: "js", + Usage: `executes the given JavaScript files in the Geth JavaScript VM`, + Description: ` +The JavaScript VM exposes a node admin interface as well as the Ðapp +JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console +`, + } +) + +// localConsole starts a new geth node, attaching a JavaScript console to it at the +// same time. 
+func localConsole(ctx *cli.Context) {
+	// Create and start the node based on the CLI flags
+	node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
+	startNode(ctx, node)
+	defer node.Stop()
+
+	// Attach to the newly started node and start the JavaScript console
+	client, err := node.Attach()
+	if err != nil {
+		utils.Fatalf("Failed to attach to the inproc geth: %v", err)
+	}
+	config := console.Config{
+		DataDir: node.DataDir(),
+		DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
+		Client:  client,
+		Preload: utils.MakeConsolePreloads(ctx),
+	}
+	console, err := console.New(config)
+	if err != nil {
+		utils.Fatalf("Failed to start the JavaScript console: %v", err)
+	}
+	defer console.Stop(false)
+
+	// If only a short execution was requested, evaluate and return
+	if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
+		console.Evaluate(script)
+		return
+	}
+	// Otherwise print the welcome screen and enter interactive mode
+	console.Welcome()
+	console.Interactive()
+}
+
+// remoteConsole will connect to a remote geth instance, attaching a JavaScript
+// console to it.
+func remoteConsole(ctx *cli.Context) {
+	// Attach to a remotely running geth instance and start the JavaScript console
+	client, err := utils.NewRemoteRPCClient(ctx)
+	if err != nil {
+		utils.Fatalf("Unable to attach to remote geth: %v", err)
+	}
+	config := console.Config{
+		DataDir: utils.MustMakeDataDir(ctx),
+		DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
+		Client:  client,
+		Preload: utils.MakeConsolePreloads(ctx),
+	}
+	console, err := console.New(config)
+	if err != nil {
+		utils.Fatalf("Failed to start the JavaScript console: %v", err)
+	}
+	defer console.Stop(false)
+
+	// If only a short execution was requested, evaluate and return
+	if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
+		console.Evaluate(script)
+		return
+	}
+	// Otherwise print the welcome screen and enter interactive mode
+	console.Welcome()
+	console.Interactive()
+}
+
+// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript
+// console to it, executes each of the files specified as arguments, and then
+// tears everything down.
+func ephemeralConsole(ctx *cli.Context) {
+	// Create and start the node based on the CLI flags
+	node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
+	startNode(ctx, node)
+	defer node.Stop()
+
+	// Attach to the newly started node and start the JavaScript console
+	client, err := node.Attach()
+	if err != nil {
+		utils.Fatalf("Failed to attach to the inproc geth: %v", err)
+	}
+	config := console.Config{
+		DataDir: node.DataDir(),
+		DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
+		Client:  client,
+		Preload: utils.MakeConsolePreloads(ctx),
+	}
+	console, err := console.New(config)
+	if err != nil {
+		utils.Fatalf("Failed to start the JavaScript console: %v", err)
+	}
+	defer console.Stop(false)
+
+	// Evaluate each of the specified JavaScript files
+	for _, file := range ctx.Args() {
+		if err = console.Execute(file); err != nil {
+			utils.Fatalf("Failed to execute %s: %v", file, err)
+		}
+	}
+	// Wait for pending callbacks, but stop for Ctrl-C.
+ abort := make(chan os.Signal, 1) + signal.Notify(abort, os.Interrupt) + + go func() { + <-abort + os.Exit(0) + }() + console.Stop(true) +} diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go new file mode 100644 index 0000000000000..9cfb3e4e35bc8 --- /dev/null +++ b/cmd/geth/consolecmd_test.go @@ -0,0 +1,152 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "math/rand" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/console" + "github.com/ethereum/go-ethereum/rpc" +) + +// Tests that a node embedded within a console can be started up properly and +// then terminated by closing the input stream. +func TestConsoleWelcome(t *testing.T) { + coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" + + // Start a geth console, make sure it's cleaned up and terminate the console + geth := runGeth(t, "--nat", "none", "--nodiscover", "--etherbase", coinbase, "-shh", "console") + defer geth.expectExit() + geth.stdin.Close() + + // Gather all the infos the welcome message needs to contain + geth.setTemplateFunc("goos", func() string { return runtime.GOOS }) + geth.setTemplateFunc("gover", runtime.Version) + geth.setTemplateFunc("gethver", func() string { return verString }) + geth.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) }) + geth.setTemplateFunc("apis", func() []string { + apis := append(strings.Split(rpc.DefaultIPCApis, ","), rpc.MetadataApi) + sort.Strings(apis) + return apis + }) + geth.setTemplateFunc("prompt", func() string { return console.DefaultPrompt }) + + // Verify the actual welcome message to the required template + geth.expect(` +Welcome to the Geth JavaScript console! + +instance: Geth/v{{gethver}}/{{goos}}/{{gover}} +coinbase: {{.Etherbase}} +at block: 0 ({{niltime}}) + datadir: {{.Datadir}} + modules:{{range apis}} {{.}}:1.0{{end}} + +{{prompt}} +`) +} + +// Tests that a console can be attached to a running node via various means. 
+func TestIPCAttachWelcome(t *testing.T) {
+	// Configure the instance for IPC attachment
+	coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
+
+	var ipc string
+	if runtime.GOOS == "windows" {
+		ipc = `\\.\pipe\geth` + strconv.Itoa(rand.Int())
+	} else {
+		ws := tmpdir(t)
+		defer os.RemoveAll(ws)
+
+		ipc = filepath.Join(ws, "geth.ipc")
+	}
+	// Run the parent geth and attach with a child console
+	geth := runGeth(t, "--nat", "none", "--nodiscover", "--etherbase", coinbase, "-shh", "--ipcpath", ipc)
+	defer geth.interrupt()
+
+	time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
+	testAttachWelcome(t, geth, "ipc:"+ipc)
+}
+
+func TestHTTPAttachWelcome(t *testing.T) {
+	coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
+	port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P
+
+	geth := runGeth(t, "--nat", "none", "--nodiscover", "--etherbase", coinbase, "--rpc", "--rpcport", port)
+	defer geth.interrupt()
+
+	time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
+	testAttachWelcome(t, geth, "http://localhost:"+port)
+}
+
+func TestWSAttachWelcome(t *testing.T) {
+	coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
+	port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P
+
+	geth := runGeth(t, "--nat", "none", "--nodiscover", "--etherbase", coinbase, "--ws", "--wsport", port)
+	defer geth.interrupt()
+
+	time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
+	testAttachWelcome(t, geth, "ws://localhost:"+port)
+}
+
+func testAttachWelcome(t *testing.T, geth *testgeth, endpoint string) {
+	// Attach to a running geth node and terminate immediately
+	attach := runGeth(t, "attach", endpoint)
+	defer attach.expectExit()
+	attach.stdin.Close()
+
+	// Gather all the infos the welcome message needs to contain
+	attach.setTemplateFunc("goos", func() string { return runtime.GOOS })
+	attach.setTemplateFunc("gover", runtime.Version)
+	attach.setTemplateFunc("gethver", func() string { return verString })
+	attach.setTemplateFunc("etherbase", func() string { return geth.Etherbase })
+	attach.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
+	attach.setTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
+	attach.setTemplateFunc("datadir", func() string { return geth.Datadir })
+	attach.setTemplateFunc("apis", func() []string {
+		var apis []string
+		if strings.HasPrefix(endpoint, "ipc") {
+			apis = append(strings.Split(rpc.DefaultIPCApis, ","), rpc.MetadataApi)
+		} else {
+			apis = append(strings.Split(rpc.DefaultHTTPApis, ","), rpc.MetadataApi)
+		}
+		sort.Strings(apis)
+		return apis
+	})
+	attach.setTemplateFunc("prompt", func() string { return console.DefaultPrompt })
+
+	// Verify the actual welcome message to the required template
+	attach.expect(`
+Welcome to the Geth JavaScript console!
+
+instance: Geth/v{{gethver}}/{{goos}}/{{gover}}
+coinbase: {{etherbase}}
+at block: 0 ({{niltime}}){{if ipc}}
+ datadir: {{datadir}}{{end}}
+ modules:{{range apis}} {{.}}:1.0{{end}}
+
+{{prompt}}
+`)
+}
diff --git a/cmd/geth/js.go b/cmd/geth/js.go
deleted file mode 100644
index 5f455d7a33d23..0000000000000
--- a/cmd/geth/js.go
+++ /dev/null
@@ -1,424 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of go-ethereum.
-// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "math/big" - "os" - "os/signal" - "path/filepath" - "regexp" - "sort" - "strings" - - "github.com/codegangsta/cli" - "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/registrar" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/internal/web3ext" - re "github.com/ethereum/go-ethereum/jsre" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/rpc" - "github.com/peterh/liner" - "github.com/robertkrimen/otto" -) - -var ( - passwordRegexp = regexp.MustCompile("personal.[nus]") - onlyws = regexp.MustCompile("^\\s*$") - exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$") -) - -type jsre struct { - re *re.JSRE - stack *node.Node - wait chan *big.Int - ps1 string - atexit func() - corsDomain string - client rpc.Client -} - -func makeCompleter(re *jsre) liner.WordCompleter { - return func(line string, pos int) (head string, completions []string, tail string) { - if len(line) == 0 || pos == 0 { - return "", nil, "" - } - // chuck data to relevant part for autocompletion, e.g. in case of nested lines eth.getBalance(eth.coinb - i := 0 - for i = pos - 1; i > 0; i-- { - if line[i] == '.' 
|| (line[i] >= 'a' && line[i] <= 'z') || (line[i] >= 'A' && line[i] <= 'Z') { - continue - } - if i >= 3 && line[i] == '3' && line[i-3] == 'w' && line[i-2] == 'e' && line[i-1] == 'b' { - continue - } - i += 1 - break - } - return line[:i], re.re.CompleteKeywords(line[i:pos]), line[pos:] - } -} - -func newLightweightJSRE(docRoot string, client rpc.Client, datadir string, interactive bool) *jsre { - js := &jsre{ps1: "> "} - js.wait = make(chan *big.Int) - js.client = client - js.re = re.New(docRoot) - if err := js.apiBindings(); err != nil { - utils.Fatalf("Unable to initialize console - %v", err) - } - js.setupInput(datadir) - return js -} - -func newJSRE(stack *node.Node, docRoot, corsDomain string, client rpc.Client, interactive bool) *jsre { - js := &jsre{stack: stack, ps1: "> "} - // set default cors domain used by startRpc from CLI flag - js.corsDomain = corsDomain - js.wait = make(chan *big.Int) - js.client = client - js.re = re.New(docRoot) - if err := js.apiBindings(); err != nil { - utils.Fatalf("Unable to connect - %v", err) - } - js.setupInput(stack.DataDir()) - return js -} - -func (self *jsre) setupInput(datadir string) { - self.withHistory(datadir, func(hist *os.File) { utils.Stdin.ReadHistory(hist) }) - utils.Stdin.SetCtrlCAborts(true) - utils.Stdin.SetWordCompleter(makeCompleter(self)) - utils.Stdin.SetTabCompletionStyle(liner.TabPrints) - self.atexit = func() { - self.withHistory(datadir, func(hist *os.File) { - hist.Truncate(0) - utils.Stdin.WriteHistory(hist) - }) - utils.Stdin.Close() - close(self.wait) - } -} - -func (self *jsre) batch(statement string) { - err := self.re.EvalAndPrettyPrint(statement) - - if err != nil { - fmt.Printf("%v", jsErrorString(err)) - } - - if self.atexit != nil { - self.atexit() - } - - self.re.Stop(false) -} - -// show summary of current geth instance -func (self *jsre) welcome() { - self.re.Run(` - (function () { - console.log('instance: ' + web3.version.node); - console.log("coinbase: " + eth.coinbase); - var ts = 1000 * eth.getBlock(eth.blockNumber).timestamp; - console.log("at block: " + eth.blockNumber + " (" + new Date(ts) + ")"); - console.log(' datadir: ' + admin.datadir); - })(); - `) - if modules, err := self.supportedApis(); err == nil { - loadedModules := make([]string, 0) - for api, version := range modules { - loadedModules = append(loadedModules, fmt.Sprintf("%s:%s", api, version)) - } - sort.Strings(loadedModules) - } -} - -func (self *jsre) supportedApis() (map[string]string, error) { - return self.client.SupportedModules() -} - -func (js *jsre) apiBindings() error { - apis, err := js.supportedApis() - if err != nil { - return err - } - - apiNames := make([]string, 0, len(apis)) - for a, _ := range apis { - apiNames = append(apiNames, a) - } - - jeth := utils.NewJeth(js.re, js.client) - js.re.Set("jeth", struct{}{}) - t, _ := js.re.Get("jeth") - jethObj := t.Object() - - jethObj.Set("send", jeth.Send) - jethObj.Set("sendAsync", jeth.Send) - - err = js.re.Compile("bignumber.js", re.BigNumber_JS) - if err != nil { - utils.Fatalf("Error loading bignumber.js: %v", err) - } - - err = js.re.Compile("web3.js", re.Web3_JS) - if err != nil { - utils.Fatalf("Error loading web3.js: %v", err) - } - - _, err = js.re.Run("var Web3 = require('web3');") - if err != nil { - utils.Fatalf("Error requiring web3: %v", err) - } - - _, err = js.re.Run("var web3 = new Web3(jeth);") - if err != nil { - utils.Fatalf("Error setting web3 provider: %v", err) - } - - // load only supported API's in javascript runtime - shortcuts := "var eth = web3.eth; 
var personal = web3.personal; " - for _, apiName := range apiNames { - if apiName == "web3" { - continue // manually mapped or ignore - } - - if jsFile, ok := web3ext.Modules[apiName]; ok { - if err = js.re.Compile(fmt.Sprintf("%s.js", apiName), jsFile); err == nil { - shortcuts += fmt.Sprintf("var %s = web3.%s; ", apiName, apiName) - } else { - utils.Fatalf("Error loading %s.js: %v", apiName, err) - } - } - } - - _, err = js.re.Run(shortcuts) - if err != nil { - utils.Fatalf("Error setting namespaces: %v", err) - } - - js.re.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`) - - // overrule some of the methods that require password as input and ask for it interactively - p, err := js.re.Get("personal") - if err != nil { - fmt.Println("Unable to overrule sensitive methods in personal module") - return nil - } - - // Override the unlockAccount and newAccount methods on the personal object since these require user interaction. - // Assign the jeth.unlockAccount and jeth.newAccount in the jsre the original web3 callbacks. These will be called - // by the jeth.* methods after they got the password from the user and send the original web3 request to the backend. - if persObj := p.Object(); persObj != nil { // make sure the personal api is enabled over the interface - js.re.Run(`jeth.unlockAccount = personal.unlockAccount;`) - persObj.Set("unlockAccount", jeth.UnlockAccount) - js.re.Run(`jeth.newAccount = personal.newAccount;`) - persObj.Set("newAccount", jeth.NewAccount) - } - - // The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer. - // Bind these if the admin module is available. - if a, err := js.re.Get("admin"); err == nil { - if adminObj := a.Object(); adminObj != nil { - adminObj.Set("sleepBlocks", jeth.SleepBlocks) - adminObj.Set("sleep", jeth.Sleep) - } - } - - return nil -} - -func (self *jsre) AskPassword() (string, bool) { - pass, err := utils.Stdin.PasswordPrompt("Passphrase: ") - if err != nil { - return "", false - } - return pass, true -} - -func (self *jsre) ConfirmTransaction(tx string) bool { - // Retrieve the Ethereum instance from the node - var ethereum *eth.Ethereum - if err := self.stack.Service(ðereum); err != nil { - return false - } - // If natspec is enabled, ask for permission - if ethereum.NatSpec && false /* disabled for now */ { - // notice := natspec.GetNotice(self.xeth, tx, ethereum.HTTPClient()) - // fmt.Println(notice) - // answer, _ := self.Prompt("Confirm Transaction [y/n]") - // return strings.HasPrefix(strings.Trim(answer, " "), "y") - } - return true -} - -func (self *jsre) UnlockAccount(addr []byte) bool { - fmt.Printf("Please unlock account %x.\n", addr) - pass, err := utils.Stdin.PasswordPrompt("Passphrase: ") - if err != nil { - return false - } - // TODO: allow retry - var ethereum *eth.Ethereum - if err := self.stack.Service(ðereum); err != nil { - return false - } - a := accounts.Account{Address: common.BytesToAddress(addr)} - if err := ethereum.AccountManager().Unlock(a, pass); err != nil { - return false - } else { - fmt.Println("Account is now unlocked for this session.") - return true - } -} - -// preloadJSFiles loads JS files that the user has specified with ctx.PreLoadJSFlag into -// the JSRE. If not all files could be loaded it will return an error describing the error. 
-func (self *jsre) preloadJSFiles(ctx *cli.Context) error { - if ctx.GlobalString(utils.PreLoadJSFlag.Name) != "" { - assetPath := ctx.GlobalString(utils.JSpathFlag.Name) - jsFiles := strings.Split(ctx.GlobalString(utils.PreLoadJSFlag.Name), ",") - for _, file := range jsFiles { - filename := common.AbsolutePath(assetPath, strings.TrimSpace(file)) - if err := self.re.Exec(filename); err != nil { - return fmt.Errorf("%s: %v", file, jsErrorString(err)) - } - } - } - return nil -} - -// jsErrorString adds a backtrace to errors generated by otto. -func jsErrorString(err error) string { - if ottoErr, ok := err.(*otto.Error); ok { - return ottoErr.String() - } - return err.Error() -} - -func (self *jsre) interactive() { - // Read input lines. - prompt := make(chan string) - inputln := make(chan string) - go func() { - defer close(inputln) - for { - line, err := utils.Stdin.Prompt(<-prompt) - if err != nil { - if err == liner.ErrPromptAborted { // ctrl-C - self.resetPrompt() - inputln <- "" - continue - } - return - } - inputln <- line - } - }() - // Wait for Ctrl-C, too. - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt) - - defer func() { - if self.atexit != nil { - self.atexit() - } - self.re.Stop(false) - }() - for { - prompt <- self.ps1 - select { - case <-sig: - fmt.Println("caught interrupt, exiting") - return - case input, ok := <-inputln: - if !ok || indentCount <= 0 && exit.MatchString(input) { - return - } - if onlyws.MatchString(input) { - continue - } - str += input + "\n" - self.setIndent() - if indentCount <= 0 { - if !excludeFromHistory(str) { - utils.Stdin.AppendHistory(str[:len(str)-1]) - } - self.parseInput(str) - str = "" - } - } - } -} - -func excludeFromHistory(input string) bool { - return len(input) == 0 || input[0] == ' ' || passwordRegexp.MatchString(input) -} - -func (self *jsre) withHistory(datadir string, op func(*os.File)) { - hist, err := os.OpenFile(filepath.Join(datadir, "history"), os.O_RDWR|os.O_CREATE, os.ModePerm) - if err != nil { - fmt.Printf("unable to open history file: %v\n", err) - return - } - op(hist) - hist.Close() -} - -func (self *jsre) parseInput(code string) { - defer func() { - if r := recover(); r != nil { - fmt.Println("[native] error", r) - } - }() - if err := self.re.EvalAndPrettyPrint(code); err != nil { - if ottoErr, ok := err.(*otto.Error); ok { - fmt.Println(ottoErr.String()) - } else { - fmt.Println(err) - } - return - } -} - -var indentCount = 0 -var str = "" - -func (self *jsre) resetPrompt() { - indentCount = 0 - str = "" - self.ps1 = "> " -} - -func (self *jsre) setIndent() { - open := strings.Count(str, "{") - open += strings.Count(str, "(") - closed := strings.Count(str, "}") - closed += strings.Count(str, ")") - indentCount = open - closed - if indentCount <= 0 { - self.ps1 = "> " - } else { - self.ps1 = strings.Join(make([]string, indentCount*2), "..") - self.ps1 += " " - } -} diff --git a/cmd/geth/js_test.go b/cmd/geth/js_test.go deleted file mode 100644 index ddfe0d4000c2b..0000000000000 --- a/cmd/geth/js_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "io/ioutil" - "math/big" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "testing" - "time" - - "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/compiler" - "github.com/ethereum/go-ethereum/common/httpclient" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/node" -) - -const ( - testSolcPath = "" - solcVersion = "0.9.23" - testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - testBalance = "10000000000000000000" - // of empty string - testHash = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" -) - -var ( - versionRE = regexp.MustCompile(strconv.Quote(`"compilerVersion":"` + solcVersion + `"`)) - testNodeKey, _ = crypto.HexToECDSA("4b50fa71f5c3eeb8fdc452224b2395af2fcc3d125e06c32c82e048c0559db03f") - testAccount, _ = crypto.HexToECDSA("e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674") - testGenesis = `{"` + testAddress[2:] + `": {"balance": "` + testBalance + `"}}` -) - -type testjethre struct { - *jsre - lastConfirm string - client *httpclient.HTTPClient -} - -// Temporary disabled while natspec hasn't been migrated -//func (self *testjethre) ConfirmTransaction(tx string) bool { -// var ethereum *eth.Ethereum -// self.stack.Service(ðereum) -// -// if ethereum.NatSpec { -// self.lastConfirm = natspec.GetNotice(self.xeth, tx, self.client) -// } -// return true -//} - -func testJEthRE(t *testing.T) (string, *testjethre, *node.Node) { - return testREPL(t, nil) -} - -func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *node.Node) { - tmp, err := ioutil.TempDir("", "geth-test") - if err != nil { - t.Fatal(err) - } - // Create a networkless protocol stack - stack, err := node.New(&node.Config{DataDir: tmp, PrivateKey: testNodeKey, Name: "test", NoDiscovery: true}) - if err != nil { - t.Fatalf("failed to create node: %v", err) - } - // Initialize and register the Ethereum protocol - accman := accounts.NewPlaintextManager(filepath.Join(tmp, "keystore")) - db, _ := ethdb.NewMemDatabase() - core.WriteGenesisBlockForTesting(db, core.GenesisAccount{ - Address: common.HexToAddress(testAddress), - Balance: common.String2Big(testBalance), - }) - ethConf := ð.Config{ - ChainConfig: &core.ChainConfig{HomesteadBlock: new(big.Int)}, - TestGenesisState: db, - AccountManager: accman, - DocRoot: "/", - SolcPath: testSolcPath, - PowTest: true, - } - if config != nil { - config(ethConf) - } - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - return eth.New(ctx, ethConf) - }); err != nil { - t.Fatalf("failed to register ethereum protocol: %v", err) - } - // Initialize all the keys for testing - a, err := accman.ImportECDSA(testAccount, "") - if err != nil { - t.Fatal(err) - } - if err := accman.Unlock(a, ""); err != nil { - t.Fatal(err) - } - // Start the node and assemble the REPL tester - if err := stack.Start(); err != nil { - t.Fatalf("failed to start test stack: %v", err) - 
} - var ethereum *eth.Ethereum - stack.Service(ðereum) - - assetPath := filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "ethereum", "go-ethereum", "cmd", "mist", "assets", "ext") - client, err := stack.Attach() - if err != nil { - t.Fatalf("failed to attach to node: %v", err) - } - tf := &testjethre{client: ethereum.HTTPClient()} - repl := newJSRE(stack, assetPath, "", client, false) - tf.jsre = repl - return tmp, tf, stack -} - -func TestNodeInfo(t *testing.T) { - t.Skip("broken after p2p update") - tmp, repl, ethereum := testJEthRE(t) - defer ethereum.Stop() - defer os.RemoveAll(tmp) - - want := `{"DiscPort":0,"IP":"0.0.0.0","ListenAddr":"","Name":"test","NodeID":"4cb2fc32924e94277bf94b5e4c983beedb2eabd5a0bc941db32202735c6625d020ca14a5963d1738af43b6ac0a711d61b1a06de931a499fe2aa0b1a132a902b5","NodeUrl":"enode://4cb2fc32924e94277bf94b5e4c983beedb2eabd5a0bc941db32202735c6625d020ca14a5963d1738af43b6ac0a711d61b1a06de931a499fe2aa0b1a132a902b5@0.0.0.0:0","TCPPort":0,"Td":"131072"}` - checkEvalJSON(t, repl, `admin.nodeInfo`, want) -} - -func TestAccounts(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - - checkEvalJSON(t, repl, `eth.accounts`, `["`+testAddress+`"]`) - checkEvalJSON(t, repl, `eth.coinbase`, `"`+testAddress+`"`) - val, err := repl.re.Run(`jeth.newAccount("password")`) - if err != nil { - t.Errorf("expected no error, got %v", err) - } - addr := val.String() - if !regexp.MustCompile(`0x[0-9a-f]{40}`).MatchString(addr) { - t.Errorf("address not hex: %q", addr) - } - - checkEvalJSON(t, repl, `eth.accounts`, `["`+testAddress+`","`+addr+`"]`) - -} - -func TestBlockChain(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - // get current block dump before export/import. - val, err := repl.re.Run("JSON.stringify(debug.dumpBlock(eth.blockNumber))") - if err != nil { - t.Errorf("expected no error, got %v", err) - } - beforeExport := val.String() - - // do the export - extmp, err := ioutil.TempDir("", "geth-test-export") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(extmp) - tmpfile := filepath.Join(extmp, "export.chain") - tmpfileq := strconv.Quote(tmpfile) - - var ethereum *eth.Ethereum - node.Service(ðereum) - ethereum.BlockChain().Reset() - - checkEvalJSON(t, repl, `admin.exportChain(`+tmpfileq+`)`, `true`) - if _, err := os.Stat(tmpfile); err != nil { - t.Fatal(err) - } - - // check import, verify that dumpBlock gives the same result. - checkEvalJSON(t, repl, `admin.importChain(`+tmpfileq+`)`, `true`) - checkEvalJSON(t, repl, `debug.dumpBlock(eth.blockNumber)`, beforeExport) -} - -func TestMining(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - checkEvalJSON(t, repl, `eth.mining`, `false`) -} - -func TestRPC(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - - checkEvalJSON(t, repl, `admin.startRPC("127.0.0.1", 5004, "*", "web3,eth,net")`, `true`) -} - -func TestCheckTestAccountBalance(t *testing.T) { - t.Skip() // i don't think it tests the correct behaviour here. it's actually testing - // internals which shouldn't be tested. 
This now fails because of a change in the core - // and i have no means to fix this, sorry - @obscuren - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - - repl.re.Run(`primary = "` + testAddress + `"`) - checkEvalJSON(t, repl, `eth.getBalance(primary)`, `"`+testBalance+`"`) -} - -func TestSignature(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - - val, err := repl.re.Run(`eth.sign("` + testAddress + `", "` + testHash + `")`) - - // This is a very preliminary test, lacking actual signature verification - if err != nil { - t.Errorf("Error running js: %v", err) - return - } - output := val.String() - t.Logf("Output: %v", output) - - regex := regexp.MustCompile(`^0x[0-9a-f]{130}$`) - if !regex.MatchString(output) { - t.Errorf("Signature is not 65 bytes represented in hexadecimal.") - return - } -} - -func TestContract(t *testing.T) { - t.Skip("contract testing is implemented with mining in ethash test mode. This takes about 7seconds to run. Unskip and run on demand") - coinbase := common.HexToAddress(testAddress) - tmp, repl, ethereum := testREPL(t, func(conf *eth.Config) { - conf.Etherbase = coinbase - conf.PowTest = true - }) - if err := ethereum.Start(); err != nil { - t.Errorf("error starting ethereum: %v", err) - return - } - defer ethereum.Stop() - defer os.RemoveAll(tmp) - - // Temporary disabled while registrar isn't migrated - //reg := registrar.New(repl.xeth) - //_, err := reg.SetGlobalRegistrar("", coinbase) - //if err != nil { - // t.Errorf("error setting HashReg: %v", err) - //} - //_, err = reg.SetHashReg("", coinbase) - //if err != nil { - // t.Errorf("error setting HashReg: %v", err) - //} - //_, err = reg.SetUrlHint("", coinbase) - //if err != nil { - // t.Errorf("error setting HashReg: %v", err) - //} - /* TODO: - * lookup receipt and contract addresses by tx hash - * name registration for HashReg and UrlHint addresses - * mine those transactions - * then set once more SetHashReg SetUrlHint - */ - - source := `contract test {\n` + - " /// @notice Will multiply `a` by 7." 
+ `\n` + - ` function multiply(uint a) returns(uint d) {\n` + - ` return a * 7;\n` + - ` }\n` + - `}\n` - - if checkEvalJSON(t, repl, `admin.stopNatSpec()`, `true`) != nil { - return - } - - contractInfo, err := ioutil.ReadFile("info_test.json") - if err != nil { - t.Fatalf("%v", err) - } - if checkEvalJSON(t, repl, `primary = eth.accounts[0]`, `"`+testAddress+`"`) != nil { - return - } - if checkEvalJSON(t, repl, `source = "`+source+`"`, `"`+source+`"`) != nil { - return - } - - // if solc is found with right version, test it, otherwise read from file - sol, err := compiler.New("") - if err != nil { - t.Logf("solc not found: mocking contract compilation step") - } else if sol.Version() != solcVersion { - t.Logf("WARNING: solc different version found (%v, test written for %v, may need to update)", sol.Version(), solcVersion) - } - - if err != nil { - info, err := ioutil.ReadFile("info_test.json") - if err != nil { - t.Fatalf("%v", err) - } - _, err = repl.re.Run(`contract = JSON.parse(` + strconv.Quote(string(info)) + `)`) - if err != nil { - t.Errorf("%v", err) - } - } else { - if checkEvalJSON(t, repl, `contract = eth.compile.solidity(source).test`, string(contractInfo)) != nil { - return - } - } - - if checkEvalJSON(t, repl, `contract.code`, `"0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"`) != nil { - return - } - - if checkEvalJSON( - t, repl, - `contractaddress = eth.sendTransaction({from: primary, data: contract.code})`, - `"0x46d69d55c3c4b86a924a92c9fc4720bb7bce1d74"`, - ) != nil { - return - } - - if !processTxs(repl, t, 8) { - return - } - - callSetup := `abiDef = JSON.parse('[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}]'); -Multiply7 = eth.contract(abiDef); -multiply7 = Multiply7.at(contractaddress); -` - _, err = repl.re.Run(callSetup) - if err != nil { - t.Errorf("unexpected error setting up contract, got %v", err) - return - } - - expNotice := "" - if repl.lastConfirm != expNotice { - t.Errorf("incorrect confirmation message: expected %v, got %v", expNotice, repl.lastConfirm) - return - } - - if checkEvalJSON(t, repl, `admin.startNatSpec()`, `true`) != nil { - return - } - if checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary })`, `"0x4ef9088431a8033e4580d00e4eb2487275e031ff4163c7529df0ef45af17857b"`) != nil { - return - } - - if !processTxs(repl, t, 1) { - return - } - - expNotice = `About to submit transaction (no NatSpec info found for contract: content hash not found for '0x87e2802265838c7f14bb69eecd2112911af6767907a702eeaa445239fb20711b'): {"params":[{"to":"0x46d69d55c3c4b86a924a92c9fc4720bb7bce1d74","data": "0xc6888fa10000000000000000000000000000000000000000000000000000000000000006"}]}` - if repl.lastConfirm != expNotice { - t.Errorf("incorrect confirmation message: expected\n%v, got\n%v", expNotice, repl.lastConfirm) - return - } - - var contentHash = `"0x86d2b7cf1e72e9a7a3f8d96601f0151742a2f780f1526414304fbe413dc7f9bd"` - if sol != nil && solcVersion != sol.Version() { - modContractInfo := versionRE.ReplaceAll(contractInfo, []byte(`"compilerVersion":"`+sol.Version()+`"`)) - fmt.Printf("modified contractinfo:\n%s\n", modContractInfo) - contentHash = `"` + common.ToHex(crypto.Keccak256([]byte(modContractInfo))) + `"` - } - if checkEvalJSON(t, repl, `filename = "/tmp/info.json"`, 
`"/tmp/info.json"`) != nil { - return - } - if checkEvalJSON(t, repl, `contentHash = admin.saveInfo(contract.info, filename)`, contentHash) != nil { - return - } - if checkEvalJSON(t, repl, `admin.register(primary, contractaddress, contentHash)`, `true`) != nil { - return - } - if checkEvalJSON(t, repl, `admin.registerUrl(primary, contentHash, "file://"+filename)`, `true`) != nil { - return - } - - if checkEvalJSON(t, repl, `admin.startNatSpec()`, `true`) != nil { - return - } - - if !processTxs(repl, t, 3) { - return - } - - if checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary })`, `"0x66d7635c12ad0b231e66da2f987ca3dfdca58ffe49c6442aa55960858103fd0c"`) != nil { - return - } - - if !processTxs(repl, t, 1) { - return - } - - expNotice = "Will multiply 6 by 7." - if repl.lastConfirm != expNotice { - t.Errorf("incorrect confirmation message: expected\n%v, got\n%v", expNotice, repl.lastConfirm) - return - } -} - -func pendingTransactions(repl *testjethre, t *testing.T) (txc int64, err error) { - var ethereum *eth.Ethereum - repl.stack.Service(ðereum) - - txs := ethereum.TxPool().GetTransactions() - return int64(len(txs)), nil -} - -func processTxs(repl *testjethre, t *testing.T, expTxc int) bool { - var txc int64 - var err error - for i := 0; i < 50; i++ { - txc, err = pendingTransactions(repl, t) - if err != nil { - t.Errorf("unexpected error checking pending transactions: %v", err) - return false - } - if expTxc < int(txc) { - t.Errorf("too many pending transactions: expected %v, got %v", expTxc, txc) - return false - } else if expTxc == int(txc) { - break - } - time.Sleep(100 * time.Millisecond) - } - if int(txc) != expTxc { - t.Errorf("incorrect number of pending transactions, expected %v, got %v", expTxc, txc) - return false - } - var ethereum *eth.Ethereum - repl.stack.Service(ðereum) - - err = ethereum.StartMining(runtime.NumCPU(), "") - if err != nil { - t.Errorf("unexpected error mining: %v", err) - return false - } - defer ethereum.StopMining() - - timer := time.NewTimer(100 * time.Second) - blockNr := ethereum.BlockChain().CurrentBlock().Number() - height := new(big.Int).Add(blockNr, big.NewInt(1)) - repl.wait <- height - select { - case <-timer.C: - // if times out make sure the xeth loop does not block - go func() { - select { - case repl.wait <- nil: - case <-repl.wait: - } - }() - case <-repl.wait: - } - txc, err = pendingTransactions(repl, t) - if err != nil { - t.Errorf("unexpected error checking pending transactions: %v", err) - return false - } - if txc != 0 { - t.Errorf("%d trasactions were not mined", txc) - return false - } - return true -} - -func checkEvalJSON(t *testing.T, re *testjethre, expr, want string) error { - val, err := re.re.Run("JSON.stringify(" + expr + ")") - if err == nil && val.String() != want { - err = fmt.Errorf("Output mismatch for `%s`:\ngot: %s\nwant: %s", expr, val.String(), want) - } - if err != nil { - _, file, line, _ := runtime.Caller(1) - file = filepath.Base(file) - fmt.Printf("\t%s:%d: %v\n", file, line, err) - t.Fail() - } - return err -} diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 1047a2bbd0908..3654788ce9d28 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -22,7 +22,6 @@ import ( "fmt" "io/ioutil" "os" - "os/signal" "path/filepath" "runtime" "strconv" @@ -33,6 +32,7 @@ import ( "github.com/ethereum/ethash" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/core" 
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/ethdb" @@ -95,6 +95,9 @@ func init() { monitorCommand, accountCommand, walletCommand, + consoleCommand, + attachCommand, + javascriptCommand, { Action: makedag, Name: "makedag", @@ -138,36 +141,6 @@ The output of this command is supposed to be machine-readable. The init command initialises a new genesis block and definition for the network. This is a destructive action and changes the network in which you will be participating. -`, - }, - { - Action: console, - Name: "console", - Usage: `Geth Console: interactive JavaScript environment`, - Description: ` -The Geth console is an interactive shell for the JavaScript runtime environment -which exposes a node admin interface as well as the Ðapp JavaScript API. -See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console -`, - }, - { - Action: attach, - Name: "attach", - Usage: `Geth Console: interactive JavaScript environment (connect to node)`, - Description: ` - The Geth console is an interactive shell for the JavaScript runtime environment - which exposes a node admin interface as well as the Ðapp JavaScript API. - See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console. - This command allows to open a console on a running geth node. - `, - }, - { - Action: execScripts, - Name: "js", - Usage: `executes the given JavaScript files in the Geth JavaScript VM`, - Description: ` -The JavaScript VM exposes a node admin interface as well as the Ðapp -JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console `, }, } @@ -214,7 +187,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso utils.IPCApiFlag, utils.IPCPathFlag, utils.ExecFlag, - utils.PreLoadJSFlag, + utils.PreloadJSFlag, utils.WhisperEnabledFlag, utils.DevModeFlag, utils.TestNetFlag, @@ -263,7 +236,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso app.After = func(ctx *cli.Context) error { logger.Flush() debug.Exit() - utils.Stdin.Close() // Resets terminal mode. + console.TerminalPrompter.Close() // Resets terminal mode. return nil } } @@ -304,36 +277,6 @@ func geth(ctx *cli.Context) { node.Wait() } -// attach will connect to a running geth instance attaching a JavaScript console and to it. -func attach(ctx *cli.Context) { - // attach to a running geth instance - client, err := utils.NewRemoteRPCClient(ctx) - if err != nil { - utils.Fatalf("Unable to attach to geth: %v", err) - } - - repl := newLightweightJSRE( - ctx.GlobalString(utils.JSpathFlag.Name), - client, - ctx.GlobalString(utils.DataDirFlag.Name), - true, - ) - - // preload user defined JS files into the console - err = repl.preloadJSFiles(ctx) - if err != nil { - utils.Fatalf("unable to preload JS file %v", err) - } - - // in case the exec flag holds a JS statement execute it and return - if ctx.GlobalString(utils.ExecFlag.Name) != "" { - repl.batch(ctx.GlobalString(utils.ExecFlag.Name)) - } else { - repl.welcome() - repl.interactive() - } -} - // initGenesis will initialise the given JSON format genesis file and writes it as // the zero'd block (i.e. genesis) or will fail hard if it can't succeed. func initGenesis(ctx *cli.Context) { @@ -359,77 +302,6 @@ func initGenesis(ctx *cli.Context) { glog.V(logger.Info).Infof("successfully wrote genesis block and/or chain rule set: %x", block.Hash()) } -// console starts a new geth node, attaching a JavaScript console to it at the -// same time. 
-func console(ctx *cli.Context) { - // Create and start the node based on the CLI flags - node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx) - startNode(ctx, node) - - // Attach to the newly started node, and either execute script or become interactive - client, err := node.Attach() - if err != nil { - utils.Fatalf("Failed to attach to the inproc geth: %v", err) - } - repl := newJSRE(node, - ctx.GlobalString(utils.JSpathFlag.Name), - ctx.GlobalString(utils.RPCCORSDomainFlag.Name), - client, true) - - // preload user defined JS files into the console - err = repl.preloadJSFiles(ctx) - if err != nil { - utils.Fatalf("%v", err) - } - - // in case the exec flag holds a JS statement execute it and return - if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { - repl.batch(script) - } else { - repl.welcome() - repl.interactive() - } - node.Stop() -} - -// execScripts starts a new geth node based on the CLI flags, and executes each -// of the JavaScript files specified as command arguments. -func execScripts(ctx *cli.Context) { - // Create and start the node based on the CLI flags - node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx) - startNode(ctx, node) - defer node.Stop() - - // Attach to the newly started node and execute the given scripts - client, err := node.Attach() - if err != nil { - utils.Fatalf("Failed to attach to the inproc geth: %v", err) - } - repl := newJSRE(node, - ctx.GlobalString(utils.JSpathFlag.Name), - ctx.GlobalString(utils.RPCCORSDomainFlag.Name), - client, false) - - // Run all given files. - for _, file := range ctx.Args() { - if err = repl.re.Exec(file); err != nil { - break - } - } - if err != nil { - utils.Fatalf("JavaScript Error: %v", jsErrorString(err)) - } - // JS files loaded successfully. - // Wait for pending callbacks, but stop for Ctrl-C. - abort := make(chan os.Signal, 1) - signal.Notify(abort, os.Interrupt) - go func() { - <-abort - repl.re.Stop(false) - }() - repl.re.Stop(true) -} - // startNode boots up the system node and all registered protocols, after which // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the // miner. 
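Illustrative sketch (not part of this patch series): the consoleCommand, attachCommand and javascriptCommand entries registered above are implemented on top of the console package that this series introduces further down. A handler built on that API, living alongside the other handlers in cmd/geth's package main and reusing the node helpers from the removed functions above plus the MakeConsolePreloads helper added to cmd/utils below, could look roughly like this; the handler name is hypothetical and the real wiring is not shown in this hunk.

func localConsoleExample(ctx *cli.Context) {
	// Create and start the node exactly like the removed helpers did.
	node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
	startNode(ctx, node)
	defer node.Stop()

	// Attach an in-process RPC client and build the console around it.
	client, err := node.Attach()
	if err != nil {
		utils.Fatalf("Failed to attach to the inproc geth: %v", err)
	}
	cons, err := console.New(console.Config{
		DataDir: ctx.GlobalString(utils.DataDirFlag.Name),
		DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
		Client:  client,
		Preload: utils.MakeConsolePreloads(ctx),
	})
	if err != nil {
		utils.Fatalf("Failed to start the JavaScript console: %v", err)
	}
	defer cons.Stop(false)

	// Either run the --exec statement or drop into interactive mode.
	if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
		cons.Evaluate(script)
		return
	}
	cons.Welcome()
	cons.Interactive()
}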
diff --git a/cmd/geth/run_test.go b/cmd/geth/run_test.go index ba4ce0c60f24a..f6bc3f869ddeb 100644 --- a/cmd/geth/run_test.go +++ b/cmd/geth/run_test.go @@ -45,6 +45,7 @@ type testgeth struct { // template variables for expect Datadir string Executable string + Etherbase string Func template.FuncMap removeDatadir bool @@ -67,11 +68,15 @@ func init() { func runGeth(t *testing.T, args ...string) *testgeth { tt := &testgeth{T: t, Executable: os.Args[0]} for i, arg := range args { - if arg == "-datadir" || arg == "--datadir" { + switch { + case arg == "-datadir" || arg == "--datadir": if i < len(args)-1 { tt.Datadir = args[i+1] } - break + case arg == "-etherbase" || arg == "--etherbase": + if i < len(args)-1 { + tt.Etherbase = args[i+1] + } } } if tt.Datadir == "" { diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 90019d7b976a8..01a71c1f65a0b 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -101,7 +101,7 @@ var AppHelpFlagGroups = []flagGroup{ utils.RPCCORSDomainFlag, utils.JSpathFlag, utils.ExecFlag, - utils.PreLoadJSFlag, + utils.PreloadJSFlag, }, }, { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 43dbc37f749b3..c476e1c779fe5 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -302,7 +302,7 @@ var ( Name: "exec", Usage: "Execute JavaScript statement (only in combination with console/attach)", } - PreLoadJSFlag = cli.StringFlag{ + PreloadJSFlag = cli.StringFlag{ Name: "preload", Usage: "Comma separated list of JavaScript files to preload into the console", } @@ -864,3 +864,20 @@ func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database } return chain, chainDb } + +// MakeConsolePreloads retrieves the absolute paths for the console JavaScript +// scripts to preload before starting. +func MakeConsolePreloads(ctx *cli.Context) []string { + // Skip preloading if there's nothing to preload + if ctx.GlobalString(PreloadJSFlag.Name) == "" { + return nil + } + // Otherwise resolve absolute paths and return them + preloads := []string{} + + assets := ctx.GlobalString(JSpathFlag.Name) + for _, file := range strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") { + preloads = append(preloads, common.AbsolutePath(assets, strings.TrimSpace(file))) + } + return preloads +} diff --git a/cmd/utils/input.go b/cmd/utils/input.go deleted file mode 100644 index 523d5a58706e3..0000000000000 --- a/cmd/utils/input.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package utils - -import ( - "fmt" - "strings" - - "github.com/peterh/liner" -) - -// Holds the stdin line reader. -// Only this reader may be used for input because it keeps -// an internal buffer. 
-var Stdin = newUserInputReader() - -type userInputReader struct { - *liner.State - warned bool - supported bool - normalMode liner.ModeApplier - rawMode liner.ModeApplier -} - -func newUserInputReader() *userInputReader { - r := new(userInputReader) - // Get the original mode before calling NewLiner. - // This is usually regular "cooked" mode where characters echo. - normalMode, _ := liner.TerminalMode() - // Turn on liner. It switches to raw mode. - r.State = liner.NewLiner() - rawMode, err := liner.TerminalMode() - if err != nil || !liner.TerminalSupported() { - r.supported = false - } else { - r.supported = true - r.normalMode = normalMode - r.rawMode = rawMode - // Switch back to normal mode while we're not prompting. - normalMode.ApplyMode() - } - return r -} - -func (r *userInputReader) Prompt(prompt string) (string, error) { - if r.supported { - r.rawMode.ApplyMode() - defer r.normalMode.ApplyMode() - } else { - // liner tries to be smart about printing the prompt - // and doesn't print anything if input is redirected. - // Un-smart it by printing the prompt always. - fmt.Print(prompt) - prompt = "" - defer fmt.Println() - } - return r.State.Prompt(prompt) -} - -func (r *userInputReader) PasswordPrompt(prompt string) (passwd string, err error) { - if r.supported { - r.rawMode.ApplyMode() - defer r.normalMode.ApplyMode() - return r.State.PasswordPrompt(prompt) - } - if !r.warned { - fmt.Println("!! Unsupported terminal, password will be echoed.") - r.warned = true - } - // Just as in Prompt, handle printing the prompt here instead of relying on liner. - fmt.Print(prompt) - passwd, err = r.State.Prompt("") - fmt.Println() - return passwd, err -} - -func (r *userInputReader) ConfirmPrompt(prompt string) (bool, error) { - prompt = prompt + " [y/N] " - input, err := r.Prompt(prompt) - if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" { - return true, nil - } - return false, err -} diff --git a/cmd/utils/jeth.go b/cmd/utils/jeth.go deleted file mode 100644 index 9410180b01be6..0000000000000 --- a/cmd/utils/jeth.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package utils - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/ethereum/go-ethereum/jsre" - "github.com/ethereum/go-ethereum/rpc" - - "github.com/robertkrimen/otto" -) - -type Jeth struct { - re *jsre.JSRE - client rpc.Client -} - -// NewJeth create a new backend for the JSRE console -func NewJeth(re *jsre.JSRE, client rpc.Client) *Jeth { - return &Jeth{re, client} -} - -// err returns an error object for the given error code and message. 
-func (self *Jeth) err(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) { - m := rpc.JSONErrResponse{ - Version: "2.0", - Id: id, - Error: rpc.JSONError{ - Code: code, - Message: msg, - }, - } - - errObj, _ := json.Marshal(m.Error) - errRes, _ := json.Marshal(m) - - call.Otto.Run("ret_error = " + string(errObj)) - res, _ := call.Otto.Run("ret_response = " + string(errRes)) - - return res -} - -// UnlockAccount asks the user for the password and than executes the jeth.UnlockAccount callback in the jsre. -// It will need the public address for the account to unlock as first argument. -// The second argument is an optional string with the password. If not given the user is prompted for the password. -// The third argument is an optional integer which specifies for how long the account will be unlocked (in seconds). -func (self *Jeth) UnlockAccount(call otto.FunctionCall) (response otto.Value) { - var account, passwd otto.Value - duration := otto.NullValue() - - if !call.Argument(0).IsString() { - fmt.Println("first argument must be the account to unlock") - return otto.FalseValue() - } - - account = call.Argument(0) - - // if password is not given or as null value -> ask user for password - if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() { - fmt.Printf("Unlock account %s\n", account) - if input, err := Stdin.PasswordPrompt("Passphrase: "); err != nil { - throwJSExeception(err.Error()) - } else { - passwd, _ = otto.ToValue(input) - } - } else { - if !call.Argument(1).IsString() { - throwJSExeception("password must be a string") - } - passwd = call.Argument(1) - } - - // third argument is the duration how long the account must be unlocked. - // verify that its a number. - if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() { - if !call.Argument(2).IsNumber() { - throwJSExeception("unlock duration must be a number") - } - duration = call.Argument(2) - } - - // jeth.unlockAccount will send the request to the backend. - if val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration); err == nil { - return val - } else { - throwJSExeception(err.Error()) - } - - return otto.FalseValue() -} - -// NewAccount asks the user for the password and than executes the jeth.newAccount callback in the jsre -func (self *Jeth) NewAccount(call otto.FunctionCall) (response otto.Value) { - var passwd string - if len(call.ArgumentList) == 0 { - var err error - passwd, err = Stdin.PasswordPrompt("Passphrase: ") - if err != nil { - return otto.FalseValue() - } - passwd2, err := Stdin.PasswordPrompt("Repeat passphrase: ") - if err != nil { - return otto.FalseValue() - } - - if passwd != passwd2 { - fmt.Println("Passphrases don't match") - return otto.FalseValue() - } - } else if len(call.ArgumentList) == 1 && call.Argument(0).IsString() { - passwd, _ = call.Argument(0).ToString() - } else { - fmt.Println("expected 0 or 1 string argument") - return otto.FalseValue() - } - - ret, err := call.Otto.Call("jeth.newAccount", nil, passwd) - if err == nil { - return ret - } - fmt.Println(err) - return otto.FalseValue() -} - -// Send will serialize the first argument, send it to the node and returns the response. 
-func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) { - // verify we got a batch request (array) or a single request (object) - ro := call.Argument(0).Object() - if ro == nil || (ro.Class() != "Array" && ro.Class() != "Object") { - throwJSExeception("Internal Error: request must be an object or array") - } - - // convert otto vm arguments to go values by JSON serialising and parsing. - data, err := call.Otto.Call("JSON.stringify", nil, ro) - if err != nil { - throwJSExeception(err.Error()) - } - - jsonreq, _ := data.ToString() - - // parse arguments to JSON rpc requests, either to an array (batch) or to a single request. - var reqs []rpc.JSONRequest - batch := true - if err = json.Unmarshal([]byte(jsonreq), &reqs); err != nil { - // single request? - reqs = make([]rpc.JSONRequest, 1) - if err = json.Unmarshal([]byte(jsonreq), &reqs[0]); err != nil { - throwJSExeception("invalid request") - } - batch = false - } - - call.Otto.Set("response_len", len(reqs)) - call.Otto.Run("var ret_response = new Array(response_len);") - - for i, req := range reqs { - if err := self.client.Send(&req); err != nil { - return self.err(call, -32603, err.Error(), req.Id) - } - - result := make(map[string]interface{}) - if err = self.client.Recv(&result); err != nil { - return self.err(call, -32603, err.Error(), req.Id) - } - - id, _ := result["id"] - jsonver, _ := result["jsonrpc"] - - call.Otto.Set("ret_id", id) - call.Otto.Set("ret_jsonrpc", jsonver) - call.Otto.Set("response_idx", i) - - // call was successful - if res, ok := result["result"]; ok { - payload, _ := json.Marshal(res) - call.Otto.Set("ret_result", string(payload)) - response, err = call.Otto.Run(` - ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; - `) - continue - } - - // request returned an error - if res, ok := result["error"]; ok { - payload, _ := json.Marshal(res) - call.Otto.Set("ret_result", string(payload)) - response, err = call.Otto.Run(` - ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, error: JSON.parse(ret_result) }; - `) - continue - } - - return self.err(call, -32603, fmt.Sprintf("Invalid response"), new(int64)) - } - - if !batch { - call.Otto.Run("ret_response = ret_response[0];") - } - - // if a callback was given execute it. - if call.Argument(1).IsObject() { - call.Otto.Set("callback", call.Argument(1)) - call.Otto.Run(` - if (Object.prototype.toString.call(callback) == '[object Function]') { - callback(null, ret_response); - } - `) - } - - return -} - -// throwJSExeception panics on an otto value, the Otto VM will then throw msg as a javascript error. -func throwJSExeception(msg interface{}) otto.Value { - p, _ := otto.ToValue(msg) - panic(p) -} - -// Sleep will halt the console for arg[0] seconds. -func (self *Jeth) Sleep(call otto.FunctionCall) (response otto.Value) { - if len(call.ArgumentList) >= 1 { - if call.Argument(0).IsNumber() { - sleep, _ := call.Argument(0).ToInteger() - time.Sleep(time.Duration(sleep) * time.Second) - return otto.TrueValue() - } - } - return throwJSExeception("usage: sleep()") -} - -// SleepBlocks will wait for a specified number of new blocks or max for a -// given of seconds. sleepBlocks(nBlocks[, maxSleep]). 
-func (self *Jeth) SleepBlocks(call otto.FunctionCall) (response otto.Value) { - nBlocks := int64(0) - maxSleep := int64(9999999999999999) // indefinitely - - nArgs := len(call.ArgumentList) - - if nArgs == 0 { - throwJSExeception("usage: sleepBlocks([, max sleep in seconds])") - } - - if nArgs >= 1 { - if call.Argument(0).IsNumber() { - nBlocks, _ = call.Argument(0).ToInteger() - } else { - throwJSExeception("expected number as first argument") - } - } - - if nArgs >= 2 { - if call.Argument(1).IsNumber() { - maxSleep, _ = call.Argument(1).ToInteger() - } else { - throwJSExeception("expected number as second argument") - } - } - - // go through the console, this will allow web3 to call the appropriate - // callbacks if a delayed response or notification is received. - currentBlockNr := func() int64 { - result, err := call.Otto.Run("eth.blockNumber") - if err != nil { - throwJSExeception(err.Error()) - } - blockNr, err := result.ToInteger() - if err != nil { - throwJSExeception(err.Error()) - } - return blockNr - } - - targetBlockNr := currentBlockNr() + nBlocks - deadline := time.Now().Add(time.Duration(maxSleep) * time.Second) - - for time.Now().Before(deadline) { - if currentBlockNr() >= targetBlockNr { - return otto.TrueValue() - } - time.Sleep(time.Second) - } - - return otto.FalseValue() -} diff --git a/console/bridge.go b/console/bridge.go new file mode 100644 index 0000000000000..b23e06837d75f --- /dev/null +++ b/console/bridge.go @@ -0,0 +1,317 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package console + +import ( + "encoding/json" + "fmt" + "io" + "time" + + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rpc" + "github.com/robertkrimen/otto" +) + +// bridge is a collection of JavaScript utility methods to bride the .js runtime +// environment and the Go RPC connection backing the remote method calls. +type bridge struct { + client rpc.Client // RPC client to execute Ethereum requests through + prompter UserPrompter // Input prompter to allow interactive user feedback + printer io.Writer // Output writer to serialize any display strings to +} + +// newBridge creates a new JavaScript wrapper around an RPC client. +func newBridge(client rpc.Client, prompter UserPrompter, printer io.Writer) *bridge { + return &bridge{ + client: client, + prompter: prompter, + printer: printer, + } +} + +// NewAccount is a wrapper around the personal.newAccount RPC method that uses a +// non-echoing password prompt to aquire the passphrase and executes the original +// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call. 
+func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) { + var ( + password string + confirm string + err error + ) + switch { + // No password was specified, prompt the user for it + case len(call.ArgumentList) == 0: + if password, err = b.prompter.PromptPassword("Passphrase: "); err != nil { + throwJSException(err.Error()) + } + if confirm, err = b.prompter.PromptPassword("Repeat passphrase: "); err != nil { + throwJSException(err.Error()) + } + if password != confirm { + throwJSException("passphrases don't match!") + } + + // A single string password was specified, use that + case len(call.ArgumentList) == 1 && call.Argument(0).IsString(): + password, _ = call.Argument(0).ToString() + + // Otherwise fail with some error + default: + throwJSException("expected 0 or 1 string argument") + } + // Password aquired, execute the call and return + ret, err := call.Otto.Call("jeth.newAccount", nil, password) + if err != nil { + throwJSException(err.Error()) + } + return ret +} + +// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that +// uses a non-echoing password prompt to aquire the passphrase and executes the +// original RPC method (saved in jeth.unlockAccount) with it to actually execute +// the RPC call. +func (b *bridge) UnlockAccount(call otto.FunctionCall) (response otto.Value) { + // Make sure we have an account specified to unlock + if !call.Argument(0).IsString() { + throwJSException("first argument must be the account to unlock") + } + account := call.Argument(0) + + // If password is not given or is the null value, prompt the user for it + var passwd otto.Value + + if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() { + fmt.Fprintf(b.printer, "Unlock account %s\n", account) + if input, err := b.prompter.PromptPassword("Passphrase: "); err != nil { + throwJSException(err.Error()) + } else { + passwd, _ = otto.ToValue(input) + } + } else { + if !call.Argument(1).IsString() { + throwJSException("password must be a string") + } + passwd = call.Argument(1) + } + // Third argument is the duration how long the account must be unlocked. + duration := otto.NullValue() + if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() { + if !call.Argument(2).IsNumber() { + throwJSException("unlock duration must be a number") + } + duration = call.Argument(2) + } + // Send the request to the backend and return + val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration) + if err != nil { + throwJSException(err.Error()) + } + return val +} + +// Sleep will block the console for the specified number of seconds. +func (b *bridge) Sleep(call otto.FunctionCall) (response otto.Value) { + if call.Argument(0).IsNumber() { + sleep, _ := call.Argument(0).ToInteger() + time.Sleep(time.Duration(sleep) * time.Second) + return otto.TrueValue() + } + return throwJSException("usage: sleep()") +} + +// SleepBlocks will block the console for a specified number of new blocks optionally +// until the given timeout is reached. 
+func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) { + var ( + blocks = int64(0) + sleep = int64(9999999999999999) // indefinitely + ) + // Parse the input parameters for the sleep + nArgs := len(call.ArgumentList) + if nArgs == 0 { + throwJSException("usage: sleepBlocks([, max sleep in seconds])") + } + if nArgs >= 1 { + if call.Argument(0).IsNumber() { + blocks, _ = call.Argument(0).ToInteger() + } else { + throwJSException("expected number as first argument") + } + } + if nArgs >= 2 { + if call.Argument(1).IsNumber() { + sleep, _ = call.Argument(1).ToInteger() + } else { + throwJSException("expected number as second argument") + } + } + // go through the console, this will allow web3 to call the appropriate + // callbacks if a delayed response or notification is received. + blockNumber := func() int64 { + result, err := call.Otto.Run("eth.blockNumber") + if err != nil { + throwJSException(err.Error()) + } + block, err := result.ToInteger() + if err != nil { + throwJSException(err.Error()) + } + return block + } + // Poll the current block number until either it ot a timeout is reached + targetBlockNr := blockNumber() + blocks + deadline := time.Now().Add(time.Duration(sleep) * time.Second) + + for time.Now().Before(deadline) { + if blockNumber() >= targetBlockNr { + return otto.TrueValue() + } + time.Sleep(time.Second) + } + return otto.FalseValue() +} + +// Send will serialize the first argument, send it to the node and returns the response. +func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) { + // Ensure that we've got a batch request (array) or a single request (object) + arg := call.Argument(0).Object() + if arg == nil || (arg.Class() != "Array" && arg.Class() != "Object") { + throwJSException("request must be an object or array") + } + // Convert the otto VM arguments to Go values + data, err := call.Otto.Call("JSON.stringify", nil, arg) + if err != nil { + throwJSException(err.Error()) + } + reqjson, err := data.ToString() + if err != nil { + throwJSException(err.Error()) + } + + var ( + reqs []rpc.JSONRequest + batch = true + ) + if err = json.Unmarshal([]byte(reqjson), &reqs); err != nil { + // single request? 
+ reqs = make([]rpc.JSONRequest, 1) + if err = json.Unmarshal([]byte(reqjson), &reqs[0]); err != nil { + throwJSException("invalid request") + } + batch = false + } + // Iteratively execute the requests + call.Otto.Set("response_len", len(reqs)) + call.Otto.Run("var ret_response = new Array(response_len);") + + for i, req := range reqs { + // Execute the RPC request and parse the reply + if err = b.client.Send(&req); err != nil { + return newErrorResponse(call, -32603, err.Error(), req.Id) + } + result := make(map[string]interface{}) + if err = b.client.Recv(&result); err != nil { + return newErrorResponse(call, -32603, err.Error(), req.Id) + } + // Feed the reply back into the JavaScript runtime environment + id, _ := result["id"] + jsonver, _ := result["jsonrpc"] + + call.Otto.Set("ret_id", id) + call.Otto.Set("ret_jsonrpc", jsonver) + call.Otto.Set("response_idx", i) + + if res, ok := result["result"]; ok { + payload, _ := json.Marshal(res) + call.Otto.Set("ret_result", string(payload)) + response, err = call.Otto.Run(` + ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; + `) + continue + } + if res, ok := result["error"]; ok { + payload, _ := json.Marshal(res) + call.Otto.Set("ret_result", string(payload)) + response, err = call.Otto.Run(` + ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, error: JSON.parse(ret_result) }; + `) + continue + } + return newErrorResponse(call, -32603, fmt.Sprintf("Invalid response"), new(int64)) + } + // Convert single requests back from batch ones + if !batch { + call.Otto.Run("ret_response = ret_response[0];") + } + // Execute any registered callbacks + if call.Argument(1).IsObject() { + call.Otto.Set("callback", call.Argument(1)) + call.Otto.Run(` + if (Object.prototype.toString.call(callback) == '[object Function]') { + callback(null, ret_response); + } + `) + } + return +} + +// throwJSException panics on an otto.Value. The Otto VM will recover from the +// Go panic and throw msg as a JavaScript error. +func throwJSException(msg interface{}) otto.Value { + val, err := otto.ToValue(msg) + if err != nil { + glog.V(logger.Error).Infof("Failed to serialize JavaScript exception %v: %v", msg, err) + } + panic(val) +} + +// newErrorResponse creates a JSON RPC error response for a specific request id, +// containing the specified error code and error message. Beside returning the +// error to the caller, it also sets the ret_error and ret_response JavaScript +// variables. 
+func newErrorResponse(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) { + // Bundle the error into a JSON RPC call response + res := rpc.JSONErrResponse{ + Version: rpc.JSONRPCVersion, + Id: id, + Error: rpc.JSONError{ + Code: code, + Message: msg, + }, + } + // Serialize the error response into JavaScript variables + errObj, err := json.Marshal(res.Error) + if err != nil { + glog.V(logger.Error).Infof("Failed to serialize JSON RPC error: %v", err) + } + resObj, err := json.Marshal(res) + if err != nil { + glog.V(logger.Error).Infof("Failed to serialize JSON RPC error response: %v", err) + } + + if _, err = call.Otto.Run("ret_error = " + string(errObj)); err != nil { + glog.V(logger.Error).Infof("Failed to set `ret_error` to the occurred error: %v", err) + } + resVal, err := call.Otto.Run("ret_response = " + string(resObj)) + if err != nil { + glog.V(logger.Error).Infof("Failed to set `ret_response` to the JSON RPC response: %v", err) + } + return resVal +} diff --git a/console/console.go b/console/console.go new file mode 100644 index 0000000000000..37c9f0afad244 --- /dev/null +++ b/console/console.go @@ -0,0 +1,369 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package console + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/signal" + "path/filepath" + "regexp" + "sort" + "strings" + + "github.com/ethereum/go-ethereum/internal/jsre" + "github.com/ethereum/go-ethereum/internal/web3ext" + "github.com/ethereum/go-ethereum/rpc" + "github.com/peterh/liner" + "github.com/robertkrimen/otto" +) + +var ( + passwordRegexp = regexp.MustCompile("personal.[nus]") + onlyWhitespace = regexp.MustCompile("^\\s*$") + exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$") +) + +// HistoryFile is the file within the data directory to store input scrollback. +const HistoryFile = "history" + +// DefaultPrompt is the default prompt line prefix to use for user input querying. +const DefaultPrompt = "> " + +// Config is te collection of configurations to fine tune the behavior of the +// JavaScript console. +type Config struct { + DataDir string // Data directory to store the console history at + DocRoot string // Filesystem path from where to load JavaScript files from + Client rpc.Client // RPC client to execute Ethereum requests through + Prompt string // Input prompt prefix string (defaults to DefaultPrompt) + Prompter UserPrompter // Input prompter to allow interactive user feedback (defaults to TerminalPrompter) + Printer io.Writer // Output writer to serialize any display strings to (defaults to os.Stdout) + Preload []string // Absolute paths to JavaScript files to preload +} + +// Console is a JavaScript interpreted runtime environment. 
It is a fully fleged +// JavaScript console attached to a running node via an external or in-process RPC +// client. +type Console struct { + client rpc.Client // RPC client to execute Ethereum requests through + jsre *jsre.JSRE // JavaScript runtime environment running the interpreter + prompt string // Input prompt prefix string + prompter UserPrompter // Input prompter to allow interactive user feedback + histPath string // Absolute path to the console scrollback history + history []string // Scroll history maintained by the console + printer io.Writer // Output writer to serialize any display strings to +} + +func New(config Config) (*Console, error) { + // Handle unset config values gracefully + if config.Prompter == nil { + config.Prompter = TerminalPrompter + } + if config.Prompt == "" { + config.Prompt = DefaultPrompt + } + if config.Printer == nil { + config.Printer = os.Stdout + } + // Initialize the console and return + console := &Console{ + client: config.Client, + jsre: jsre.New(config.DocRoot, config.Printer), + prompt: config.Prompt, + prompter: config.Prompter, + printer: config.Printer, + histPath: filepath.Join(config.DataDir, HistoryFile), + } + if err := console.init(config.Preload); err != nil { + return nil, err + } + return console, nil +} + +// init retrieves the available APIs from the remote RPC provider and initializes +// the console's JavaScript namespaces based on the exposed modules. +func (c *Console) init(preload []string) error { + // Initialize the JavaScript <-> Go RPC bridge + bridge := newBridge(c.client, c.prompter, c.printer) + c.jsre.Set("jeth", struct{}{}) + + jethObj, _ := c.jsre.Get("jeth") + jethObj.Object().Set("send", bridge.Send) + jethObj.Object().Set("sendAsync", bridge.Send) + + consoleObj, _ := c.jsre.Get("console") + consoleObj.Object().Set("log", c.consoleOutput) + consoleObj.Object().Set("error", c.consoleOutput) + + // Load all the internal utility JavaScript libraries + if err := c.jsre.Compile("bignumber.js", jsre.BigNumber_JS); err != nil { + return fmt.Errorf("bignumber.js: %v", err) + } + if err := c.jsre.Compile("web3.js", jsre.Web3_JS); err != nil { + return fmt.Errorf("web3.js: %v", err) + } + if _, err := c.jsre.Run("var Web3 = require('web3');"); err != nil { + return fmt.Errorf("web3 require: %v", err) + } + if _, err := c.jsre.Run("var web3 = new Web3(jeth);"); err != nil { + return fmt.Errorf("web3 provider: %v", err) + } + // Load the supported APIs into the JavaScript runtime environment + apis, err := c.client.SupportedModules() + if err != nil { + return fmt.Errorf("api modules: %v", err) + } + flatten := "var eth = web3.eth; var personal = web3.personal; " + for api := range apis { + if api == "web3" { + continue // manually mapped or ignore + } + if file, ok := web3ext.Modules[api]; ok { + if err = c.jsre.Compile(fmt.Sprintf("%s.js", api), file); err != nil { + return fmt.Errorf("%s.js: %v", api, err) + } + flatten += fmt.Sprintf("var %s = web3.%s; ", api, api) + } + } + if _, err = c.jsre.Run(flatten); err != nil { + return fmt.Errorf("namespace flattening: %v", err) + } + // Initialize the global name register (disabled for now) + //c.jsre.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`) + + // If the console is in interactive mode, instrument password related methods to query the user + if c.prompter != nil { + // Retrieve the account management object to instrument + personal, err := c.jsre.Get("personal") + 
if err != nil {
+			return err
+		}
+		// Override the unlockAccount and newAccount methods since these require user interaction.
+		// Assign the jeth.unlockAccount and jeth.newAccount in the Console the original web3 callbacks.
+		// These will be called by the jeth.* methods after they got the password from the user and send
+		// the original web3 request to the backend.
+		if obj := personal.Object(); obj != nil { // make sure the personal api is enabled over the interface
+			if _, err = c.jsre.Run(`jeth.unlockAccount = personal.unlockAccount;`); err != nil {
+				return fmt.Errorf("personal.unlockAccount: %v", err)
+			}
+			if _, err = c.jsre.Run(`jeth.newAccount = personal.newAccount;`); err != nil {
+				return fmt.Errorf("personal.newAccount: %v", err)
+			}
+			obj.Set("unlockAccount", bridge.UnlockAccount)
+			obj.Set("newAccount", bridge.NewAccount)
+		}
+	}
+	// The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer.
+	admin, err := c.jsre.Get("admin")
+	if err != nil {
+		return err
+	}
+	if obj := admin.Object(); obj != nil { // make sure the admin api is enabled over the interface
+		obj.Set("sleepBlocks", bridge.SleepBlocks)
+		obj.Set("sleep", bridge.Sleep)
+	}
+	// Preload any JavaScript files before starting the console
+	for _, path := range preload {
+		if err := c.jsre.Exec(path); err != nil {
+			return fmt.Errorf("%s: %v", path, jsErrorString(err))
+		}
+	}
+	// Configure the console's input prompter for scrollback and tab completion
+	if c.prompter != nil {
+		if content, err := ioutil.ReadFile(c.histPath); err != nil {
+			c.prompter.SetScrollHistory(nil)
+		} else {
+			c.prompter.SetScrollHistory(strings.Split(string(content), "\n"))
+		}
+		c.prompter.SetWordCompleter(c.AutoCompleteInput)
+	}
+	return nil
+}
+
+// consoleOutput is an override for the console.log and console.error methods to
+// stream the output into the configured output stream instead of stdout.
+func (c *Console) consoleOutput(call otto.FunctionCall) otto.Value {
+	output := []string{}
+	for _, argument := range call.ArgumentList {
+		output = append(output, fmt.Sprintf("%v", argument))
+	}
+	fmt.Fprintln(c.printer, strings.Join(output, " "))
+	return otto.Value{}
+}
+
+// AutoCompleteInput is a pre-assembled word completer to be used by the user
+// input prompter to provide hints to the user about the methods available.
+func (c *Console) AutoCompleteInput(line string, pos int) (string, []string, string) {
+	// No completions can be provided for empty inputs
+	if len(line) == 0 || pos == 0 {
+		return "", nil, ""
+	}
+	// Chunk data to relevant part for autocompletion
+	// E.g. in case of nested lines eth.getBalance(eth.coinb
+	start := 0
+	for start = pos - 1; start > 0; start-- {
+		// Skip all methods and namespaces (i.e. including the dot)
+		if line[start] == '.' || (line[start] >= 'a' && line[start] <= 'z') || (line[start] >= 'A' && line[start] <= 'Z') {
+			continue
+		}
+		// Handle web3 in a special way (i.e. other numbers aren't auto completed)
+		if start >= 3 && line[start-3:start] == "web3" {
+			start -= 3
+			continue
+		}
+		// We've hit an unexpected character, autocomplete from here
+		start++
+		break
+	}
+	return line[:start], c.jsre.CompleteKeywords(line[start:pos]), line[pos:]
+}
+
+// Welcome shows a summary of the current Geth instance and some metadata about the
+// console's available modules.
+func (c *Console) Welcome() {
+	// Print some generic Geth metadata
+	c.jsre.Run(`
+		(function () {
+			console.log("Welcome to the Geth JavaScript console!\n");
+			console.log("instance: " + web3.version.node);
+			console.log("coinbase: " + eth.coinbase);
+			console.log("at block: " + eth.blockNumber + " (" + new Date(1000 * eth.getBlock(eth.blockNumber).timestamp) + ")");
+			console.log(" datadir: " + admin.datadir);
+		})();
+	`)
+	// List all the supported modules for the user to call
+	if apis, err := c.client.SupportedModules(); err == nil {
+		modules := make([]string, 0, len(apis))
+		for api, version := range apis {
+			modules = append(modules, fmt.Sprintf("%s:%s", api, version))
+		}
+		sort.Strings(modules)
+		c.jsre.Run("(function () { console.log(' modules: " + strings.Join(modules, " ") + "'); })();")
+	}
+	c.jsre.Run("(function () { console.log(); })();")
+}
+
+// Evaluate executes code and pretty prints the result to the specified output
+// stream.
+func (c *Console) Evaluate(statement string) error {
+	defer func() {
+		if r := recover(); r != nil {
+			fmt.Fprintf(c.printer, "[native] error: %v\n", r)
+		}
+	}()
+	if err := c.jsre.Evaluate(statement, c.printer); err != nil {
+		fmt.Fprintf(c.printer, "%v\n", jsErrorString(err))
+		return err
+	}
+	return nil
+}
+
+// Interactive starts an interactive user session, where input is prompted from
+// the configured user prompter.
+func (c *Console) Interactive() {
+	var (
+		prompt    = c.prompt          // Current prompt line (used for multi-line inputs)
+		indents   = 0                 // Current number of input indents (used for multi-line inputs)
+		input     = ""                // Current user input
+		scheduler = make(chan string) // Channel to send the next prompt on and receive the input
+	)
+	// Start a goroutine to listen for prompt requests and send back inputs
+	go func() {
+		for {
+			// Read the next user input
+			line, err := c.prompter.PromptInput(<-scheduler)
+			if err != nil {
+				// In case of an error, either clear the prompt or fail
+				if err == liner.ErrPromptAborted { // ctrl-C
+					prompt, indents, input = c.prompt, 0, ""
+					scheduler <- ""
+					continue
+				}
+				close(scheduler)
+				return
+			}
+			// User input retrieved, send for interpretation and loop
+			scheduler <- line
+		}
+	}()
+	// Monitor Ctrl-C too in case the input is empty and we need to bail
+	abort := make(chan os.Signal, 1)
+	signal.Notify(abort, os.Interrupt)
+
+	// Start sending prompts to the user and reading back inputs
+	for {
+		// Send the next prompt, triggering an input read and process the result
+		scheduler <- prompt
+		select {
+		case <-abort:
+			// User forcefully quit the console
+			fmt.Fprintln(c.printer, "caught interrupt, exiting")
+			return
+
+		case line, ok := <-scheduler:
+			// User input was returned by the prompter, handle special cases
+			if !ok || (indents <= 0 && exit.MatchString(input)) {
+				return
+			}
+			if onlyWhitespace.MatchString(line) {
+				continue
+			}
+			// Append the line to the input and check for multi-line interpretation
+			input += line + "\n"
+
+			indents = strings.Count(input, "{") + strings.Count(input, "(") - strings.Count(input, "}") - strings.Count(input, ")")
+			if indents <= 0 {
+				prompt = c.prompt
+			} else {
+				prompt = strings.Repeat("..", indents*2) + " "
+			}
+			// If all the needed lines are present, save the command and run
+			if indents <= 0 {
+				if len(input) != 0 && input[0] != ' ' && !passwordRegexp.MatchString(input) {
+					c.history = append(c.history, input[:len(input)-1])
+				}
+				c.Evaluate(input)
+				input = ""
+			}
+		}
+	}
+}
+
+// Execute runs the JavaScript file specified as the argument.
+func (c *Console) Execute(path string) error { + return c.jsre.Exec(path) +} + +// Stop cleans up the console and terminates the runtime envorinment. +func (c *Console) Stop(graceful bool) error { + if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), os.ModePerm); err != nil { + return err + } + c.jsre.Stop(graceful) + return nil +} + +// jsErrorString adds a backtrace to errors generated by otto. +func jsErrorString(err error) string { + if ottoErr, ok := err.(*otto.Error); ok { + return ottoErr.String() + } + return err.Error() +} diff --git a/console/console_test.go b/console/console_test.go new file mode 100644 index 0000000000000..5d38331e8ccb5 --- /dev/null +++ b/console/console_test.go @@ -0,0 +1,283 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package console + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "math/big" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/internal/jsre" + "github.com/ethereum/go-ethereum/node" +) + +const ( + testInstance = "console-tester" + testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" +) + +// hookedPrompter implements UserPrompter to simulate use input via channels. +type hookedPrompter struct { + scheduler chan string +} + +func (p *hookedPrompter) PromptInput(prompt string) (string, error) { + // Send the prompt to the tester + select { + case p.scheduler <- prompt: + case <-time.After(time.Second): + return "", errors.New("prompt timeout") + } + // Retrieve the response and feed to the console + select { + case input := <-p.scheduler: + return input, nil + case <-time.After(time.Second): + return "", errors.New("input timeout") + } +} + +func (p *hookedPrompter) PromptPassword(prompt string) (string, error) { + return "", errors.New("not implemented") +} +func (p *hookedPrompter) PromptConfirm(prompt string) (bool, error) { + return false, errors.New("not implemented") +} +func (p *hookedPrompter) SetScrollHistory(history []string) {} +func (p *hookedPrompter) SetWordCompleter(completer WordCompleter) {} + +// tester is a console test environment for the console tests to operate on. +type tester struct { + workspace string + stack *node.Node + ethereum *eth.Ethereum + console *Console + input *hookedPrompter + output *bytes.Buffer + + lastConfirm string +} + +// newTester creates a test environment based on which the console can operate. +// Please ensure you call Close() on the returned tester to avoid leaks. 
+func newTester(t *testing.T, confOverride func(*eth.Config)) *tester { + // Create a temporary storage for the node keys and initialize it + workspace, err := ioutil.TempDir("", "console-tester-") + if err != nil { + t.Fatalf("failed to create temporary keystore: %v", err) + } + accman := accounts.NewPlaintextManager(filepath.Join(workspace, "keystore")) + + // Create a networkless protocol stack and start an Ethereum service within + stack, err := node.New(&node.Config{DataDir: workspace, Name: testInstance, NoDiscovery: true}) + if err != nil { + t.Fatalf("failed to create node: %v", err) + } + ethConf := ð.Config{ + ChainConfig: &core.ChainConfig{HomesteadBlock: new(big.Int)}, + Etherbase: common.HexToAddress(testAddress), + AccountManager: accman, + PowTest: true, + } + if confOverride != nil { + confOverride(ethConf) + } + if err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { return eth.New(ctx, ethConf) }); err != nil { + t.Fatalf("failed to register Ethereum protocol: %v", err) + } + // Start the node and assemble the JavaScript console around it + if err = stack.Start(); err != nil { + t.Fatalf("failed to start test stack: %v", err) + } + client, err := stack.Attach() + if err != nil { + t.Fatalf("failed to attach to node: %v", err) + } + prompter := &hookedPrompter{scheduler: make(chan string)} + printer := new(bytes.Buffer) + + console, err := New(Config{ + DataDir: stack.DataDir(), + DocRoot: "testdata", + Client: client, + Prompter: prompter, + Printer: printer, + Preload: []string{"preload.js"}, + }) + if err != nil { + t.Fatalf("failed to create JavaScript console: %v", err) + } + // Create the final tester and return + var ethereum *eth.Ethereum + stack.Service(ðereum) + + return &tester{ + workspace: workspace, + stack: stack, + ethereum: ethereum, + console: console, + input: prompter, + output: printer, + } +} + +// Close cleans up any temporary data folders and held resources. +func (env *tester) Close(t *testing.T) { + if err := env.console.Stop(false); err != nil { + t.Errorf("failed to stop embedded console: %v", err) + } + if err := env.stack.Stop(); err != nil { + t.Errorf("failed to stop embedded node: %v", err) + } + os.RemoveAll(env.workspace) +} + +// Tests that the node lists the correct welcome message, notably that it contains +// the instance name, coinbase account, block number, data directory and supported +// console modules. +func TestWelcome(t *testing.T) { + tester := newTester(t, nil) + defer tester.Close(t) + + tester.console.Welcome() + + output := string(tester.output.Bytes()) + if want := "Welcome"; !strings.Contains(output, want) { + t.Fatalf("console output missing welcome message: have\n%s\nwant also %s", output, want) + } + if want := fmt.Sprintf("instance: %s", testInstance); !strings.Contains(output, want) { + t.Fatalf("console output missing instance: have\n%s\nwant also %s", output, want) + } + if want := fmt.Sprintf("coinbase: %s", testAddress); !strings.Contains(output, want) { + t.Fatalf("console output missing coinbase: have\n%s\nwant also %s", output, want) + } + if want := "at block: 0"; !strings.Contains(output, want) { + t.Fatalf("console output missing sync status: have\n%s\nwant also %s", output, want) + } + if want := fmt.Sprintf("datadir: %s", tester.workspace); !strings.Contains(output, want) { + t.Fatalf("console output missing coinbase: have\n%s\nwant also %s", output, want) + } +} + +// Tests that JavaScript statement evaluation works as intended. 
+func TestEvaluate(t *testing.T) { + tester := newTester(t, nil) + defer tester.Close(t) + + tester.console.Evaluate("2 + 2") + if output := string(tester.output.Bytes()); !strings.Contains(output, "4") { + t.Fatalf("statement evaluation failed: have %s, want %s", output, "4") + } +} + +// Tests that the console can be used in interactive mode. +func TestInteractive(t *testing.T) { + // Create a tester and run an interactive console in the background + tester := newTester(t, nil) + defer tester.Close(t) + + go tester.console.Interactive() + + // Wait for a promt and send a statement back + select { + case <-tester.input.scheduler: + case <-time.After(time.Second): + t.Fatalf("initial prompt timeout") + } + select { + case tester.input.scheduler <- "2+2": + case <-time.After(time.Second): + t.Fatalf("input feedback timeout") + } + // Wait for the second promt and ensure first statement was evaluated + select { + case <-tester.input.scheduler: + case <-time.After(time.Second): + t.Fatalf("secondary prompt timeout") + } + if output := string(tester.output.Bytes()); !strings.Contains(output, "4") { + t.Fatalf("statement evaluation failed: have %s, want %s", output, "4") + } +} + +// Tests that preloaded JavaScript files have been executed before user is given +// input. +func TestPreload(t *testing.T) { + tester := newTester(t, nil) + defer tester.Close(t) + + tester.console.Evaluate("preloaded") + if output := string(tester.output.Bytes()); !strings.Contains(output, "some-preloaded-string") { + t.Fatalf("preloaded variable missing: have %s, want %s", output, "some-preloaded-string") + } +} + +// Tests that JavaScript scripts can be executes from the configured asset path. +func TestExecute(t *testing.T) { + tester := newTester(t, nil) + defer tester.Close(t) + + tester.console.Execute("exec.js") + + tester.console.Evaluate("execed") + if output := string(tester.output.Bytes()); !strings.Contains(output, "some-executed-string") { + t.Fatalf("execed variable missing: have %s, want %s", output, "some-executed-string") + } +} + +// Tests that the JavaScript objects returned by statement executions are properly +// pretty printed instead of just displaing "[object]". +func TestPrettyPrint(t *testing.T) { + tester := newTester(t, nil) + defer tester.Close(t) + + tester.console.Evaluate("obj = {int: 1, string: 'two', list: [3, 3, 3], obj: {null: null, func: function(){}}}") + + // Define some specially formatted fields + var ( + one = jsre.NumberColor("1") + two = jsre.StringColor("\"two\"") + three = jsre.NumberColor("3") + null = jsre.SpecialColor("null") + fun = jsre.FunctionColor("function()") + ) + // Assemble the actual output we're after and verify + want := `{ + int: ` + one + `, + list: [` + three + `, ` + three + `, ` + three + `], + obj: { + null: ` + null + `, + func: ` + fun + ` + }, + string: ` + two + ` +} +` + if output := string(tester.output.Bytes()); output != want { + t.Fatalf("pretty print mismatch: have %s, want %s", output, want) + } +} diff --git a/console/prompter.go b/console/prompter.go new file mode 100644 index 0000000000000..5039e8b1c153c --- /dev/null +++ b/console/prompter.go @@ -0,0 +1,156 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package console
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/peterh/liner"
+)
+
+// TerminalPrompter holds the stdin line reader (also using stdout for printing
+// prompts). Only this reader may be used for input because it keeps an internal
+// buffer.
+var TerminalPrompter = newTerminalPrompter()
+
+// UserPrompter defines the methods needed by the console to prompt the user for
+// various types of inputs.
+type UserPrompter interface {
+	// PromptInput displays the given prompt to the user and requests some textual
+	// data to be entered, returning the input of the user.
+	PromptInput(prompt string) (string, error)
+
+	// PromptPassword displays the given prompt to the user and requests some textual
+	// data to be entered, but one which must not be echoed out into the terminal.
+	// The method returns the input provided by the user.
+	PromptPassword(prompt string) (string, error)
+
+	// PromptConfirm displays the given prompt to the user and requests a boolean
+	// choice to be made, returning that choice.
+	PromptConfirm(prompt string) (bool, error)
+
+	// SetScrollHistory sets the input scrollback history that the prompter will
+	// allow the user to scroll back to.
+	SetScrollHistory(history []string)
+
+	// SetWordCompleter sets the completion function that the prompter will call to
+	// fetch completion candidates when the user presses tab.
+	SetWordCompleter(completer WordCompleter)
+}
+
+// WordCompleter takes the currently edited line with the cursor position and
+// returns the completion candidates for the partial word to be completed. If
+// the line is "Hello, wo!!!" and the cursor is before the first '!', ("Hello,
+// wo!!!", 9) is passed to the completer which may return ("Hello, ", {"world",
+// "Word"}, "!!!") to have "Hello, world!!!".
+type WordCompleter func(line string, pos int) (string, []string, string)
+
+// terminalPrompter is a UserPrompter backed by the liner package. It supports
+// prompting the user for various input, among others for non-echoing password
+// input.
+type terminalPrompter struct {
+	*liner.State
+	warned     bool
+	supported  bool
+	normalMode liner.ModeApplier
+	rawMode    liner.ModeApplier
+}
+
+// newTerminalPrompter creates a liner based user input prompter working off the
+// standard input and output streams.
+func newTerminalPrompter() *terminalPrompter {
+	r := new(terminalPrompter)
+	// Get the original mode before calling NewLiner.
+	// This is usually regular "cooked" mode where characters echo.
+	normalMode, _ := liner.TerminalMode()
+	// Turn on liner. It switches to raw mode.
+	r.State = liner.NewLiner()
+	rawMode, err := liner.TerminalMode()
+	if err != nil || !liner.TerminalSupported() {
+		r.supported = false
+	} else {
+		r.supported = true
+		r.normalMode = normalMode
+		r.rawMode = rawMode
+		// Switch back to normal mode while we're not prompting.
+		normalMode.ApplyMode()
+	}
+	r.SetCtrlCAborts(true)
+	r.SetTabCompletionStyle(liner.TabPrints)
+
+	return r
+}
+
+// PromptInput displays the given prompt to the user and requests some textual
+// data to be entered, returning the input of the user.
+func (r *terminalPrompter) PromptInput(prompt string) (string, error) {
+	if r.supported {
+		r.rawMode.ApplyMode()
+		defer r.normalMode.ApplyMode()
+	} else {
+		// liner tries to be smart about printing the prompt
+		// and doesn't print anything if input is redirected.
+		// Un-smart it by printing the prompt always.
+		fmt.Print(prompt)
+		prompt = ""
+		defer fmt.Println()
+	}
+	return r.State.Prompt(prompt)
+}
+
+// PromptPassword displays the given prompt to the user and requests some textual
+// data to be entered, but one which must not be echoed out into the terminal.
+// The method returns the input provided by the user.
+func (r *terminalPrompter) PromptPassword(prompt string) (passwd string, err error) {
+	if r.supported {
+		r.rawMode.ApplyMode()
+		defer r.normalMode.ApplyMode()
+		return r.State.PasswordPrompt(prompt)
+	}
+	if !r.warned {
+		fmt.Println("!! Unsupported terminal, password will be echoed.")
+		r.warned = true
+	}
+	// Just as in Prompt, handle printing the prompt here instead of relying on liner.
+	fmt.Print(prompt)
+	passwd, err = r.State.Prompt("")
+	fmt.Println()
+	return passwd, err
+}
+
+// PromptConfirm displays the given prompt to the user and requests a boolean
+// choice to be made, returning that choice.
+func (r *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
+	input, err := r.Prompt(prompt + " [y/N] ")
+	if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
+		return true, nil
+	}
+	return false, err
+}
+
+// SetScrollHistory sets the input scrollback history that the prompter will
+// allow the user to scroll back to.
+func (r *terminalPrompter) SetScrollHistory(history []string) {
+	r.State.ReadHistory(strings.NewReader(strings.Join(history, "\n")))
+}
+
+// SetWordCompleter sets the completion function that the prompter will call to
+// fetch completion candidates when the user presses tab.
+func (r *terminalPrompter) SetWordCompleter(completer WordCompleter) { + r.State.SetWordCompleter(liner.WordCompleter(completer)) +} diff --git a/console/testdata/exec.js b/console/testdata/exec.js new file mode 100644 index 0000000000000..59e34d7c40334 --- /dev/null +++ b/console/testdata/exec.js @@ -0,0 +1 @@ +var execed = "some-executed-string"; diff --git a/console/testdata/preload.js b/console/testdata/preload.js new file mode 100644 index 0000000000000..556793970fc4e --- /dev/null +++ b/console/testdata/preload.js @@ -0,0 +1 @@ +var preloaded = "some-preloaded-string"; diff --git a/jsre/bignumber_js.go b/internal/jsre/bignumber_js.go similarity index 100% rename from jsre/bignumber_js.go rename to internal/jsre/bignumber_js.go diff --git a/jsre/completion.go b/internal/jsre/completion.go similarity index 100% rename from jsre/completion.go rename to internal/jsre/completion.go diff --git a/jsre/completion_test.go b/internal/jsre/completion_test.go similarity index 98% rename from jsre/completion_test.go rename to internal/jsre/completion_test.go index 92af5ddb64709..ccbd73dccc881 100644 --- a/jsre/completion_test.go +++ b/internal/jsre/completion_test.go @@ -17,12 +17,13 @@ package jsre import ( + "os" "reflect" "testing" ) func TestCompleteKeywords(t *testing.T) { - re := New("") + re := New("", os.Stdout) re.Run(` function theClass() { this.foo = 3; diff --git a/jsre/ethereum_js.go b/internal/jsre/ethereum_js.go similarity index 100% rename from jsre/ethereum_js.go rename to internal/jsre/ethereum_js.go diff --git a/jsre/jsre.go b/internal/jsre/jsre.go similarity index 95% rename from jsre/jsre.go rename to internal/jsre/jsre.go index 59730bc0da7e3..8d8f4fc2a9358 100644 --- a/jsre/jsre.go +++ b/internal/jsre/jsre.go @@ -21,6 +21,7 @@ import ( crand "crypto/rand" "encoding/binary" "fmt" + "io" "io/ioutil" "math/rand" "sync" @@ -40,6 +41,7 @@ It provides some helper functions to */ type JSRE struct { assetPath string + output io.Writer evalQueue chan *evalReq stopEventLoop chan bool loopWg sync.WaitGroup @@ -60,9 +62,10 @@ type evalReq struct { } // runtime must be stopped with Stop() after use and cannot be used after stopping -func New(assetPath string) *JSRE { +func New(assetPath string, output io.Writer) *JSRE { re := &JSRE{ assetPath: assetPath, + output: output, evalQueue: make(chan *evalReq), stopEventLoop: make(chan bool), } @@ -292,19 +295,21 @@ func (self *JSRE) loadScript(call otto.FunctionCall) otto.Value { return otto.TrueValue() } -// EvalAndPrettyPrint evaluates code and pretty prints the result to -// standard output. -func (self *JSRE) EvalAndPrettyPrint(code string) (err error) { +// Evaluate executes code and pretty prints the result to the specified output +// stream. +func (self *JSRE) Evaluate(code string, w io.Writer) error { + var fail error + self.Do(func(vm *otto.Otto) { - var val otto.Value - val, err = vm.Run(code) + val, err := vm.Run(code) if err != nil { - return + fail = err + } else { + prettyPrint(vm, val, w) + fmt.Fprintln(w) } - prettyPrint(vm, val) - fmt.Println() }) - return err + return fail } // Compile compiles and then runs a piece of JS code. 
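Illustrative sketch (not part of this patch series): with the jsre hunk above, New takes an output writer alongside the asset path and EvalAndPrettyPrint is replaced by Evaluate(code, w), which pretty prints to the supplied writer and returns only the evaluation error. A minimal caller of the reworked API could look like the following; the asset path is purely illustrative.

package main

import (
	"os"

	"github.com/ethereum/go-ethereum/internal/jsre"
)

func main() {
	// New now receives the writer used for the runtime's printed output.
	re := jsre.New("/tmp/js-assets", os.Stdout)
	defer re.Stop(false)

	// Evaluate runs the statement, pretty prints the result to the given
	// writer and returns the evaluation error (if any).
	if err := re.Evaluate("2 + 2", os.Stdout); err != nil {
		panic(err)
	}
}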
diff --git a/jsre/jsre_test.go b/internal/jsre/jsre_test.go similarity index 98% rename from jsre/jsre_test.go rename to internal/jsre/jsre_test.go index ffb6999db71da..bcb6e0dd23fd7 100644 --- a/jsre/jsre_test.go +++ b/internal/jsre/jsre_test.go @@ -51,7 +51,7 @@ func newWithTestJS(t *testing.T, testjs string) (*JSRE, string) { t.Fatal("cannot create test.js:", err) } } - return New(dir), dir + return New(dir, os.Stdout), dir } func TestExec(t *testing.T) { @@ -102,7 +102,7 @@ func TestNatto(t *testing.T) { } func TestBind(t *testing.T) { - jsre := New("") + jsre := New("", os.Stdout) defer jsre.Stop(false) jsre.Bind("no", &testNativeObjectBinding{}) diff --git a/jsre/pretty.go b/internal/jsre/pretty.go similarity index 77% rename from jsre/pretty.go rename to internal/jsre/pretty.go index cd7fa5232be6a..cf4bf2cf89ebb 100644 --- a/jsre/pretty.go +++ b/internal/jsre/pretty.go @@ -18,6 +18,7 @@ package jsre import ( "fmt" + "io" "sort" "strconv" "strings" @@ -32,10 +33,10 @@ const ( ) var ( - functionColor = color.New(color.FgMagenta) - specialColor = color.New(color.Bold) - numberColor = color.New(color.FgRed) - stringColor = color.New(color.FgGreen) + FunctionColor = color.New(color.FgMagenta).SprintfFunc() + SpecialColor = color.New(color.Bold).SprintfFunc() + NumberColor = color.New(color.FgRed).SprintfFunc() + StringColor = color.New(color.FgGreen).SprintfFunc() ) // these fields are hidden when printing objects. @@ -50,19 +51,22 @@ var boringKeys = map[string]bool{ } // prettyPrint writes value to standard output. -func prettyPrint(vm *otto.Otto, value otto.Value) { - ppctx{vm}.printValue(value, 0, false) +func prettyPrint(vm *otto.Otto, value otto.Value, w io.Writer) { + ppctx{vm: vm, w: w}.printValue(value, 0, false) } -func prettyPrintJS(call otto.FunctionCall) otto.Value { +func prettyPrintJS(call otto.FunctionCall, w io.Writer) otto.Value { for _, v := range call.ArgumentList { - prettyPrint(call.Otto, v) - fmt.Println() + prettyPrint(call.Otto, v, w) + fmt.Fprintln(w) } return otto.UndefinedValue() } -type ppctx struct{ vm *otto.Otto } +type ppctx struct { + vm *otto.Otto + w io.Writer +} func (ctx ppctx) indent(level int) string { return strings.Repeat(indentString, level) @@ -73,22 +77,22 @@ func (ctx ppctx) printValue(v otto.Value, level int, inArray bool) { case v.IsObject(): ctx.printObject(v.Object(), level, inArray) case v.IsNull(): - specialColor.Print("null") + fmt.Fprint(ctx.w, SpecialColor("null")) case v.IsUndefined(): - specialColor.Print("undefined") + fmt.Fprint(ctx.w, SpecialColor("undefined")) case v.IsString(): s, _ := v.ToString() - stringColor.Printf("%q", s) + fmt.Fprint(ctx.w, StringColor("%q", s)) case v.IsBoolean(): b, _ := v.ToBoolean() - specialColor.Printf("%t", b) + fmt.Fprint(ctx.w, SpecialColor("%t", b)) case v.IsNaN(): - numberColor.Printf("NaN") + fmt.Fprint(ctx.w, NumberColor("NaN")) case v.IsNumber(): s, _ := v.ToString() - numberColor.Printf("%s", s) + fmt.Fprint(ctx.w, NumberColor("%s", s)) default: - fmt.Printf("") + fmt.Fprint(ctx.w, "") } } @@ -98,75 +102,75 @@ func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) { lv, _ := obj.Get("length") len, _ := lv.ToInteger() if len == 0 { - fmt.Printf("[]") + fmt.Fprintf(ctx.w, "[]") return } if level > maxPrettyPrintLevel { - fmt.Print("[...]") + fmt.Fprint(ctx.w, "[...]") return } - fmt.Print("[") + fmt.Fprint(ctx.w, "[") for i := int64(0); i < len; i++ { el, err := obj.Get(strconv.FormatInt(i, 10)) if err == nil { ctx.printValue(el, level+1, true) } if i < len-1 { - 
fmt.Printf(", ") + fmt.Fprintf(ctx.w, ", ") } } - fmt.Print("]") + fmt.Fprint(ctx.w, "]") case "Object": // Print values from bignumber.js as regular numbers. if ctx.isBigNumber(obj) { - numberColor.Print(toString(obj)) + fmt.Fprint(ctx.w, NumberColor("%s", toString(obj))) return } // Otherwise, print all fields indented, but stop if we're too deep. keys := ctx.fields(obj) if len(keys) == 0 { - fmt.Print("{}") + fmt.Fprint(ctx.w, "{}") return } if level > maxPrettyPrintLevel { - fmt.Print("{...}") + fmt.Fprint(ctx.w, "{...}") return } - fmt.Println("{") + fmt.Fprintln(ctx.w, "{") for i, k := range keys { v, _ := obj.Get(k) - fmt.Printf("%s%s: ", ctx.indent(level+1), k) + fmt.Fprintf(ctx.w, "%s%s: ", ctx.indent(level+1), k) ctx.printValue(v, level+1, false) if i < len(keys)-1 { - fmt.Printf(",") + fmt.Fprintf(ctx.w, ",") } - fmt.Println() + fmt.Fprintln(ctx.w) } if inArray { level-- } - fmt.Printf("%s}", ctx.indent(level)) + fmt.Fprintf(ctx.w, "%s}", ctx.indent(level)) case "Function": // Use toString() to display the argument list if possible. if robj, err := obj.Call("toString"); err != nil { - functionColor.Print("function()") + fmt.Fprint(ctx.w, FunctionColor("function()")) } else { desc := strings.Trim(strings.Split(robj.String(), "{")[0], " \t\n") desc = strings.Replace(desc, " (", "(", 1) - functionColor.Print(desc) + fmt.Fprint(ctx.w, FunctionColor("%s", desc)) } case "RegExp": - stringColor.Print(toString(obj)) + fmt.Fprint(ctx.w, StringColor("%s", toString(obj))) default: if v, _ := obj.Get("toString"); v.IsFunction() && level <= maxPrettyPrintLevel { s, _ := obj.Call("toString") - fmt.Printf("<%s %s>", obj.Class(), s.String()) + fmt.Fprintf(ctx.w, "<%s %s>", obj.Class(), s.String()) } else { - fmt.Printf("<%s>", obj.Class()) + fmt.Fprintf(ctx.w, "<%s>", obj.Class()) } } } diff --git a/rpc/json.go b/rpc/json.go index 8a3bea2eeba02..151ed546e7571 100644 --- a/rpc/json.go +++ b/rpc/json.go @@ -30,7 +30,7 @@ import ( ) const ( - jsonRPCVersion = "2.0" + JSONRPCVersion = "2.0" serviceMethodSeparator = "_" subscribeMethod = "eth_subscribe" unsubscribeMethod = "eth_unsubscribe" @@ -302,31 +302,31 @@ func parsePositionalArguments(args json.RawMessage, callbackArgs []reflect.Type) // CreateResponse will create a JSON-RPC success response with the given id and reply as result. func (c *jsonCodec) CreateResponse(id interface{}, reply interface{}) interface{} { if isHexNum(reflect.TypeOf(reply)) { - return &JSONSuccessResponse{Version: jsonRPCVersion, Id: id, Result: fmt.Sprintf(`%#x`, reply)} + return &JSONSuccessResponse{Version: JSONRPCVersion, Id: id, Result: fmt.Sprintf(`%#x`, reply)} } - return &JSONSuccessResponse{Version: jsonRPCVersion, Id: id, Result: reply} + return &JSONSuccessResponse{Version: JSONRPCVersion, Id: id, Result: reply} } // CreateErrorResponse will create a JSON-RPC error response with the given id and error. func (c *jsonCodec) CreateErrorResponse(id interface{}, err RPCError) interface{} { - return &JSONErrResponse{Version: jsonRPCVersion, Id: id, Error: JSONError{Code: err.Code(), Message: err.Error()}} + return &JSONErrResponse{Version: JSONRPCVersion, Id: id, Error: JSONError{Code: err.Code(), Message: err.Error()}} } // CreateErrorResponseWithInfo will create a JSON-RPC error response with the given id and error. // info is optional and contains additional information about the error. When an empty string is passed it is ignored. 
func (c *jsonCodec) CreateErrorResponseWithInfo(id interface{}, err RPCError, info interface{}) interface{} { - return &JSONErrResponse{Version: jsonRPCVersion, Id: id, + return &JSONErrResponse{Version: JSONRPCVersion, Id: id, Error: JSONError{Code: err.Code(), Message: err.Error(), Data: info}} } // CreateNotification will create a JSON-RPC notification with the given subscription id and event as params. func (c *jsonCodec) CreateNotification(subid string, event interface{}) interface{} { if isHexNum(reflect.TypeOf(event)) { - return &jsonNotification{Version: jsonRPCVersion, Method: notificationMethod, + return &jsonNotification{Version: JSONRPCVersion, Method: notificationMethod, Params: jsonSubscription{Subscription: subid, Result: fmt.Sprintf(`%#x`, event)}} } - return &jsonNotification{Version: jsonRPCVersion, Method: notificationMethod, + return &jsonNotification{Version: JSONRPCVersion, Method: notificationMethod, Params: jsonSubscription{Subscription: subid, Result: event}} } From 7c90a2e42e153f650122af73f05d651b04e9d553 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 11 May 2016 17:28:29 +0300 Subject: [PATCH 18/44] [release/1.4.6] console, internal/jsre: colorize JavaScript exceptions too (cherry picked from commit 14ae5708d61059d424c9be9822b85a3f4bb392b3) --- console/console.go | 15 +++++---------- console/console_test.go | 12 ++++++++++++ internal/jsre/jsre.go | 4 ++-- internal/jsre/pretty.go | 18 ++++++++++++++++++ 4 files changed, 37 insertions(+), 12 deletions(-) diff --git a/console/console.go b/console/console.go index 37c9f0afad244..d10353093f2ef 100644 --- a/console/console.go +++ b/console/console.go @@ -182,7 +182,11 @@ func (c *Console) init(preload []string) error { // Preload any JavaScript files before starting the console for _, path := range preload { if err := c.jsre.Exec(path); err != nil { - return fmt.Errorf("%s: %v", path, jsErrorString(err)) + failure := err.Error() + if ottoErr, ok := err.(*otto.Error); ok { + failure = ottoErr.String() + } + return fmt.Errorf("%s: %v", path, failure) } } // Configure the console's input prompter for scrollback and tab completion @@ -269,7 +273,6 @@ func (c *Console) Evaluate(statement string) error { } }() if err := c.jsre.Evaluate(statement, c.printer); err != nil { - fmt.Fprintf(c.printer, "%v\n", jsErrorString(err)) return err } return nil @@ -359,11 +362,3 @@ func (c *Console) Stop(graceful bool) error { c.jsre.Stop(graceful) return nil } - -// jsErrorString adds a backtrace to errors generated by otto. -func jsErrorString(err error) string { - if ottoErr, ok := err.(*otto.Error); ok { - return ottoErr.String() - } - return err.Error() -} diff --git a/console/console_test.go b/console/console_test.go index 5d38331e8ccb5..72d3a2df69121 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -281,3 +281,15 @@ func TestPrettyPrint(t *testing.T) { t.Fatalf("pretty print mismatch: have %s, want %s", output, want) } } + +// Tests that the JavaScript exceptions are properly formatted and colored. 
+func TestPrettyError(t *testing.T) { + tester := newTester(t, nil) + defer tester.Close(t) + tester.console.Evaluate("throw 'hello'") + + want := jsre.ErrorColor("hello") + "\n" + if output := string(tester.output.Bytes()); output != want { + t.Fatalf("pretty error mismatch: have %s, want %s", output, want) + } +} diff --git a/internal/jsre/jsre.go b/internal/jsre/jsre.go index 8d8f4fc2a9358..a95efd379dbcf 100644 --- a/internal/jsre/jsre.go +++ b/internal/jsre/jsre.go @@ -303,11 +303,11 @@ func (self *JSRE) Evaluate(code string, w io.Writer) error { self.Do(func(vm *otto.Otto) { val, err := vm.Run(code) if err != nil { - fail = err + prettyError(vm, err, w) } else { prettyPrint(vm, val, w) - fmt.Fprintln(w) } + fmt.Fprintln(w) }) return fail } diff --git a/internal/jsre/pretty.go b/internal/jsre/pretty.go index cf4bf2cf89ebb..30d8660ff6ec2 100644 --- a/internal/jsre/pretty.go +++ b/internal/jsre/pretty.go @@ -37,6 +37,7 @@ var ( SpecialColor = color.New(color.Bold).SprintfFunc() NumberColor = color.New(color.FgRed).SprintfFunc() StringColor = color.New(color.FgGreen).SprintfFunc() + ErrorColor = color.New(color.FgHiRed).SprintfFunc() ) // these fields are hidden when printing objects. @@ -55,6 +56,23 @@ func prettyPrint(vm *otto.Otto, value otto.Value, w io.Writer) { ppctx{vm: vm, w: w}.printValue(value, 0, false) } +// prettyError writes err to standard output. +func prettyError(vm *otto.Otto, err error, w io.Writer) { + failure := err.Error() + if ottoErr, ok := err.(*otto.Error); ok { + failure = ottoErr.String() + } + fmt.Fprint(w, ErrorColor("%s", failure)) +} + +// jsErrorString adds a backtrace to errors generated by otto. +func jsErrorString(err error) string { + if ottoErr, ok := err.(*otto.Error); ok { + return ottoErr.String() + } + return err.Error() +} + func prettyPrintJS(call otto.FunctionCall, w io.Writer) otto.Value { for _, v := range call.ArgumentList { prettyPrint(call.Otto, v, w) From 5904d58a967ba2bd11f700f654f828c00049a1f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 30 May 2016 17:30:17 +0300 Subject: [PATCH 19/44] [release/1.4.6] cmd/geth, console: fix reviewer issues (cherry picked from commit da729e5b386ca0fd32344dcc1fd63d14c0bb39ab) --- cmd/geth/accountcmd.go | 4 +- cmd/geth/chaincmd.go | 2 +- cmd/geth/main.go | 2 +- console/console.go | 23 ++++++++---- console/console_test.go | 3 +- console/prompter.go | 83 +++++++++++++++++++++++------------------ 6 files changed, 68 insertions(+), 49 deletions(-) diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go index a9cee20eedc8d..0f9d95c2c5712 100644 --- a/cmd/geth/accountcmd.go +++ b/cmd/geth/accountcmd.go @@ -216,12 +216,12 @@ func getPassPhrase(prompt string, confirmation bool, i int, passwords []string) if prompt != "" { fmt.Println(prompt) } - password, err := console.TerminalPrompter.PromptPassword("Passphrase: ") + password, err := console.Stdin.PromptPassword("Passphrase: ") if err != nil { utils.Fatalf("Failed to read passphrase: %v", err) } if confirmation { - confirm, err := console.TerminalPrompter.PromptPassword("Repeat passphrase: ") + confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ") if err != nil { utils.Fatalf("Failed to read passphrase confirmation: %v", err) } diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 457dbcfff518f..4f47de5d7095c 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -117,7 +117,7 @@ func exportChain(ctx *cli.Context) { } func removeDB(ctx *cli.Context) { - confirm, err := 
console.TerminalPrompter.PromptConfirm("Remove local database?") + confirm, err := console.Stdin.PromptConfirm("Remove local database?") if err != nil { utils.Fatalf("%v", err) } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 3654788ce9d28..cf7d6944e19ac 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -236,7 +236,7 @@ participating. app.After = func(ctx *cli.Context) error { logger.Flush() debug.Exit() - console.TerminalPrompter.Close() // Resets terminal mode. + console.Stdin.Close() // Resets terminal mode. return nil } } diff --git a/console/console.go b/console/console.go index d10353093f2ef..a19b267bcbcf5 100644 --- a/console/console.go +++ b/console/console.go @@ -74,7 +74,7 @@ type Console struct { func New(config Config) (*Console, error) { // Handle unset config values gracefully if config.Prompter == nil { - config.Prompter = TerminalPrompter + config.Prompter = Stdin } if config.Prompt == "" { config.Prompt = DefaultPrompt @@ -192,9 +192,10 @@ func (c *Console) init(preload []string) error { // Configure the console's input prompter for scrollback and tab completion if c.prompter != nil { if content, err := ioutil.ReadFile(c.histPath); err != nil { - c.prompter.SetScrollHistory(nil) + c.prompter.SetHistory(nil) } else { - c.prompter.SetScrollHistory(strings.Split(string(content), "\n")) + c.history = strings.Split(string(content), "\n") + c.prompter.SetHistory(c.history) } c.prompter.SetWordCompleter(c.AutoCompleteInput) } @@ -322,7 +323,7 @@ func (c *Console) Interactive() { case line, ok := <-scheduler: // User input was returned by the prompter, handle special cases - if !ok || (indents <= 0 && exit.MatchString(input)) { + if !ok || (indents <= 0 && exit.MatchString(line)) { return } if onlyWhitespace.MatchString(line) { @@ -339,8 +340,13 @@ func (c *Console) Interactive() { } // If all the needed lines are present, save the command and run if indents <= 0 { - if len(input) != 0 && input[0] != ' ' && !passwordRegexp.MatchString(input) { - c.history = append(c.history, input[:len(input)-1]) + if len(input) > 0 && input[0] != ' ' && !passwordRegexp.MatchString(input) { + if command := strings.TrimSpace(input); len(c.history) == 0 || command != c.history[len(c.history)-1] { + c.history = append(c.history, command) + if c.prompter != nil { + c.prompter.AppendHistory(command) + } + } } c.Evaluate(input) input = "" @@ -356,7 +362,10 @@ func (c *Console) Execute(path string) error { // Stop cleans up the console and terminates the runtime envorinment. 
func (c *Console) Stop(graceful bool) error { - if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), os.ModePerm); err != nil { + if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil { + return err + } + if err := os.Chmod(c.histPath, 0600); err != nil { // Force 0600, even if it was different previously return err } c.jsre.Stop(graceful) diff --git a/console/console_test.go b/console/console_test.go index 72d3a2df69121..91108782424c1 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -68,7 +68,8 @@ func (p *hookedPrompter) PromptPassword(prompt string) (string, error) { func (p *hookedPrompter) PromptConfirm(prompt string) (bool, error) { return false, errors.New("not implemented") } -func (p *hookedPrompter) SetScrollHistory(history []string) {} +func (p *hookedPrompter) SetHistory(history []string) {} +func (p *hookedPrompter) AppendHistory(command string) {} func (p *hookedPrompter) SetWordCompleter(completer WordCompleter) {} // tester is a console test environment for the console tests to operate on. diff --git a/console/prompter.go b/console/prompter.go index 5039e8b1c153c..0e4a8a53ec997 100644 --- a/console/prompter.go +++ b/console/prompter.go @@ -23,10 +23,9 @@ import ( "github.com/peterh/liner" ) -// TerminalPrompter holds the stdin line reader (also using stdout for printing -// prompts). Only this reader may be used for input because it keeps an internal -// buffer. -var TerminalPrompter = newTerminalPrompter() +// Stdin holds the stdin line reader (also using stdout for printing prompts). +// Only this reader may be used for input because it keeps an internal buffer. +var Stdin = newTerminalPrompter() // UserPrompter defines the methods needed by the console to promt the user for // various types of inputs. @@ -44,9 +43,13 @@ type UserPrompter interface { // choice to be made, returning that choice. PromptConfirm(prompt string) (bool, error) - // SetScrollHistory sets the the input scrollback history that the prompter will - // allow the user to scoll back to. - SetScrollHistory(history []string) + // SetHistory sets the the input scrollback history that the prompter will allow + // the user to scoll back to. + SetHistory(history []string) + + // AppendHistory appends an entry to the scrollback history. It should be called + // if and only if the prompt to append was a valid command. + AppendHistory(command string) // SetWordCompleter sets the completion function that the prompter will call to // fetch completion candidates when the user presses tab. @@ -74,34 +77,34 @@ type terminalPrompter struct { // newTerminalPrompter creates a liner based user input prompter working off the // standard input and output streams. func newTerminalPrompter() *terminalPrompter { - r := new(terminalPrompter) + p := new(terminalPrompter) // Get the original mode before calling NewLiner. // This is usually regular "cooked" mode where characters echo. normalMode, _ := liner.TerminalMode() // Turn on liner. It switches to raw mode. - r.State = liner.NewLiner() + p.State = liner.NewLiner() rawMode, err := liner.TerminalMode() if err != nil || !liner.TerminalSupported() { - r.supported = false + p.supported = false } else { - r.supported = true - r.normalMode = normalMode - r.rawMode = rawMode + p.supported = true + p.normalMode = normalMode + p.rawMode = rawMode // Switch back to normal mode while we're not prompting. 
normalMode.ApplyMode() } - r.SetCtrlCAborts(true) - r.SetTabCompletionStyle(liner.TabPrints) + p.SetCtrlCAborts(true) + p.SetTabCompletionStyle(liner.TabPrints) - return r + return p } // PromptInput displays the given prompt to the user and requests some textual // data to be entered, returning the input of the user. -func (r *terminalPrompter) PromptInput(prompt string) (string, error) { - if r.supported { - r.rawMode.ApplyMode() - defer r.normalMode.ApplyMode() +func (p *terminalPrompter) PromptInput(prompt string) (string, error) { + if p.supported { + p.rawMode.ApplyMode() + defer p.normalMode.ApplyMode() } else { // liner tries to be smart about printing the prompt // and doesn't print anything if input is redirected. @@ -110,47 +113,53 @@ func (r *terminalPrompter) PromptInput(prompt string) (string, error) { prompt = "" defer fmt.Println() } - return r.State.Prompt(prompt) + return p.State.Prompt(prompt) } // PromptPassword displays the given prompt to the user and requests some textual // data to be entered, but one which must not be echoed out into the terminal. // The method returns the input provided by the user. -func (r *terminalPrompter) PromptPassword(prompt string) (passwd string, err error) { - if r.supported { - r.rawMode.ApplyMode() - defer r.normalMode.ApplyMode() - return r.State.PasswordPrompt(prompt) +func (p *terminalPrompter) PromptPassword(prompt string) (passwd string, err error) { + if p.supported { + p.rawMode.ApplyMode() + defer p.normalMode.ApplyMode() + return p.State.PasswordPrompt(prompt) } - if !r.warned { + if !p.warned { fmt.Println("!! Unsupported terminal, password will be echoed.") - r.warned = true + p.warned = true } // Just as in Prompt, handle printing the prompt here instead of relying on liner. fmt.Print(prompt) - passwd, err = r.State.Prompt("") + passwd, err = p.State.Prompt("") fmt.Println() return passwd, err } // PromptConfirm displays the given prompt to the user and requests a boolean // choice to be made, returning that choice. -func (r *terminalPrompter) PromptConfirm(prompt string) (bool, error) { - input, err := r.Prompt(prompt + " [y/N] ") +func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) { + input, err := p.Prompt(prompt + " [y/N] ") if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" { return true, nil } return false, err } -// SetScrollHistory sets the the input scrollback history that the prompter will -// allow the user to scoll back to. -func (r *terminalPrompter) SetScrollHistory(history []string) { - r.State.ReadHistory(strings.NewReader(strings.Join(history, "\n"))) +// SetHistory sets the the input scrollback history that the prompter will allow +// the user to scoll back to. +func (p *terminalPrompter) SetHistory(history []string) { + p.State.ReadHistory(strings.NewReader(strings.Join(history, "\n"))) +} + +// AppendHistory appends an entry to the scrollback history. It should be called +// if and only if the prompt to append was a valid command. +func (p *terminalPrompter) AppendHistory(command string) { + p.State.AppendHistory(command) } // SetWordCompleter sets the completion function that the prompter will call to // fetch completion candidates when the user presses tab. 
-func (r *terminalPrompter) SetWordCompleter(completer WordCompleter) { - r.State.SetWordCompleter(liner.WordCompleter(completer)) +func (p *terminalPrompter) SetWordCompleter(completer WordCompleter) { + p.State.SetWordCompleter(liner.WordCompleter(completer)) } From 4918c820c6d9b64f26b302ed4d1778d94cc7172c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 27 May 2016 14:26:00 +0300 Subject: [PATCH 20/44] [release/1.4.6] eth/downloader, trie: pull head state concurrently with chain (cherry picked from commit 4f1d92b3329572d75a20b9f9e1cccdf74aa7c79f) --- eth/downloader/downloader.go | 56 ++++++++++++++++++------------------ eth/downloader/queue.go | 8 +++++- trie/sync.go | 7 ++++- 3 files changed, 41 insertions(+), 30 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 74bff2b66f79f..8cb0d21f7a316 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" "github.com/rcrowley/go-metrics" ) @@ -114,7 +115,6 @@ type Downloader struct { // Statistics syncStatsChainOrigin uint64 // Origin block number where syncing started at syncStatsChainHeight uint64 // Highest block number known when syncing started - syncStatsStateTotal uint64 // Total number of node state entries known so far syncStatsStateDone uint64 // Number of state trie entries already pulled syncStatsLock sync.RWMutex // Lock protecting the sync stats fields @@ -321,12 +321,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode empty = true } } - // Reset any ephemeral sync statistics - d.syncStatsLock.Lock() - d.syncStatsStateTotal = 0 - d.syncStatsStateDone = 0 - d.syncStatsLock.Unlock() - // Create cancel channel for aborting mid-flight d.cancelLock.Lock() d.cancelCh = make(chan struct{}) @@ -382,7 +376,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e d.syncStatsLock.Unlock() // Initiate the sync using a concurrent hash and block retrieval algorithm - d.queue.Prepare(origin+1, d.mode, 0) + d.queue.Prepare(origin+1, d.mode, 0, nil) if d.syncInitHook != nil { d.syncInitHook(origin, latest) } @@ -397,7 +391,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e if err != nil { return err } - origin, err := d.findAncestor(p, latest) + height := latest.Number.Uint64() + + origin, err := d.findAncestor(p, height) if err != nil { return err } @@ -405,22 +401,22 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { d.syncStatsChainOrigin = origin } - d.syncStatsChainHeight = latest + d.syncStatsChainHeight = height d.syncStatsLock.Unlock() // Initiate the sync using a concurrent header and content retrieval algorithm pivot := uint64(0) switch d.mode { case LightSync: - pivot = latest + pivot = height case FastSync: // Calculate the new fast/slow sync pivot point pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval))) if err != nil { panic(fmt.Sprintf("Failed to access crypto random source: %v", err)) } - if latest > uint64(fsMinFullBlocks)+pivotOffset.Uint64() { - pivot = latest - uint64(fsMinFullBlocks) - pivotOffset.Uint64() + if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() { + pivot = height - uint64(fsMinFullBlocks) - 
pivotOffset.Uint64() } // If the point is below the origin, move origin back to ensure state download if pivot < origin { @@ -432,9 +428,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e } glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot) } - d.queue.Prepare(origin+1, d.mode, pivot) + d.queue.Prepare(origin+1, d.mode, pivot, latest) if d.syncInitHook != nil { - d.syncInitHook(origin, latest) + d.syncInitHook(origin, height) } return d.spawnSync(origin+1, func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved @@ -952,7 +948,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { // fetchHeight retrieves the head header of the remote peer to aid in estimating // the total time a pending synchronisation would take. -func (d *Downloader) fetchHeight(p *peer) (uint64, error) { +func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) { glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p) // Request the advertised remote head block and wait for the response @@ -962,7 +958,7 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) { for { select { case <-d.cancelCh: - return 0, errCancelBlockFetch + return nil, errCancelBlockFetch case packet := <-d.headerCh: // Discard anything not from the origin peer @@ -974,13 +970,13 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) { headers := packet.(*headerPack).headers if len(headers) != 1 { glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers)) - return 0, errBadPeer + return nil, errBadPeer } - return headers[0].Number.Uint64(), nil + return headers[0], nil case <-timeout: glog.V(logger.Debug).Infof("%v: head header timeout", p) - return 0, errTimeout + return nil, errTimeout case <-d.bodyCh: case <-d.stateCh: @@ -1369,10 +1365,10 @@ func (d *Downloader) fetchNodeData() error { deliver = func(packet dataPack) (int, error) { start := time.Now() return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(err error, delivered int) { - // If the peer gave us nothing, stalling fast sync, drop - if delivered == 0 { - glog.V(logger.Debug).Infof("peer %s: stalling state delivery, dropping", packet.PeerId()) - d.dropPeer(packet.PeerId()) + // If the peer returned old-requested data, forgive + if err == trie.ErrNotRequested { + glog.V(logger.Info).Infof("peer %s: replied to stale state request, forgiving", packet.PeerId()) + return } if err != nil { // If the node data processing failed, the root hash is very wrong, abort @@ -1381,17 +1377,21 @@ func (d *Downloader) fetchNodeData() error { return } // Processing succeeded, notify state fetcher of continuation - if d.queue.PendingNodeData() > 0 { + pending := d.queue.PendingNodeData() + if pending > 0 { select { case d.stateWakeCh <- true: default: } } - // Log a message to the user and return d.syncStatsLock.Lock() - defer d.syncStatsLock.Unlock() d.syncStatsStateDone += uint64(delivered) - glog.V(logger.Info).Infof("imported %d state entries in %v: processed %d in total", delivered, time.Since(start), d.syncStatsStateDone) + d.syncStatsLock.Unlock() + + // Log a message to the user and return + if delivered > 0 { + glog.V(logger.Info).Infof("imported %d state entries in %v: processed %d, pending at least %d", delivered, time.Since(start), d.syncStatsStateDone, pending) + } }) } expire = func() map[string]int { return d.queue.ExpireNodeData(stateTTL) } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 
195eae4ff9fdc..01897af6d4378 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -1262,13 +1262,19 @@ func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(error, // Prepare configures the result cache to allow accepting and caching inbound // fetch results. -func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64) { +func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) { q.lock.Lock() defer q.lock.Unlock() + // Prepare the queue for sync results if q.resultOffset < offset { q.resultOffset = offset } q.fastSyncPivot = pivot q.mode = mode + + // If long running fast sync, also start up a head stateretrieval immediately + if mode == FastSync && pivot > 0 { + q.stateScheduler = state.NewStateSync(head.Root, q.stateDatabase) + } } diff --git a/trie/sync.go b/trie/sync.go index a35478f837d50..6e9e029b931ae 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -17,6 +17,7 @@ package trie import ( + "errors" "fmt" "github.com/ethereum/go-ethereum/common" @@ -24,6 +25,10 @@ import ( "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) +// ErrNotRequested is returned by the trie sync when it's requested to process a +// node it did not request. +var ErrNotRequested = errors.New("not requested") + // request represents a scheduled or already in-flight state retrieval request. type request struct { hash common.Hash // Hash of the node data content to retrieve @@ -144,7 +149,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) { // If the item was not requested, bail out request := s.requests[item.Hash] if request == nil { - return i, fmt.Errorf("not requested: %x", item.Hash) + return i, ErrNotRequested } // If the item is a raw entry request, commit directly if request.object == nil { From cd134178f7f13ac7d162cf76971bed6c6706d339 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 30 May 2016 12:01:50 +0300 Subject: [PATCH 21/44] [release/1.4.6] eth/downloader: ensure cancel channel is closed post sync (cherry picked from commit 4496a44f68246bf128bb822c3f3a98f38adfdbdd) --- eth/downloader/downloader.go | 2 ++ eth/downloader/downloader_test.go | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 8cb0d21f7a316..f6dbb4610f5d8 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -326,6 +326,8 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode d.cancelCh = make(chan struct{}) d.cancelLock.Unlock() + defer d.cancel() // No matter what, we can't leave the cancel channel open + // Set the requested sync mode, unless it's forbidden d.mode = mode if d.mode == FastSync && d.noFast { diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 4ea8a8abe3a81..1cf0e7cd31913 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -188,7 +188,17 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { } } dl.lock.RUnlock() - return dl.downloader.synchronise(id, hash, td, mode) + + // Synchronise with the chosen peer and ensure proper cleanup afterwards + err := dl.downloader.synchronise(id, hash, td, mode) + select { + case <-dl.downloader.cancelCh: + // Ok, downloader fully cancelled after sync cycle + default: + // Downloader is still accepting packets, can block a peer up + panic("downloader active post sync cycle") // panic will be caught by tester + } + 
return err } // hasHeader checks if a header is present in the testers canonical chain. From c046126c875f8c997b9f6b500b0489f74d23b9a4 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 2 Jun 2016 21:20:32 +0200 Subject: [PATCH 22/44] [release/1.4.6] internal/jsre: ensure Stop can be called more than once This makes "geth js file.js" terminate again. (cherry picked from commit fdba0cb03cf4e5a87cdcc2ebc0f381ad32f5ad3e) --- internal/jsre/jsre.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/internal/jsre/jsre.go b/internal/jsre/jsre.go index a95efd379dbcf..4813893047ce1 100644 --- a/internal/jsre/jsre.go +++ b/internal/jsre/jsre.go @@ -24,7 +24,6 @@ import ( "io" "io/ioutil" "math/rand" - "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -44,7 +43,7 @@ type JSRE struct { output io.Writer evalQueue chan *evalReq stopEventLoop chan bool - loopWg sync.WaitGroup + closed chan struct{} } // jsTimer is a single timer instance with a callback function @@ -66,10 +65,10 @@ func New(assetPath string, output io.Writer) *JSRE { re := &JSRE{ assetPath: assetPath, output: output, + closed: make(chan struct{}), evalQueue: make(chan *evalReq), stopEventLoop: make(chan bool), } - re.loopWg.Add(1) go re.runEventLoop() re.Set("loadScript", re.loadScript) re.Set("inspect", prettyPrintJS) @@ -98,6 +97,8 @@ func randomSource() *rand.Rand { // functions should be used if and only if running a routine that was already // called from JS through an RPC call. func (self *JSRE) runEventLoop() { + defer close(self.closed) + vm := otto.New() r := randomSource() vm.SetRandomSource(r.Float64) @@ -213,8 +214,6 @@ loop: timer.timer.Stop() delete(registry, timer) } - - self.loopWg.Done() } // Do executes the given function on the JS event loop. @@ -227,8 +226,11 @@ func (self *JSRE) Do(fn func(*otto.Otto)) { // stops the event loop before exit, optionally waits for all timers to expire func (self *JSRE) Stop(waitForCallbacks bool) { - self.stopEventLoop <- waitForCallbacks - self.loopWg.Wait() + select { + case <-self.closed: + case self.stopEventLoop <- waitForCallbacks: + <-self.closed + } } // Exec(file) loads and runs the contents of a file From 8b90a49f3d60a30f0e89d62ca573886e0e6b4558 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 2 Jun 2016 22:33:11 +0200 Subject: [PATCH 23/44] [release/1.4.6] console: remove unnecessary JS evaluation in Welcome (cherry picked from commit ad0e6e971e7d03c07842cc236fec09c73f93f465) --- console/console.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/console/console.go b/console/console.go index a19b267bcbcf5..baa9cf545729d 100644 --- a/console/console.go +++ b/console/console.go @@ -244,15 +244,13 @@ func (c *Console) AutoCompleteInput(line string, pos int) (string, []string, str // console's available modules. 
func (c *Console) Welcome() { // Print some generic Geth metadata + fmt.Fprintf(c.printer, "Welcome to the Geth JavaScript console!\n\n") c.jsre.Run(` - (function () { - console.log("Welcome to the Geth JavaScript console!\n"); - console.log("instance: " + web3.version.node); - console.log("coinbase: " + eth.coinbase); - console.log("at block: " + eth.blockNumber + " (" + new Date(1000 * eth.getBlock(eth.blockNumber).timestamp) + ")"); - console.log(" datadir: " + admin.datadir); - })(); - `) + console.log("instance: " + web3.version.node); + console.log("coinbase: " + eth.coinbase); + console.log("at block: " + eth.blockNumber + " (" + new Date(1000 * eth.getBlock(eth.blockNumber).timestamp) + ")"); + console.log(" datadir: " + admin.datadir); + `) // List all the supported modules for the user to call if apis, err := c.client.SupportedModules(); err == nil { modules := make([]string, 0, len(apis)) @@ -260,9 +258,9 @@ func (c *Console) Welcome() { modules = append(modules, fmt.Sprintf("%s:%s", api, version)) } sort.Strings(modules) - c.jsre.Run("(function () { console.log(' modules: " + strings.Join(modules, " ") + "'); })();") + fmt.Fprintln(c.printer, " modules:", strings.Join(modules, " ")) } - c.jsre.Run("(function () { console.log(); })();") + fmt.Fprintln(c.printer) } // Evaluate executes code and pretty prints the result to the specified output From 3d69970c1555f8a730682039763d575c916fa5f6 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 2 Jun 2016 22:33:57 +0200 Subject: [PATCH 24/44] [release/1.4.6] cmd/geth: make console tests more robust * use --port 0 to avoid p2p port conflicts * use --maxpeers 0 so it doesn't connect to bootstrap nodes * use geth.expectExit() to wait for termination (cherry picked from commit b57b6e341e3865de3b3a5aa108e642d1a6a5b590) --- cmd/geth/consolecmd_test.go | 46 ++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 9cfb3e4e35bc8..e59fe1415b025 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -27,7 +27,6 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/rpc" ) @@ -37,9 +36,10 @@ func TestConsoleWelcome(t *testing.T) { coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" // Start a geth console, make sure it's cleaned up and terminate the console - geth := runGeth(t, "--nat", "none", "--nodiscover", "--etherbase", coinbase, "-shh", "console") - defer geth.expectExit() - geth.stdin.Close() + geth := runGeth(t, + "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", + "--etherbase", coinbase, "--shh", + "console") // Gather all the infos the welcome message needs to contain geth.setTemplateFunc("goos", func() string { return runtime.GOOS }) @@ -51,7 +51,6 @@ func TestConsoleWelcome(t *testing.T) { sort.Strings(apis) return apis }) - geth.setTemplateFunc("prompt", func() string { return console.DefaultPrompt }) // Verify the actual welcome message to the required template geth.expect(` @@ -63,52 +62,63 @@ at block: 0 ({{niltime}}) datadir: {{.Datadir}} modules:{{range apis}} {{.}}:1.0{{end}} -{{prompt}} +> {{.InputLine "exit"}} `) + geth.expectExit() } // Tests that a console can be attached to a running node via various means. 
func TestIPCAttachWelcome(t *testing.T) { // Configure the instance for IPC attachement coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - var ipc string if runtime.GOOS == "windows" { ipc = `\\.\pipe\geth` + strconv.Itoa(rand.Int()) } else { ws := tmpdir(t) defer os.RemoveAll(ws) - ipc = filepath.Join(ws, "geth.ipc") } - // Run the parent geth and attach with a child console - geth := runGeth(t, "--nat", "none", "--nodiscover", "--etherbase", coinbase, "-shh", "--ipcpath", ipc) - defer geth.interrupt() + // Note: we need --shh because testAttachWelcome checks for default + // list of ipc modules and shh is included there. + geth := runGeth(t, + "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", + "--etherbase", coinbase, "--shh", "--ipcpath", ipc) time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open testAttachWelcome(t, geth, "ipc:"+ipc) + + geth.interrupt() + geth.expectExit() } func TestHTTPAttachWelcome(t *testing.T) { coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P - - geth := runGeth(t, "--nat", "none", "--nodiscover", "--etherbase", coinbase, "--rpc", "--rpcport", port) - defer geth.interrupt() + geth := runGeth(t, + "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", + "--etherbase", coinbase, "--rpc", "--rpcport", port) time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open testAttachWelcome(t, geth, "http://localhost:"+port) + + geth.interrupt() + geth.expectExit() } func TestWSAttachWelcome(t *testing.T) { coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P - geth := runGeth(t, "--nat", "none", "--nodiscover", "--etherbase", coinbase, "--ws", "--wsport", port) - defer geth.interrupt() + geth := runGeth(t, + "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", + "--etherbase", coinbase, "--ws", "--wsport", port) time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open testAttachWelcome(t, geth, "ws://localhost:"+port) + + geth.interrupt() + geth.expectExit() } func testAttachWelcome(t *testing.T, geth *testgeth, endpoint string) { @@ -135,7 +145,6 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint string) { sort.Strings(apis) return apis }) - attach.setTemplateFunc("prompt", func() string { return console.DefaultPrompt }) // Verify the actual welcome message to the required template attach.expect(` @@ -147,6 +156,7 @@ at block: 0 ({{niltime}}){{if ipc}} datadir: {{datadir}}{{end}} modules:{{range apis}} {{.}}:1.0{{end}} -{{prompt}} +> {{.InputLine "exit" }} `) + attach.expectExit() } From 44b912ec64b0d732a528e61ff6c899e3e51eddf7 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 2 Jun 2016 20:33:45 +0200 Subject: [PATCH 25/44] [release/1.4.6] core: add missing lock in TxPool.{GetTransaction,RemoveTx} Fixes #2650 (cherry picked from commit fc85dd175ebeef4996e5d370a7a2f085c922196d) --- core/tx_pool.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index f2eb2bbdd3a75..59635637702e1 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -368,6 +368,9 @@ func (self *TxPool) AddTransactions(txs []*types.Transaction) { // GetTransaction returns a transaction if it is contained in the pool // and nil otherwise. 
func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction { + tp.mu.RLock() + defer tp.mu.RUnlock() + // check the txs first if tx, ok := tp.pending[hash]; ok { return tx @@ -421,12 +424,18 @@ func (self *TxPool) RemoveTransactions(txs types.Transactions) { self.mu.Lock() defer self.mu.Unlock() for _, tx := range txs { - self.RemoveTx(tx.Hash()) + self.removeTx(tx.Hash()) } } // RemoveTx removes the transaction with the given hash from the pool. func (pool *TxPool) RemoveTx(hash common.Hash) { + pool.mu.Lock() + defer pool.mu.Unlock() + pool.removeTx(hash) +} + +func (pool *TxPool) removeTx(hash common.Hash) { // delete from pending pool delete(pool.pending, hash) // delete from queue From a29bdf547ceff7f8928c527964db50289d33de3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 2 Jun 2016 12:37:14 +0300 Subject: [PATCH 26/44] [release/1.4.6] eth/downloader: make fast sync resilient to critical section fails (cherry picked from commit 61ee9f299deb2bace8aa37778d248822793dd5ce) --- eth/downloader/downloader.go | 65 +++++++++++++++++++++------- eth/downloader/downloader_test.go | 70 +++++++++++++++++++++++++------ 2 files changed, 109 insertions(+), 26 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index f6dbb4610f5d8..0bc8b4acf1440 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -73,6 +73,7 @@ var ( fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it fsPivotInterval = 512 // Number of headers out of which to randomize the pivot point fsMinFullBlocks = 1024 // Number of blocks to retrieve fully even in fast sync + fsCriticalTrials = 10 // Number of times to retry in the cricical section before bailing ) var ( @@ -103,13 +104,15 @@ var ( ) type Downloader struct { - mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) - noFast bool // Flag to disable fast syncing in case of a security error - mux *event.TypeMux // Event multiplexer to announce sync operation events + mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) + mux *event.TypeMux // Event multiplexer to announce sync operation events queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed + fsPivotLock *types.Header // Pivot header on critical section entry (cannot change between retries) + fsPivotFails int // Number of fast sync failures in the critical section + interrupt int32 // Atomic boolean to signal termination // Statistics @@ -314,6 +317,15 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode default: } } + for _, ch := range []chan dataPack{d.hashCh, d.blockCh, d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} { + for empty := false; !empty; { + select { + case <-ch: + default: + empty = true + } + } + } for empty := false; !empty; { select { case <-d.headerProcCh: @@ -330,7 +342,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode // Set the requested sync mode, unless it's forbidden d.mode = mode - if d.mode == FastSync && d.noFast { + if d.mode == FastSync && d.fsPivotFails >= fsCriticalTrials { d.mode = FullSync } // Retrieve the origin peer and initiate the downloading process @@ -413,12 +425,17 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e pivot = height case FastSync: // Calculate the new fast/slow sync pivot point - pivotOffset, err 
:= rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval))) - if err != nil { - panic(fmt.Sprintf("Failed to access crypto random source: %v", err)) - } - if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() { - pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64() + if d.fsPivotLock == nil { + pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval))) + if err != nil { + panic(fmt.Sprintf("Failed to access crypto random source: %v", err)) + } + if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() { + pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64() + } + } else { + // Pivot point locked in, use this and do not pick a new one! + pivot = d.fsPivotLock.Number.Uint64() } // If the point is below the origin, move origin back to ensure state download if pivot < origin { @@ -1218,8 +1235,12 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error { // If no more headers are inbound, notify the content fetchers and return if packet.Items() == 0 { glog.V(logger.Debug).Infof("%v: no available headers", p) - d.headerProcCh <- nil - return nil + select { + case d.headerProcCh <- nil: + return nil + case <-d.cancelCh: + return errCancelHeaderFetch + } } headers := packet.(*headerPack).headers @@ -1611,9 +1632,18 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)", len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, d.headFastBlock().Number(), lastBlock, d.headBlock().Number()) - // If we're already past the pivot point, this could be an attack, disable fast sync + // If we're already past the pivot point, this could be an attack, thread carefully if rollback[len(rollback)-1].Number.Uint64() > pivot { - d.noFast = true + // If we didn't ever fail, lock in te pivot header (must! not! change!) + if d.fsPivotFails == 0 { + for _, header := range rollback { + if header.Number.Uint64() == pivot { + glog.V(logger.Warn).Infof("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]) + d.fsPivotLock = header + } + } + } + d.fsPivotFails++ } } }() @@ -1712,6 +1742,13 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 
} } + // If we're fast syncing and just pulled in the pivot, make sure it's the one locked in + if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot { + if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() { + glog.V(logger.Warn).Infof("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]) + return errInvalidChain + } + } // Unless we're doing light chains, schedule the headers for associated content retrieval if d.mode == FullSync || d.mode == FastSync { // If we've reached the allowed number of pending headers, stall a bit diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 1cf0e7cd31913..f3a0e38f1ae50 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -149,22 +149,25 @@ type downloadTester struct { peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains + peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return + lock sync.RWMutex } // newTester creates a new downloader test mocker. func newTester() *downloadTester { tester := &downloadTester{ - ownHashes: []common.Hash{genesis.Hash()}, - ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, - ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, - ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil}, - ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()}, - peerHashes: make(map[string][]common.Hash), - peerHeaders: make(map[string]map[common.Hash]*types.Header), - peerBlocks: make(map[string]map[common.Hash]*types.Block), - peerReceipts: make(map[string]map[common.Hash]types.Receipts), - peerChainTds: make(map[string]map[common.Hash]*big.Int), + ownHashes: []common.Hash{genesis.Hash()}, + ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, + ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, + ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil}, + ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()}, + peerHashes: make(map[string][]common.Hash), + peerHeaders: make(map[string]map[common.Hash]*types.Header), + peerBlocks: make(map[string]map[common.Hash]*types.Block), + peerReceipts: make(map[string]map[common.Hash]types.Receipts), + peerChainTds: make(map[string]map[common.Hash]*big.Int), + peerMissingStates: make(map[string]map[common.Hash]bool), } tester.stateDb, _ = ethdb.NewMemDatabase() tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00}) @@ -408,6 +411,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha dl.peerBlocks[id] = make(map[common.Hash]*types.Block) dl.peerReceipts[id] = make(map[common.Hash]types.Receipts) dl.peerChainTds[id] = make(map[common.Hash]*big.Int) + dl.peerMissingStates[id] = make(map[common.Hash]bool) genesis := hashes[len(hashes)-1] if header := headers[genesis]; header != nil { @@ -648,7 +652,9 @@ func (dl *downloadTester) peerGetNodeDataFn(id string, delay time.Duration) func results := make([][]byte, 0, len(hashes)) for _, hash := range hashes { if data, err := testdb.Get(hash.Bytes()); err == nil { - results = append(results, data) + 
if !dl.peerMissingStates[id][hash] { + results = append(results, data) + } } } go dl.downloader.DeliverNodeData(id, results) @@ -1288,7 +1294,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts) missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 - tester.downloader.noFast = false + tester.downloader.fsPivotFails = 0 tester.downloader.syncInitHook = func(uint64, uint64) { for i := missing; i <= len(hashes); i++ { delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i]) @@ -1307,6 +1313,8 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { t.Errorf("fast sync pivot block #%d not rolled back", head) } } + tester.downloader.fsPivotFails = fsCriticalTrials + // Synchronise with the valid peer and make sure sync succeeds. Since the last // rollback should also disable fast syncing for this process, verify that we // did a fresh full sync. Note, we can't assert anything about the receipts @@ -1749,3 +1757,41 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) { } } } + +// Tests that if fast sync aborts in the critical section, it can restart a few +// times before giving up. +func TestFastCriticalRestarts63(t *testing.T) { testFastCriticalRestarts(t, 63) } +func TestFastCriticalRestarts64(t *testing.T) { testFastCriticalRestarts(t, 64) } + +func testFastCriticalRestarts(t *testing.T, protocol int) { + t.Parallel() + + // Create a large enough blockchin to actually fast sync on + targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15 + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) + + // Create a tester peer with the critical section state roots missing (force failures) + tester := newTester() + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) + + for i := 0; i < fsPivotInterval; i++ { + tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true + } + // Synchronise with the peer a few times and make sure they fail until the retry limit + for i := 0; i < fsCriticalTrials; i++ { + // Attempt a sync and ensure it fails properly + if err := tester.sync("peer", nil, FastSync); err == nil { + t.Fatalf("failing fast sync succeeded: %v", err) + } + // If it's the first failure, pivot should be locked => reenable all others to detect pivot changes + if i == 0 { + tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true} + } + time.Sleep(100 * time.Millisecond) // Make sure no in-flight requests remain + } + // Retry limit exhausted, downloader will switch to full sync, should succeed + if err := tester.sync("peer", nil, FastSync); err != nil { + t.Fatalf("failed to synchronise blocks in slow sync: %v", err) + } + assertOwnChain(t, tester, targetBlocks+1) +} From 058c5fe9602fd8d4db8d4cb84fa2ee5b8b1592f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 1 Jun 2016 18:07:25 +0300 Subject: [PATCH 27/44] [release/1.4.6] eth/downloader: adaptive quality of service tuning (cherry picked from commit 88f174a014c1f2f99fa6d6a8054ada28a0b43504) --- eth/downloader/downloader.go | 144 ++++++++++++++++++++++++------ eth/downloader/downloader_test.go | 58 +++++++++++- eth/downloader/peer.go | 81 +++++++++++++---- 3 files changed, 235 insertions(+), 48 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 0bc8b4acf1440..92124cfeb2dfb 100644 --- 
a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -54,14 +54,15 @@ var ( blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired - headerTargetRTT = time.Second // [eth/62] Target time for completing a header retrieval request (only for measurements for now) - headerTTL = 3 * time.Second // [eth/62] Time it takes for a header request to time out - bodyTargetRTT = 3 * time.Second / 2 // [eth/62] Target time for completing a block body retrieval request - bodyTTL = 3 * bodyTargetRTT // [eth/62] Maximum time allowance before a block body request is considered expired - receiptTargetRTT = 3 * time.Second / 2 // [eth/63] Target time for completing a receipt retrieval request - receiptTTL = 3 * receiptTargetRTT // [eth/63] Maximum time allowance before a receipt request is considered expired - stateTargetRTT = 2 * time.Second / 2 // [eth/63] Target time for completing a state trie retrieval request - stateTTL = 3 * stateTargetRTT // [eth/63] Maximum time allowance before a node data request is considered expired + rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests + rttMaxEstimate = 20 * time.Second // Maximum rount-trip time to target for download requests + rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value + ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion + ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts + + qosTuningPeers = 5 // Number of peers to tune based on (best peers) + qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence + qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value maxQueuedHashes = 32 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection) maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) @@ -113,7 +114,8 @@ type Downloader struct { fsPivotLock *types.Header // Pivot header on critical section entry (cannot change between retries) fsPivotFails int // Number of fast sync failures in the critical section - interrupt int32 // Atomic boolean to signal termination + rttEstimate uint64 // Round trip time to target for download requests + rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops) // Statistics syncStatsChainOrigin uint64 // Origin block number where syncing started at @@ -159,6 +161,9 @@ type Downloader struct { cancelCh chan struct{} // Channel to cancel mid-flight syncs cancelLock sync.RWMutex // Lock to protect the cancel channel in delivers + quitCh chan struct{} // Quit channel to signal termination + quitLock sync.RWMutex // Lock to prevent double closes + // Testing hooks syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch @@ -172,11 +177,13 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, ha headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader { - return &Downloader{ + dl := &Downloader{ mode: FullSync, mux: mux, queue: newQueue(stateDb), 
peers: newPeerSet(), + rttEstimate: uint64(rttMaxEstimate), + rttConfidence: uint64(1000000), hasHeader: hasHeader, hasBlockAndState: hasBlockAndState, getHeader: getHeader, @@ -203,7 +210,10 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, ha receiptWakeCh: make(chan bool, 1), stateWakeCh: make(chan bool, 1), headerProcCh: make(chan []*types.Header, 1), + quitCh: make(chan struct{}), } + go dl.qosTuner() + return dl } // Progress retrieves the synchronisation boundaries, specifically the origin @@ -250,6 +260,8 @@ func (d *Downloader) RegisterPeer(id string, version int, head common.Hash, glog.V(logger.Error).Infoln("Register failed:", err) return err } + d.qosReduceConfidence() + return nil } @@ -515,7 +527,16 @@ func (d *Downloader) cancel() { // Terminate interrupts the downloader, canceling all pending operations. // The downloader cannot be reused after calling Terminate. func (d *Downloader) Terminate() { - atomic.StoreInt32(&d.interrupt, 1) + // Close the termination channel (make sure double close is allowed) + d.quitLock.Lock() + select { + case <-d.quitCh: + default: + close(d.quitCh) + } + d.quitLock.Unlock() + + // Cancel any pending download requests d.cancel() } @@ -932,7 +953,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { // Reserve a chunk of hashes for a peer. A nil can mean either that // no more hashes are available, or that the peer is known not to // have them. - request := d.queue.ReserveBlocks(peer, peer.BlockCapacity()) + request := d.queue.ReserveBlocks(peer, peer.BlockCapacity(blockTargetRTT)) if request == nil { continue } @@ -973,7 +994,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) { // Request the advertised remote head block and wait for the response go p.getRelHeaders(p.head, 1, 0, false) - timeout := time.After(headerTTL) + timeout := time.After(d.requestTTL()) for { select { case <-d.cancelCh: @@ -1041,7 +1062,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) { // Wait for the remote response to the head fetch number, hash := uint64(0), common.Hash{} - timeout := time.After(hashTTL) + timeout := time.After(d.requestTTL()) for finished := false; !finished; { select { @@ -1118,7 +1139,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) { // Split our chain interval in two, and request the hash to cross check check := (start + end) / 2 - timeout := time.After(hashTTL) + timeout := time.After(d.requestTTL()) go p.getAbsHeaders(uint64(check), 1, 0, false) // Wait until a reply arrives to this request @@ -1199,7 +1220,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error { getHeaders := func(from uint64) { request = time.Now() - timeout.Reset(headerTTL) + timeout.Reset(d.requestTTL()) if skeleton { glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from) @@ -1311,13 +1332,13 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ( pack := packet.(*headerPack) return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh) } - expire = func() map[string]int { return d.queue.ExpireHeaders(headerTTL) } + expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) } throttle = func() bool { return false } reserve = func(p *peer, count int) (*fetchRequest, bool, error) { return d.queue.ReserveHeaders(p, count), false, nil } fetch = func(p *peer, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } - capacity = 
func(p *peer) int { return p.HeaderCapacity() } + capacity = func(p *peer) int { return p.HeaderCapacity(d.requestRTT()) } setIdle = func(p *peer, accepted int) { p.SetHeadersIdle(accepted) } ) err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire, @@ -1341,9 +1362,9 @@ func (d *Downloader) fetchBodies(from uint64) error { pack := packet.(*bodyPack) return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles) } - expire = func() map[string]int { return d.queue.ExpireBodies(bodyTTL) } + expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) } fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) } - capacity = func(p *peer) int { return p.BlockCapacity() } + capacity = func(p *peer) int { return p.BlockCapacity(d.requestRTT()) } setIdle = func(p *peer, accepted int) { p.SetBodiesIdle(accepted) } ) err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, @@ -1365,9 +1386,9 @@ func (d *Downloader) fetchReceipts(from uint64) error { pack := packet.(*receiptPack) return d.queue.DeliverReceipts(pack.peerId, pack.receipts) } - expire = func() map[string]int { return d.queue.ExpireReceipts(receiptTTL) } + expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) } fetch = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) } - capacity = func(p *peer) int { return p.ReceiptCapacity() } + capacity = func(p *peer) int { return p.ReceiptCapacity(d.requestRTT()) } setIdle = func(p *peer, accepted int) { p.SetReceiptsIdle(accepted) } ) err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, @@ -1417,13 +1438,13 @@ func (d *Downloader) fetchNodeData() error { } }) } - expire = func() map[string]int { return d.queue.ExpireNodeData(stateTTL) } + expire = func() map[string]int { return d.queue.ExpireNodeData(d.requestTTL()) } throttle = func() bool { return false } reserve = func(p *peer, count int) (*fetchRequest, bool, error) { return d.queue.ReserveNodeData(p, count), false, nil } fetch = func(p *peer, req *fetchRequest) error { return p.FetchNodeData(req) } - capacity = func(p *peer) int { return p.NodeDataCapacity() } + capacity = func(p *peer) int { return p.NodeDataCapacity(d.requestRTT()) } setIdle = func(p *peer, accepted int) { p.SetNodeDataIdle(accepted) } ) err := d.fetchParts(errCancelStateFetch, d.stateCh, deliver, d.stateWakeCh, expire, @@ -1799,8 +1820,10 @@ func (d *Downloader) processContent() error { } for len(results) != 0 { // Check for any termination requests - if atomic.LoadInt32(&d.interrupt) == 1 { + select { + case <-d.quitCh: return errCancelContentProcessing + default: } // Retrieve the a batch of results to import var ( @@ -1901,3 +1924,74 @@ func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, i return errNoSyncActive } } + +// qosTuner is the quality of service tuning loop that occasionally gathers the +// peer latency statistics and updates the estimated request round trip time. 
+func (d *Downloader) qosTuner() { + for { + // Retrieve the current median RTT and integrate into the previoust target RTT + rtt := time.Duration(float64(1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT())) + atomic.StoreUint64(&d.rttEstimate, uint64(rtt)) + + // A new RTT cycle passed, increase our confidence in the estimated RTT + conf := atomic.LoadUint64(&d.rttConfidence) + conf = conf + (1000000-conf)/2 + atomic.StoreUint64(&d.rttConfidence, conf) + + // Log the new QoS values and sleep until the next RTT + glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()) + select { + case <-d.quitCh: + return + case <-time.After(rtt): + } + } +} + +// qosReduceConfidence is meant to be called when a new peer joins the downloader's +// peer set, needing to reduce the confidence we have in out QoS estimates. +func (d *Downloader) qosReduceConfidence() { + // If we have a single peer, confidence is always 1 + peers := uint64(d.peers.Len()) + if peers == 1 { + atomic.StoreUint64(&d.rttConfidence, 1000000) + return + } + // If we have a ton of peers, don't drop confidence) + if peers >= uint64(qosConfidenceCap) { + return + } + // Otherwise drop the confidence factor + conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers + if float64(conf)/1000000 < rttMinConfidence { + conf = uint64(rttMinConfidence * 1000000) + } + atomic.StoreUint64(&d.rttConfidence, conf) + + rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate)) + glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()) +} + +// requestRTT returns the current target round trip time for a download request +// to complete in. +// +// Note, the returned RTT is .9 of the actually estimated RTT. The reason is that +// the downloader tries to adapt queries to the RTT, so multiple RTT values can +// be adapted to, but smaller ones are preffered (stabler download stream). +func (d *Downloader) requestRTT() time.Duration { + return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10 +} + +// requestTTL returns the current timeout allowance for a single download request +// to finish under. +func (d *Downloader) requestTTL() time.Duration { + var ( + rtt = time.Duration(atomic.LoadUint64(&d.rttEstimate)) + conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0 + ) + ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf) + if ttl > ttlLimit { + ttl = ttlLimit + } + return ttl +} diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index f3a0e38f1ae50..a9c069a926bc6 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -179,6 +179,12 @@ func newTester() *downloadTester { return tester } +// terminate aborts any operations on the embedded downloader and releases all +// held resources. +func (dl *downloadTester) terminate() { + dl.downloader.Terminate() +} + // sync starts synchronizing with a remote peer, blocking until it completes. 
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { dl.lock.RLock() @@ -740,6 +746,8 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) tester := newTester() + defer tester.terminate() + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Synchronise with the peer and make sure all relevant data was retrieved @@ -764,6 +772,8 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) tester := newTester() + defer tester.terminate() + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Wrap the importer to allow stepping @@ -851,6 +861,8 @@ func testForkedSync(t *testing.T, protocol int, mode SyncMode) { hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true) tester := newTester() + defer tester.terminate() + tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA) tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) @@ -885,6 +897,8 @@ func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) { hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false) tester := newTester() + defer tester.terminate() + tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA) tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB) @@ -934,6 +948,8 @@ func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) { hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true) tester := newTester() + defer tester.terminate() + tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA) tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB) @@ -968,6 +984,8 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) { hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false) tester := newTester() + defer tester.terminate() + tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA) tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit @@ -987,7 +1005,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) { // bodies. func TestInactiveDownloader62(t *testing.T) { t.Parallel() + tester := newTester() + defer tester.terminate() // Check that neither block headers nor bodies are accepted if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { @@ -1002,7 +1022,9 @@ func TestInactiveDownloader62(t *testing.T) { // bodies and receipts. 
func TestInactiveDownloader63(t *testing.T) { t.Parallel() + tester := newTester() + defer tester.terminate() // Check that neither block headers nor bodies are accepted if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { @@ -1039,6 +1061,8 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) tester := newTester() + defer tester.terminate() + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Make sure canceling works with a pristine downloader @@ -1074,6 +1098,8 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) tester := newTester() + defer tester.terminate() + for i := 0; i < targetPeers; i++ { id := fmt.Sprintf("peer #%d", i) tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts) @@ -1103,6 +1129,8 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { // Create peers of every type tester := newTester() + defer tester.terminate() + tester.newPeer("peer 61", 61, hashes, nil, blocks, nil) tester.newPeer("peer 62", 62, hashes, headers, blocks, nil) tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts) @@ -1140,6 +1168,8 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) tester := newTester() + defer tester.terminate() + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Instrument the downloader to signal body requests @@ -1193,6 +1223,7 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) tester := newTester() + defer tester.terminate() // Attempt a full sync with an attacker feeding gapped headers tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) @@ -1225,6 +1256,7 @@ func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) tester := newTester() + defer tester.terminate() // Attempt a full sync with an attacker feeding shifted headers tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) @@ -1256,6 +1288,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) tester := newTester() + defer tester.terminate() // Attempt to sync with an attacker that feeds junk during the fast sync phase. // This should result in the last fsHeaderSafetyNet headers being rolled back. 
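An aside on the adaptive quality-of-service logic this patch introduces (illustrative only, not part of the cherry-picked commit): the sketch below reproduces the RTT-to-TTL computation with the constants from downloader.go, using made-up RTT and confidence figures to show how timeouts stay loose for a poorly known peer set and tighten as confidence grows.

package main

import (
	"fmt"
	"time"
)

const (
	ttlScaling = 3           // Constant scaling factor for RTT -> TTL conversion
	ttlLimit   = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
)

// requestTTL mirrors Downloader.requestTTL: scale the estimated round-trip time
// by a constant factor, divide by the confidence in that estimate, and cap the
// result so a low-confidence estimate can never yield an unbounded timeout.
func requestTTL(rtt time.Duration, confidence float64) time.Duration {
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/confidence)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}

func main() {
	fmt.Println(requestTTL(20*time.Second, 0.1)) // pessimistic start: capped at 1m0s
	fmt.Println(requestTTL(2*time.Second, 0.9))  // tuned peer set: roughly 6.7s
}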
@@ -1347,9 +1380,11 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) { t.Parallel() tester := newTester() - hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false) + defer tester.terminate() + hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false) tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts) + if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } @@ -1392,6 +1427,8 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { } // Run the tests and check disconnection status tester := newTester() + defer tester.terminate() + for i, tt := range tests { // Register a new peer and ensure it's presence id := fmt.Sprintf("test %d", i) @@ -1433,6 +1470,8 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) { progress := make(chan struct{}) tester := newTester() + defer tester.terminate() + tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress @@ -1505,6 +1544,8 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) { progress := make(chan struct{}) tester := newTester() + defer tester.terminate() + tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress @@ -1580,6 +1621,8 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) { progress := make(chan struct{}) tester := newTester() + defer tester.terminate() + tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress @@ -1656,6 +1699,8 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) { progress := make(chan struct{}) tester := newTester() + defer tester.terminate() + tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress @@ -1742,7 +1787,7 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) { impl := tester.peerGetAbsHeadersFn("peer", 0) go impl(from, count, skip, reverse) // None of the extra deliveries should block. 
- timeout := time.After(5 * time.Second) + timeout := time.After(15 * time.Second) for i := 0; i < cap(deliveriesDone); i++ { select { case <-deliveriesDone: @@ -1755,6 +1800,7 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) { if err := tester.sync("peer", nil, mode); err != nil { t.Errorf("sync failed: %v", err) } + tester.terminate() } } @@ -1772,8 +1818,9 @@ func testFastCriticalRestarts(t *testing.T, protocol int) { // Create a tester peer with the critical section state roots missing (force failures) tester := newTester() - tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) + defer tester.terminate() + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) for i := 0; i < fsPivotInterval; i++ { tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true } @@ -1783,11 +1830,14 @@ func testFastCriticalRestarts(t *testing.T, protocol int) { if err := tester.sync("peer", nil, FastSync); err == nil { t.Fatalf("failing fast sync succeeded: %v", err) } + time.Sleep(500 * time.Millisecond) // Make sure no in-flight requests remain + // If it's the first failure, pivot should be locked => reenable all others to detect pivot changes if i == 0 { + tester.lock.Lock() tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true} + tester.lock.Unlock() } - time.Sleep(100 * time.Millisecond) // Make sure no in-flight requests remain } // Retry limit exhausted, downloader will switch to full sync, should succeed if err := tester.sync("peer", nil, FastSync); err != nil { diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 6aab907d7e135..94d44fca46502 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -23,6 +23,8 @@ import ( "errors" "fmt" "math" + "sort" + "strings" "sync" "sync/atomic" "time" @@ -31,8 +33,8 @@ import ( ) const ( - maxLackingHashes = 4096 // Maximum number of entries allowed on the list or lacking items - throughputImpact = 0.1 // The impact a single measurement has on a peer's final throughput value. + maxLackingHashes = 4096 // Maximum number of entries allowed on the list or lacking items + measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value. 
) // Hash and block fetchers belonging to eth/61 and below @@ -68,6 +70,8 @@ type peer struct { receiptThroughput float64 // Number of receipts measured to be retrievable per second stateThroughput float64 // Number of node data pieces measured to be retrievable per second + rtt time.Duration // Request round trip time to track responsiveness (QoS) + headerStarted time.Time // Time instance when the last header fetch was started blockStarted time.Time // Time instance when the last block (body) fetch was started receiptStarted time.Time // Time instance when the last receipt fetch was started @@ -290,44 +294,47 @@ func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, id return } // Otherwise update the throughput with a new measurement - measured := float64(delivered) / (float64(time.Since(started)+1) / float64(time.Second)) // +1 (ns) to ensure non-zero divisor - *throughput = (1-throughputImpact)*(*throughput) + throughputImpact*measured + elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor + measured := float64(delivered) / (float64(elapsed) / float64(time.Second)) + + *throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured + p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed)) } // HeaderCapacity retrieves the peers header download allowance based on its // previously discovered throughput. -func (p *peer) HeaderCapacity() int { +func (p *peer) HeaderCapacity(targetRTT time.Duration) int { p.lock.RLock() defer p.lock.RUnlock() - return int(math.Max(1, math.Min(p.headerThroughput*float64(headerTargetRTT)/float64(time.Second), float64(MaxHeaderFetch)))) + return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch))) } // BlockCapacity retrieves the peers block download allowance based on its // previously discovered throughput. -func (p *peer) BlockCapacity() int { +func (p *peer) BlockCapacity(targetRTT time.Duration) int { p.lock.RLock() defer p.lock.RUnlock() - return int(math.Max(1, math.Min(p.blockThroughput*float64(blockTargetRTT)/float64(time.Second), float64(MaxBlockFetch)))) + return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch))) } // ReceiptCapacity retrieves the peers receipt download allowance based on its // previously discovered throughput. -func (p *peer) ReceiptCapacity() int { +func (p *peer) ReceiptCapacity(targetRTT time.Duration) int { p.lock.RLock() defer p.lock.RUnlock() - return int(math.Max(1, math.Min(p.receiptThroughput*float64(receiptTargetRTT)/float64(time.Second), float64(MaxReceiptFetch)))) + return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch))) } // NodeDataCapacity retrieves the peers state download allowance based on its // previously discovered throughput. 
-func (p *peer) NodeDataCapacity() int { +func (p *peer) NodeDataCapacity(targetRTT time.Duration) int { p.lock.RLock() defer p.lock.RUnlock() - return int(math.Max(1, math.Min(p.stateThroughput*float64(stateTargetRTT)/float64(time.Second), float64(MaxStateFetch)))) + return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch))) } // MarkLacking appends a new entity to the set of items (blocks, receipts, states) @@ -361,13 +368,14 @@ func (p *peer) String() string { p.lock.RLock() defer p.lock.RUnlock() - return fmt.Sprintf("Peer %s [%s]", p.id, - fmt.Sprintf("headers %3.2f/s, ", p.headerThroughput)+ - fmt.Sprintf("blocks %3.2f/s, ", p.blockThroughput)+ - fmt.Sprintf("receipts %3.2f/s, ", p.receiptThroughput)+ - fmt.Sprintf("states %3.2f/s, ", p.stateThroughput)+ - fmt.Sprintf("lacking %4d", len(p.lacking)), - ) + return fmt.Sprintf("Peer %s [%s]", p.id, strings.Join([]string{ + fmt.Sprintf("hs %3.2f/s", p.headerThroughput), + fmt.Sprintf("bs %3.2f/s", p.blockThroughput), + fmt.Sprintf("rs %3.2f/s", p.receiptThroughput), + fmt.Sprintf("ss %3.2f/s", p.stateThroughput), + fmt.Sprintf("miss %4d", len(p.lacking)), + fmt.Sprintf("rtt %v", p.rtt), + }, ", ")) } // peerSet represents the collection of active peer participating in the chain @@ -402,6 +410,10 @@ func (ps *peerSet) Reset() { // average of all existing peers, to give it a realistic chance of being used // for data retrievals. func (ps *peerSet) Register(p *peer) error { + // Retrieve the current median RTT as a sane default + p.rtt = ps.medianRTT() + + // Register the new peer with some meaningful defaults ps.lock.Lock() defer ps.lock.Unlock() @@ -564,3 +576,34 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) } return idle, total } + +// medianRTT returns the median RTT of te peerset, considering only the tuning +// peers if there are more peers available. 
+func (ps *peerSet) medianRTT() time.Duration { + // Gather all the currnetly measured round trip times + ps.lock.RLock() + defer ps.lock.RUnlock() + + rtts := make([]float64, 0, len(ps.peers)) + for _, p := range ps.peers { + p.lock.RLock() + rtts = append(rtts, float64(p.rtt)) + p.lock.RUnlock() + } + sort.Float64s(rtts) + + median := rttMaxEstimate + if qosTuningPeers <= len(rtts) { + median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers + } else if len(rtts) > 0 { + median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos) + } + // Restrict the RTT into some QoS defaults, irrelevant of true RTT + if median < rttMinEstimate { + median = rttMinEstimate + } + if median > rttMaxEstimate { + median = rttMaxEstimate + } + return median +} From ecb8e23e882362fddf46bbf9cf1da4e8e4271fa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 2 Jun 2016 15:54:07 +0300 Subject: [PATCH 28/44] [release/1.4.6] eth: don't accept transactions until we sync up with the network (cherry picked from commit 32559ccad1e0519ce1decc4b90df021fe215a811) --- eth/handler.go | 14 ++++++++++---- eth/protocol_test.go | 1 + eth/sync.go | 2 ++ 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/eth/handler.go b/eth/handler.go index 58869a2eec2f8..1e4dc1289eea5 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -59,7 +59,9 @@ type blockFetcherFn func([]common.Hash) error type ProtocolManager struct { networkId int - fastSync uint32 + fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks) + synced uint32 // Flag whether we're considered synchronised (enables transaction processing) + txpool txPool blockchain *core.BlockChain chaindb ethdb.Database @@ -161,7 +163,11 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, heighter := func() uint64 { return blockchain.CurrentBlock().NumberU64() } - manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, manager.insertChain, manager.removePeer) + inserter := func(blocks types.Blocks) (int, error) { + atomic.StoreUint32(&manager.synced, 1) // Mark initial sync done on any fetcher import + return manager.insertChain(blocks) + } + manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer) if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 { glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled") @@ -698,8 +704,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } case msg.Code == TxMsg: - // Transactions arrived, make sure we have a valid chain to handle them - if atomic.LoadUint32(&pm.fastSync) == 1 { + // Transactions arrived, make sure we have a valid and fresh chain to handle them + if atomic.LoadUint32(&pm.synced) == 0 { break } // Transactions can be processed, parse all of them and deliver to the pool diff --git a/eth/protocol_test.go b/eth/protocol_test.go index 0a82e2e7955d2..f860d0a35afb8 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -97,6 +97,7 @@ func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) } func testRecvTransactions(t *testing.T, protocol int) { txAdded := make(chan []*types.Transaction) pm := newTestProtocolManagerMust(t, false, 0, nil, txAdded) + pm.synced = 1 // mark synced to accept transactions p, _ := newTestPeer("peer", protocol, pm, true) defer pm.Stop() defer p.close() diff --git 
a/eth/sync.go b/eth/sync.go index 4b16c1322668b..52f7e90e7b7a3 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -174,6 +174,8 @@ func (pm *ProtocolManager) synchronise(peer *peer) { if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil { return } + atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done + // If fast sync was enabled, and we synced up, disable it if atomic.LoadUint32(&pm.fastSync) == 1 { // Disable fast sync if we indeed have something in our chain From 71a89b7c751b96b98a91641090b06a51e1b95f72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 6 Jun 2016 16:23:33 +0300 Subject: [PATCH 29/44] VERSION, cmd/geth: bumped version 1.4.6 --- VERSION | 2 +- cmd/geth/main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index e516bb9d963a0..c514bd85c2ece 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.4.5 +1.4.6 diff --git a/cmd/geth/main.go b/cmd/geth/main.go index cf7d6944e19ac..2639147c44868 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -50,7 +50,7 @@ const ( clientIdentifier = "Geth" // Client identifier to advertise over the network versionMajor = 1 // Major version component of the current release versionMinor = 4 // Minor version component of the current release - versionPatch = 5 // Patch version component of the current release + versionPatch = 6 // Patch version component of the current release versionMeta = "stable" // Version metadata to append to the version string versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle From 688fbab5d52d9c8bc6d73abdf83839aa7358c23e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 8 Jun 2016 13:12:15 +0300 Subject: [PATCH 30/44] [release/1.4.7] console: fix windows color transformation issue (cherry picked from commit d251d48439bf85d0fb0b52dd931e5ec6aea8b412) --- console/console.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/console/console.go b/console/console.go index baa9cf545729d..ab0c1ea5855d4 100644 --- a/console/console.go +++ b/console/console.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/internal/jsre" "github.com/ethereum/go-ethereum/internal/web3ext" "github.com/ethereum/go-ethereum/rpc" + "github.com/mattn/go-colorable" "github.com/peterh/liner" "github.com/robertkrimen/otto" ) @@ -80,7 +81,7 @@ func New(config Config) (*Console, error) { config.Prompt = DefaultPrompt } if config.Printer == nil { - config.Printer = os.Stdout + config.Printer = colorable.NewColorableStdout() } // Initialize the console and return console := &Console{ From 7fb72dbcbf5e0c7fbd9c5f132e9236a49c415861 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 8 Jun 2016 13:53:07 +0300 Subject: [PATCH 31/44] [release/1.4.7] cmd/geth: truly randomize console test RPC endpoints (cherry picked from commit 32258af87be39994baca95aa92cb5565ee69571a) --- cmd/geth/consolecmd_test.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index e59fe1415b025..e0e549e1242dc 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -17,7 +17,8 @@ package main import ( - "math/rand" + "crypto/rand" + "math/big" "os" "path/filepath" "runtime" @@ -73,7 +74,7 @@ func TestIPCAttachWelcome(t *testing.T) { coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" var ipc string if runtime.GOOS == "windows" 
{ - ipc = `\\.\pipe\geth` + strconv.Itoa(rand.Int()) + ipc = `\\.\pipe\geth` + strconv.Itoa(trulyRandInt(100000, 999999)) } else { ws := tmpdir(t) defer os.RemoveAll(ws) @@ -94,7 +95,7 @@ func TestIPCAttachWelcome(t *testing.T) { func TestHTTPAttachWelcome(t *testing.T) { coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P + port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P geth := runGeth(t, "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--etherbase", coinbase, "--rpc", "--rpcport", port) @@ -108,7 +109,7 @@ func TestHTTPAttachWelcome(t *testing.T) { func TestWSAttachWelcome(t *testing.T) { coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P + port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P geth := runGeth(t, "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", @@ -160,3 +161,10 @@ at block: 0 ({{niltime}}){{if ipc}} `) attach.expectExit() } + +// trulyRandInt generates a crypto random integer used by the console tests to +// not clash network ports with other tests running cocurrently. +func trulyRandInt(lo, hi int) int { + num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo))) + return int(num.Int64()) + lo +} From a93d63d5767cca58dd14bce7f659de099ee561c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 8 Jun 2016 17:00:18 +0300 Subject: [PATCH 32/44] [release/1.4.7] cmd/geth: fix the keystore path in the accounts help text (cherry picked from commit 7c0eb47dfb957c045361cf7297b2935fd3e6a25a) --- cmd/geth/accountcmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go index 0f9d95c2c5712..2c2308514f0bc 100644 --- a/cmd/geth/accountcmd.go +++ b/cmd/geth/accountcmd.go @@ -70,7 +70,7 @@ either new or import). Without it you are not able to unlock your account. Note that exporting your key in unencrypted format is NOT supported. -Keys are stored under /keys. +Keys are stored under /keystore. It is safe to transfer the entire directory or the individual keys therein between ethereum nodes by simply copying. Make sure you backup your keys regularly. 
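A usage illustration for the crypto/rand based port selection added to the console tests above (a standalone sketch, not part of the patches; only the helper body and the 1024-65536 range are taken from the diff):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"strconv"
)

// trulyRandInt mirrors the consolecmd_test.go helper: a crypto/rand backed
// integer in [lo, hi), so concurrently running test processes are unlikely to
// pick the same network port.
func trulyRandInt(lo, hi int) int {
	num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo)))
	return int(num.Int64()) + lo
}

func main() {
	// The console tests feed a value like this to --rpcport.
	fmt.Println("test RPC port:", strconv.Itoa(trulyRandInt(1024, 65536)))
}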
From 71b577f83953838d79cf78b77ab0d9d671bd5edf Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Wed, 8 Jun 2016 12:17:38 +0200 Subject: [PATCH 33/44] [release/1.4.7] console: ignore round and curly brackets in strings when determining indentation level (cherry picked from commit dbcdf83ed8aca3f0b84d67b944fff2f3a8bc7769) --- console/console.go | 47 +++++++++++++++++++++++++++++++++++++++-- console/console_test.go | 46 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 2 deletions(-) diff --git a/console/console.go b/console/console.go index ab0c1ea5855d4..00d1fea1d8525 100644 --- a/console/console.go +++ b/console/console.go @@ -331,11 +331,11 @@ func (c *Console) Interactive() { // Append the line to the input and check for multi-line interpretation input += line + "\n" - indents = strings.Count(input, "{") + strings.Count(input, "(") - strings.Count(input, "}") - strings.Count(input, ")") + indents = countIndents(input) if indents <= 0 { prompt = c.prompt } else { - prompt = strings.Repeat("..", indents*2) + " " + prompt = strings.Repeat(".", indents*3) + " " } // If all the needed lines are present, save the command and run if indents <= 0 { @@ -354,6 +354,49 @@ func (c *Console) Interactive() { } } +// countIndents returns the number of identations for the given input. +// In case of invalid input such as var a = } the result can be negative. +func countIndents(input string) int { + var ( + indents = 0 + inString = false + strOpenChar = ' ' // keep track of the string open char to allow var str = "I'm ...."; + charEscaped = false // keep track if the previous char was the '\' char, allow var str = "abc\"def"; + ) + + for _, c := range input { + switch c { + case '\\': + // indicate next char as escaped when in string and previous char isn't escaping this backslash + if !charEscaped && inString { + charEscaped = true + } + case '\'', '"': + if inString && !charEscaped && strOpenChar == c { // end string + inString = false + } else if !inString && !charEscaped { // begin string + inString = true + strOpenChar = c + } + charEscaped = false + case '{', '(': + if !inString { // ignore brackets when in string, allow var str = "a{"; without indenting + indents++ + } + charEscaped = false + case '}', ')': + if !inString { + indents-- + } + charEscaped = false + default: + charEscaped = false + } + } + + return indents +} + // Execute runs the JavaScript file specified as the argument. func (c *Console) Execute(path string) error { return c.jsre.Exec(path) diff --git a/console/console_test.go b/console/console_test.go index 91108782424c1..7738d0c44208b 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -294,3 +294,49 @@ func TestPrettyError(t *testing.T) { t.Fatalf("pretty error mismatch: have %s, want %s", output, want) } } + +// Tests that tests if the number of indents for JS input is calculated correct. 
+func TestIndenting(t *testing.T) { + testCases := []struct { + input string + expectedIndentCount int + }{ + {`var a = 1;`, 0}, + {`"some string"`, 0}, + {`"some string with (parentesis`, 0}, + {`"some string with newline + ("`, 0}, + {`function v(a,b) {}`, 0}, + {`function f(a,b) { var str = "asd("; };`, 0}, + {`function f(a) {`, 1}, + {`function f(a, function(b) {`, 2}, + {`function f(a, function(b) { + var str = "a)}"; + });`, 0}, + {`function f(a,b) { + var str = "a{b(" + a, ", " + b; + }`, 0}, + {`var str = "\"{"`, 0}, + {`var str = "'("`, 0}, + {`var str = "\\{"`, 0}, + {`var str = "\\\\{"`, 0}, + {`var str = 'a"{`, 0}, + {`var obj = {`, 1}, + {`var obj = { {a:1`, 2}, + {`var obj = { {a:1}`, 1}, + {`var obj = { {a:1}, b:2}`, 0}, + {`var obj = {}`, 0}, + {`var obj = { + a: 1, b: 2 + }`, 0}, + {`var test = }`, -1}, + {`var str = "a\""; var obj = {`, 1}, + } + + for i, tt := range testCases { + counted := countIndents(tt.input) + if counted != tt.expectedIndentCount { + t.Errorf("test %d: invalid indenting: have %d, want %d", i, counted, tt.expectedIndentCount) + } + } +} From 39f1d909d185816bade35c7336364fe5e842c7c5 Mon Sep 17 00:00:00 2001 From: Thomas Bocek Date: Thu, 2 Jun 2016 18:43:27 +0200 Subject: [PATCH 34/44] [release/1.4.7] accounts/abi: Negative numbers not properly converted in ABI encoding When converting a negative number e.g., -2, the resulting ABI encoding should look as follows: fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe. However, since the check of the type is for an uint instead of an int, it results in the following ABI encoding: 0101010101010101010101010101010101010101010101010101010101010102. The Ethereum ABI (https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) says, that signed integers are stored in two's complement which should be of the form ffffff.... and not 01010101..... for e.g. -1. Thus, I removed the type check in numbers.go as well as the function S256 as I don't think they are correct. Or maybe I'm missing something? (cherry picked from commit 89c6c5bb85ff24c152218f245fa366e733c951a7) --- accounts/abi/method.go | 2 +- accounts/abi/numbers.go | 42 ++++-------------------------------- accounts/abi/numbers_test.go | 26 +++------------------- accounts/abi/packing.go | 4 ++-- 4 files changed, 10 insertions(+), 64 deletions(-) diff --git a/accounts/abi/method.go b/accounts/abi/method.go index f3d1a44b5558c..d56f3bc3dd93a 100644 --- a/accounts/abi/method.go +++ b/accounts/abi/method.go @@ -62,7 +62,7 @@ func (m Method) pack(method Method, args ...interface{}) ([]byte, error) { // calculate the offset offset := len(method.Inputs)*32 + len(variableInput) // set the offset - ret = append(ret, packNum(reflect.ValueOf(offset), UintTy)...) + ret = append(ret, packNum(reflect.ValueOf(offset))...) // Append the packed output to the variable input. The variable input // will be appended at the end of the input. variableInput = append(variableInput, packed...) 
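To make the encoding described in the commit message concrete, here is a minimal standalone sketch (the helper name and implementation are illustrative, not the go-ethereum code) of reducing a signed value to the 256-bit two's complement form that packNum now produces for both int and uint types:

package main

import (
	"fmt"
	"math/big"
)

// toTwosComplement256 wraps a signed integer into [0, 2^256) and left-pads it
// to 32 bytes, which is the two's complement form the contract ABI expects.
func toTwosComplement256(n *big.Int) []byte {
	mod := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256
	v := new(big.Int).Mod(n, mod)               // negatives wrap to 2^256 + n
	out := make([]byte, 32)
	copy(out[32-len(v.Bytes()):], v.Bytes())
	return out
}

func main() {
	fmt.Printf("%x\n", toTwosComplement256(big.NewInt(-2))) // 63 leading 'f' nibbles, then 'e'
	fmt.Printf("%x\n", toTwosComplement256(big.NewInt(1)))  // 31 zero bytes, then 01
}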
diff --git a/accounts/abi/numbers.go b/accounts/abi/numbers.go index 5a31cf2b5b963..06c4422f93d23 100644 --- a/accounts/abi/numbers.go +++ b/accounts/abi/numbers.go @@ -61,54 +61,20 @@ func U256(n *big.Int) []byte { return common.LeftPadBytes(common.U256(n).Bytes(), 32) } -func S256(n *big.Int) []byte { - sint := common.S256(n) - ret := common.LeftPadBytes(sint.Bytes(), 32) - if sint.Cmp(common.Big0) < 0 { - for i, b := range ret { - if b == 0 { - ret[i] = 1 - continue - } - break - } - } - - return ret -} - // S256 will ensure signed 256bit on big nums func U2U256(n uint64) []byte { return U256(big.NewInt(int64(n))) } -func S2S256(n int64) []byte { - return S256(big.NewInt(n)) -} - // packNum packs the given number (using the reflect value) and will cast it to appropriate number representation -func packNum(value reflect.Value, to byte) []byte { +func packNum(value reflect.Value) []byte { switch kind := value.Kind(); kind { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if to == UintTy { - return U2U256(value.Uint()) - } else { - return S2S256(int64(value.Uint())) - } + return U2U256(value.Uint()) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if to == UintTy { - return U2U256(uint64(value.Int())) - } else { - return S2S256(value.Int()) - } + return U2U256(uint64(value.Int())) case reflect.Ptr: - // This only takes care of packing and casting. No type checking is done here. It should be done prior to using this function. - if to == UintTy { - return U256(value.Interface().(*big.Int)) - } else { - return S256(value.Interface().(*big.Int)) - } - + return U256(value.Interface().(*big.Int)) } return nil diff --git a/accounts/abi/numbers_test.go b/accounts/abi/numbers_test.go index d66a43258bb0d..f409aa60fb79c 100644 --- a/accounts/abi/numbers_test.go +++ b/accounts/abi/numbers_test.go @@ -26,48 +26,28 @@ import ( func TestNumberTypes(t *testing.T) { ubytes := make([]byte, 32) ubytes[31] = 1 - sbytesmin := []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} unsigned := U256(big.NewInt(1)) if !bytes.Equal(unsigned, ubytes) { t.Errorf("expected %x got %x", ubytes, unsigned) } - - signed := S256(big.NewInt(1)) - if !bytes.Equal(signed, ubytes) { - t.Errorf("expected %x got %x", ubytes, unsigned) - } - - signed = S256(big.NewInt(-1)) - if !bytes.Equal(signed, sbytesmin) { - t.Errorf("expected %x got %x", ubytes, unsigned) - } } func TestPackNumber(t *testing.T) { ubytes := make([]byte, 32) ubytes[31] = 1 - sbytesmin := []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} maxunsigned := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255} - packed := packNum(reflect.ValueOf(1), IntTy) - if !bytes.Equal(packed, ubytes) { - t.Errorf("expected %x got %x", ubytes, packed) - } - packed = packNum(reflect.ValueOf(-1), IntTy) - if !bytes.Equal(packed, sbytesmin) { - t.Errorf("expected %x got %x", ubytes, packed) - } - packed = packNum(reflect.ValueOf(1), UintTy) + packed := packNum(reflect.ValueOf(1)) if !bytes.Equal(packed, ubytes) { t.Errorf("expected %x got %x", ubytes, packed) } - packed = packNum(reflect.ValueOf(-1), UintTy) + packed = packNum(reflect.ValueOf(-1)) if !bytes.Equal(packed, maxunsigned) { t.Errorf("expected %x got %x", maxunsigned, packed) } - packed = packNum(reflect.ValueOf("string"), UintTy) + packed = 
packNum(reflect.ValueOf("string")) if packed != nil { t.Errorf("expected 'string' to pack to nil. got %x instead", packed) } diff --git a/accounts/abi/packing.go b/accounts/abi/packing.go index c765dfdf30285..0c37edf17426c 100644 --- a/accounts/abi/packing.go +++ b/accounts/abi/packing.go @@ -25,7 +25,7 @@ import ( // packBytesSlice packs the given bytes as [L, V] as the canonical representation // bytes slice func packBytesSlice(bytes []byte, l int) []byte { - len := packNum(reflect.ValueOf(l), UintTy) + len := packNum(reflect.ValueOf(l)) return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...) } @@ -34,7 +34,7 @@ func packBytesSlice(bytes []byte, l int) []byte { func packElement(t Type, reflectValue reflect.Value) []byte { switch t.T { case IntTy, UintTy: - return packNum(reflectValue, t.T) + return packNum(reflectValue) case StringTy: return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len()) case AddressTy: From c89fa789b79bdbe25c703aa3d3bf753070682db7 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Thu, 9 Jun 2016 11:44:42 +0200 Subject: [PATCH 35/44] [release/1.4.7] cmd/geth: codegansta/cli package renamed to urfave/cli (cherry picked from commit 861add3d72bcfc6c6a8976eb82dc3e7b5288883e) --- Godeps/Godeps.json | 15 +- .../cli/autocomplete/bash_autocomplete | 14 - .../cli/autocomplete/zsh_autocomplete | 5 - .../src/github.com/rs/xhandler/.travis.yml | 7 - .../src/github.com/rs/xhandler/LICENSE | 19 -- .../src/github.com/rs/xhandler/README.md | 134 -------- .../src/github.com/rs/xhandler/chain.go | 93 ------ .../src/github.com/rs/xhandler/middleware.go | 59 ---- .../src/github.com/rs/xhandler/xhandler.go | 42 --- .../urfave/cli.v1}/.travis.yml | 10 +- .../src/gopkg.in/urfave/cli.v1/CHANGELOG.md | 310 ++++++++++++++++++ .../cli => gopkg.in/urfave/cli.v1}/LICENSE | 0 .../cli => gopkg.in/urfave/cli.v1}/README.md | 291 ++++++++++++++-- .../cli => gopkg.in/urfave/cli.v1}/app.go | 225 ++++++++++--- .../urfave/cli.v1}/appveyor.yml | 0 .../src/gopkg.in/urfave/cli.v1/category.go | 44 +++ .../cli => gopkg.in/urfave/cli.v1}/cli.go | 23 +- .../cli => gopkg.in/urfave/cli.v1}/command.go | 73 +++-- .../cli => gopkg.in/urfave/cli.v1}/context.go | 116 +++++-- .../src/gopkg.in/urfave/cli.v1/errors.go | 92 ++++++ .../cli => gopkg.in/urfave/cli.v1}/flag.go | 199 ++++++++--- .../src/gopkg.in/urfave/cli.v1/funcs.go | 28 ++ .../cli => gopkg.in/urfave/cli.v1}/help.go | 112 ++++--- cmd/ethtest/main.go | 2 +- cmd/evm/main.go | 2 +- cmd/geth/accountcmd.go | 2 +- cmd/geth/chaincmd.go | 2 +- cmd/geth/consolecmd.go | 2 +- cmd/geth/main.go | 2 +- cmd/geth/monitorcmd.go | 2 +- cmd/geth/usage.go | 2 +- cmd/utils/client.go | 2 +- cmd/utils/customflags.go | 2 +- cmd/utils/flags.go | 2 +- internal/debug/flags.go | 2 +- 35 files changed, 1309 insertions(+), 626 deletions(-) delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete delete mode 100644 Godeps/_workspace/src/github.com/rs/xhandler/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/rs/xhandler/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/rs/xhandler/README.md delete mode 100644 Godeps/_workspace/src/github.com/rs/xhandler/chain.go delete mode 100644 Godeps/_workspace/src/github.com/rs/xhandler/middleware.go delete mode 100644 Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go rename Godeps/_workspace/src/{github.com/codegangsta/cli => 
gopkg.in/urfave/cli.v1}/.travis.yml (50%) create mode 100644 Godeps/_workspace/src/gopkg.in/urfave/cli.v1/CHANGELOG.md rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/LICENSE (100%) rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/README.md (52%) rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/app.go (53%) rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/appveyor.yml (100%) create mode 100644 Godeps/_workspace/src/gopkg.in/urfave/cli.v1/category.go rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/cli.go (60%) rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/command.go (77%) rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/context.go (66%) create mode 100644 Godeps/_workspace/src/gopkg.in/urfave/cli.v1/errors.go rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/flag.go (71%) create mode 100644 Godeps/_workspace/src/gopkg.in/urfave/cli.v1/funcs.go rename Godeps/_workspace/src/{github.com/codegangsta/cli => gopkg.in/urfave/cli.v1}/help.go (57%) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index f2694a05cdc29..36104b4560baf 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,7 @@ { "ImportPath": "github.com/ethereum/go-ethereum", "GoVersion": "go1.5.2", + "GodepVersion": "v74", "Packages": [ "./..." ], @@ -13,11 +14,6 @@ "ImportPath": "github.com/cespare/cp", "Rev": "165db2f241fd235aec29ba6d9b1ccd5f1c14637c" }, - { - "ImportPath": "github.com/codegangsta/cli", - "Comment": "1.2.0-215-g0ab42fd", - "Rev": "0ab42fd482c27cf2c95e7794ad3bb2082c2ab2d7" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" @@ -155,6 +151,10 @@ "ImportPath": "github.com/rs/cors", "Rev": "5950cf11d77f8a61b432a25dd4d444b4ced01379" }, + { + "ImportPath": "github.com/rs/xhandler", + "Rev": "d9d9599b6aaf6a058cb7b1f48291ded2cbd13390" + }, { "ImportPath": "github.com/syndtr/goleveldb/leveldb", "Rev": "917f41c560270110ceb73c5b38be2a9127387071" @@ -319,6 +319,11 @@ { "ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque", "Rev": "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57" + }, + { + "ImportPath": "gopkg.in/urfave/cli.v1", + "Comment": "v1.17.0", + "Rev": "01857ac33766ce0c93856370626f9799281c14f4" } ] } diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete deleted file mode 100644 index 21a232f1f554f..0000000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete +++ /dev/null @@ -1,14 +0,0 @@ -#! 
/bin/bash - -: ${PROG:=$(basename ${BASH_SOURCE})} - -_cli_bash_autocomplete() { - local cur opts base - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) - return 0 - } - - complete -F _cli_bash_autocomplete $PROG diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete deleted file mode 100644 index 5430a18f9576d..0000000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete +++ /dev/null @@ -1,5 +0,0 @@ -autoload -U compinit && compinit -autoload -U bashcompinit && bashcompinit - -script_dir=$(dirname $0) -source ${script_dir}/bash_autocomplete diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/.travis.yml b/Godeps/_workspace/src/github.com/rs/xhandler/.travis.yml deleted file mode 100644 index b65c7a9f1eca2..0000000000000 --- a/Godeps/_workspace/src/github.com/rs/xhandler/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: -- 1.5 -- tip -matrix: - allow_failures: - - go: tip diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/LICENSE b/Godeps/_workspace/src/github.com/rs/xhandler/LICENSE deleted file mode 100644 index 47c5e9d2d2f7e..0000000000000 --- a/Godeps/_workspace/src/github.com/rs/xhandler/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/README.md b/Godeps/_workspace/src/github.com/rs/xhandler/README.md deleted file mode 100644 index 91c594bd25deb..0000000000000 --- a/Godeps/_workspace/src/github.com/rs/xhandler/README.md +++ /dev/null @@ -1,134 +0,0 @@ -# XHandler - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xhandler) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xhandler/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xhandler.svg?branch=master)](https://travis-ci.org/rs/xhandler) [![Coverage](http://gocover.io/_badge/github.com/rs/xhandler)](http://gocover.io/github.com/rs/xhandler) - -XHandler is a bridge between [net/context](https://godoc.org/golang.org/x/net/context) and `http.Handler`. - -It lets you enforce `net/context` in your handlers without sacrificing compatibility with existing `http.Handlers` nor imposing a specific router. 
- -Thanks to `net/context` deadline management, `xhandler` is able to enforce a per request deadline and will cancel the context when the client closes the connection unexpectedly. - -You may create your own `net/context` aware handler pretty much the same way as you would do with http.Handler. - -Read more about xhandler on [Dailymotion engineering blog](http://engineering.dailymotion.com/our-way-to-go/). - -## Installing - - go get -u github.com/rs/xhandler - -## Usage - -```go -package main - -import ( - "log" - "net/http" - "time" - - "github.com/rs/cors" - "github.com/rs/xhandler" - "golang.org/x/net/context" -) - -type myMiddleware struct { - next xhandler.HandlerC -} - -func (h myMiddleware) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) { - ctx = context.WithValue(ctx, "test", "World") - h.next.ServeHTTPC(ctx, w, r) -} - -func main() { - c := xhandler.Chain{} - - // Add close notifier handler so context is cancelled when the client closes - // the connection - c.UseC(xhandler.CloseHandler) - - // Add timeout handler - c.UseC(xhandler.TimeoutHandler(2 * time.Second)) - - // Middleware putting something in the context - c.UseC(func(next xhandler.HandlerC) xhandler.HandlerC { - return myMiddleware{next: next} - }) - - // Mix it with a non-context-aware middleware handler - c.Use(cors.Default().Handler) - - // Final handler (using handlerFuncC), reading from the context - xh := xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - value := ctx.Value("test").(string) - w.Write([]byte("Hello " + value)) - }) - - // Bridge context aware handlers with http.Handler using xhandler.Handle() - http.Handle("/test", c.Handler(xh)) - - if err := http.ListenAndServe(":8080", nil); err != nil { - log.Fatal(err) - } -} -``` - -### Using xmux - -Xhandler comes with an optional context aware [muxer](https://github.com/rs/xmux) forked from [httprouter](https://github.com/julienschmidt/httprouter): - -```go -package main - -import ( - "fmt" - "log" - "net/http" - "time" - - "github.com/rs/xhandler" - "github.com/rs/xmux" - "golang.org/x/net/context" -) - -func main() { - c := xhandler.Chain{} - - // Append a context-aware middleware handler - c.UseC(xhandler.CloseHandler) - - // Another context-aware middleware handler - c.UseC(xhandler.TimeoutHandler(2 * time.Second)) - - mux := xmux.New() - - // Use c.Handler to terminate the chain with your final handler - mux.GET("/welcome/:name", xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome %s!", xmux.Params(ctx).Get("name")) - })) - - if err := http.ListenAndServe(":8080", c.Handler(mux)); err != nil { - log.Fatal(err) - } -} -``` - -See [xmux](https://github.com/rs/xmux) for more examples. - -## Context Aware Middleware - -Here is a list of `net/context` aware middleware handlers implementing `xhandler.HandlerC` interface. 
- -Feel free to put up a PR linking your middleware if you have built one: - -| Middleware | Author | Description | -| ---------- | ------ | ----------- | -| [xmux](https://github.com/rs/xmux) | [Olivier Poitrey](https://github.com/rs) | HTTP request muxer | -| [xlog](https://github.com/rs/xlog) | [Olivier Poitrey](https://github.com/rs) | HTTP handler logger | -| [xstats](https://github.com/rs/xstats) | [Olivier Poitrey](https://github.com/rs) | A generic client for service instrumentation | -| [xaccess](https://github.com/rs/xaccess) | [Olivier Poitrey](https://github.com/rs) | HTTP handler access logger with [xlog](https://github.com/rs/xlog) and [xstats](https://github.com/rs/xstats) | -| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support | - -## Licenses - -All source code is licensed under the [MIT License](https://raw.github.com/rs/xhandler/master/LICENSE). diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/chain.go b/Godeps/_workspace/src/github.com/rs/xhandler/chain.go deleted file mode 100644 index 042274d17c34f..0000000000000 --- a/Godeps/_workspace/src/github.com/rs/xhandler/chain.go +++ /dev/null @@ -1,93 +0,0 @@ -package xhandler - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// Chain is an helper to chain middleware handlers together for an easier -// management. -type Chain []func(next HandlerC) HandlerC - -// UseC appends a context-aware handler to the middleware chain. -func (c *Chain) UseC(f func(next HandlerC) HandlerC) { - *c = append(*c, f) -} - -// Use appends a standard http.Handler to the middleware chain without -// lossing track of the context when inserted between two context aware handlers. -// -// Caveat: the f function will be called on each request so you are better to put -// any initialization sequence outside of this function. -func (c *Chain) Use(f func(next http.Handler) http.Handler) { - xf := func(next HandlerC) HandlerC { - return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - n := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTPC(ctx, w, r) - }) - f(n).ServeHTTP(w, r) - }) - } - *c = append(*c, xf) -} - -// Handler wraps the provided final handler with all the middleware appended to -// the chain and return a new standard http.Handler instance. -// The context.Background() context is injected automatically. -func (c Chain) Handler(xh HandlerC) http.Handler { - ctx := context.Background() - return c.HandlerCtx(ctx, xh) -} - -// HandlerFC is an helper to provide a function (HandlerFuncC) to Handler(). -// -// HandlerFC is equivalent to: -// c.Handler(xhandler.HandlerFuncC(xhc)) -func (c Chain) HandlerFC(xhf HandlerFuncC) http.Handler { - ctx := context.Background() - return c.HandlerCtx(ctx, HandlerFuncC(xhf)) -} - -// HandlerH is an helper to provide a standard http handler (http.HandlerFunc) -// to Handler(). Your final handler won't have access the context though. -func (c Chain) HandlerH(h http.Handler) http.Handler { - ctx := context.Background() - return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - h.ServeHTTP(w, r) - })) -} - -// HandlerF is an helper to provide a standard http handler function -// (http.HandlerFunc) to Handler(). Your final handler won't have access -// the context though. 
-func (c Chain) HandlerF(hf http.HandlerFunc) http.Handler { - ctx := context.Background() - return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - hf(w, r) - })) -} - -// HandlerCtx wraps the provided final handler with all the middleware appended to -// the chain and return a new standard http.Handler instance. -func (c Chain) HandlerCtx(ctx context.Context, xh HandlerC) http.Handler { - return New(ctx, c.HandlerC(xh)) -} - -// HandlerC wraps the provided final handler with all the middleware appended to -// the chain and returns a HandlerC instance. -func (c Chain) HandlerC(xh HandlerC) HandlerC { - for i := len(c) - 1; i >= 0; i-- { - xh = c[i](xh) - } - return xh -} - -// HandlerCF wraps the provided final handler func with all the middleware appended to -// the chain and returns a HandlerC instance. -// -// HandlerCF is equivalent to: -// c.HandlerC(xhandler.HandlerFuncC(xhc)) -func (c Chain) HandlerCF(xhc HandlerFuncC) HandlerC { - return c.HandlerC(HandlerFuncC(xhc)) -} diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/middleware.go b/Godeps/_workspace/src/github.com/rs/xhandler/middleware.go deleted file mode 100644 index 5de136419609c..0000000000000 --- a/Godeps/_workspace/src/github.com/rs/xhandler/middleware.go +++ /dev/null @@ -1,59 +0,0 @@ -package xhandler - -import ( - "net/http" - "time" - - "golang.org/x/net/context" -) - -// CloseHandler returns a Handler cancelling the context when the client -// connection close unexpectedly. -func CloseHandler(next HandlerC) HandlerC { - return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - // Cancel the context if the client closes the connection - if wcn, ok := w.(http.CloseNotifier); ok { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - - notify := wcn.CloseNotify() - go func() { - select { - case <-notify: - cancel() - case <-ctx.Done(): - } - }() - } - - next.ServeHTTPC(ctx, w, r) - }) -} - -// TimeoutHandler returns a Handler which adds a timeout to the context. -// -// Child handlers have the responsability to obey the context deadline and to return -// an appropriate error (or not) response in case of timeout. -func TimeoutHandler(timeout time.Duration) func(next HandlerC) HandlerC { - return func(next HandlerC) HandlerC { - return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - ctx, _ = context.WithTimeout(ctx, timeout) - next.ServeHTTPC(ctx, w, r) - }) - } -} - -// If is a special handler that will skip insert the condNext handler only if a condition -// applies at runtime. -func If(cond func(ctx context.Context, w http.ResponseWriter, r *http.Request) bool, condNext func(next HandlerC) HandlerC) func(next HandlerC) HandlerC { - return func(next HandlerC) HandlerC { - return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - if cond(ctx, w, r) { - condNext(next).ServeHTTPC(ctx, w, r) - } else { - next.ServeHTTPC(ctx, w, r) - } - }) - } -} diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go b/Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go deleted file mode 100644 index 718c253223db1..0000000000000 --- a/Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package xhandler provides a bridge between http.Handler and net/context. 
-// -// xhandler enforces net/context in your handlers without sacrificing -// compatibility with existing http.Handlers nor imposing a specific router. -// -// Thanks to net/context deadline management, xhandler is able to enforce -// a per request deadline and will cancel the context in when the client close -// the connection unexpectedly. -// -// You may create net/context aware middlewares pretty much the same way as -// you would do with http.Handler. -package xhandler - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// HandlerC is a net/context aware http.Handler -type HandlerC interface { - ServeHTTPC(context.Context, http.ResponseWriter, *http.Request) -} - -// HandlerFuncC type is an adapter to allow the use of ordinary functions -// as a xhandler.Handler. If f is a function with the appropriate signature, -// xhandler.HandlerFuncC(f) is a xhandler.Handler object that calls f. -type HandlerFuncC func(context.Context, http.ResponseWriter, *http.Request) - -// ServeHTTPC calls f(ctx, w, r). -func (f HandlerFuncC) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) { - f(ctx, w, r) -} - -// New creates a conventional http.Handler injecting the provided root -// context to sub handlers. This handler is used as a bridge between conventional -// http.Handler and context aware handlers. -func New(ctx context.Context, h HandlerC) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - h.ServeHTTPC(ctx, w, r) - }) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/.travis.yml similarity index 50% rename from Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/.travis.yml index 87ba52f98eae9..76f38a482bec2 100644 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/.travis.yml @@ -2,18 +2,22 @@ language: go sudo: false go: -- 1.0.3 - 1.1.2 - 1.2.2 - 1.3.3 -- 1.4.2 -- 1.5.1 +- 1.4 +- 1.5.4 +- 1.6.2 - tip matrix: allow_failures: - go: tip +before_script: +- go get github.com/meatballhat/gfmxr/... + script: - go vet ./... - go test -v ./... +- gfmxr -c $(grep -c 'package main' README.md) -s README.md diff --git a/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/CHANGELOG.md b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/CHANGELOG.md new file mode 100644 index 0000000000000..f623e59b7e8dc --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/CHANGELOG.md @@ -0,0 +1,310 @@ +# Change Log + +**ATTN**: This project uses [semantic versioning](http://semver.org/). + +## [Unreleased] + +## [1.17.0] - 2016-05-09 +### Added +- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc` +- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool` +- Support for hiding commands by setting `Hidden: true` -- this will hide the + commands in help output + +### Changed +- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer + quoted in help text output. +- All flag types now include `(default: {value})` strings following usage when a + default value can be (reasonably) detected. +- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent + with non-slice flag types +- Apps now exit with a code of 3 if an unknown subcommand is specified + (previously they printed "No help topic for...", but still exited 0. 
This + makes it easier to script around apps built using `cli` since they can trust + that a 0 exit code indicated a successful execution. +- cleanups based on [Go Report Card + feedback](https://goreportcard.com/report/github.com/codegangsta/cli) + +## [1.16.0] - 2016-05-02 +### Added +- `Hidden` field on all flag struct types to omit from generated help text + +### Changed +- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from +generated help text via the `Hidden` field + +### Fixed +- handling of error values in `HandleAction` and `HandleExitCoder` + +## [1.15.0] - 2016-04-30 +### Added +- This file! +- Support for placeholders in flag usage strings +- `App.Metadata` map for arbitrary data/state management +- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after +parsing. +- Support for nested lookup of dot-delimited keys in structures loaded from +YAML. + +### Changed +- The `App.Action` and `Command.Action` now prefer a return signature of +`func(*cli.Context) error`, as defined by `cli.ActionFunc`. If a non-nil +`error` is returned, there may be two outcomes: + - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called + automatically + - Else the error is bubbled up and returned from `App.Run` +- Specifying an `Action` with the legacy return signature of +`func(*cli.Context)` will produce a deprecation message to stderr +- Specifying an `Action` that is not a `func` type will produce a non-zero exit +from `App.Run` +- Specifying an `Action` func that has an invalid (input) signature will +produce a non-zero exit from `App.Run` + +### Deprecated +- +`cli.App.RunAndExitOnError`, which should now be done by returning an error +that fulfills `cli.ExitCoder` to `cli.App.Run`. +- the legacy signature for +`cli.App.Action` of `func(*cli.Context)`, which should now have a return +signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`. + +### Fixed +- Added missing `*cli.Context.GlobalFloat64` method + +## [1.14.0] - 2016-04-03 (backfilled 2016-04-25) +### Added +- Codebeat badge +- Support for categorization via `CategorizedHelp` and `Categories` on app. + +### Changed +- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`. + +### Fixed +- Ensure version is not shown in help text when `HideVersion` set. + +## [1.13.0] - 2016-03-06 (backfilled 2016-04-25) +### Added +- YAML file input support. +- `NArg` method on context. + +## [1.12.0] - 2016-02-17 (backfilled 2016-04-25) +### Added +- Custom usage error handling. +- Custom text support in `USAGE` section of help output. +- Improved help messages for empty strings. +- AppVeyor CI configuration. + +### Changed +- Removed `panic` from default help printer func. +- De-duping and optimizations. + +### Fixed +- Correctly handle `Before`/`After` at command level when no subcommands. +- Case of literal `-` argument causing flag reordering. +- Environment variable hints on Windows. +- Docs updates. + +## [1.11.1] - 2015-12-21 (backfilled 2016-04-25) +### Changed +- Use `path.Base` in `Name` and `HelpName` +- Export `GetName` on flag types. + +### Fixed +- Flag parsing when skipping is enabled. +- Test output cleanup. +- Move completion check to account for empty input case. + +## [1.11.0] - 2015-11-15 (backfilled 2016-04-25) +### Added +- Destination scan support for flags. +- Testing against `tip` in Travis CI config. + +### Changed +- Go version in Travis CI config. + +### Fixed +- Removed redundant tests. +- Use correct example naming in tests. 
+ +## [1.10.2] - 2015-10-29 (backfilled 2016-04-25) +### Fixed +- Remove unused var in bash completion. + +## [1.10.1] - 2015-10-21 (backfilled 2016-04-25) +### Added +- Coverage and reference logos in README. + +### Fixed +- Use specified values in help and version parsing. +- Only display app version and help message once. + +## [1.10.0] - 2015-10-06 (backfilled 2016-04-25) +### Added +- More tests for existing functionality. +- `ArgsUsage` at app and command level for help text flexibility. + +### Fixed +- Honor `HideHelp` and `HideVersion` in `App.Run`. +- Remove juvenile word from README. + +## [1.9.0] - 2015-09-08 (backfilled 2016-04-25) +### Added +- `FullName` on command with accompanying help output update. +- Set default `$PROG` in bash completion. + +### Changed +- Docs formatting. + +### Fixed +- Removed self-referential imports in tests. + +## [1.8.0] - 2015-06-30 (backfilled 2016-04-25) +### Added +- Support for `Copyright` at app level. +- `Parent` func at context level to walk up context lineage. + +### Fixed +- Global flag processing at top level. + +## [1.7.1] - 2015-06-11 (backfilled 2016-04-25) +### Added +- Aggregate errors from `Before`/`After` funcs. +- Doc comments on flag structs. +- Include non-global flags when checking version and help. +- Travis CI config updates. + +### Fixed +- Ensure slice type flags have non-nil values. +- Collect global flags from the full command hierarchy. +- Docs prose. + +## [1.7.0] - 2015-05-03 (backfilled 2016-04-25) +### Changed +- `HelpPrinter` signature includes output writer. + +### Fixed +- Specify go 1.1+ in docs. +- Set `Writer` when running command as app. + +## [1.6.0] - 2015-03-23 (backfilled 2016-04-25) +### Added +- Multiple author support. +- `NumFlags` at context level. +- `Aliases` at command level. + +### Deprecated +- `ShortName` at command level. + +### Fixed +- Subcommand help output. +- Backward compatible support for deprecated `Author` and `Email` fields. +- Docs regarding `Names`/`Aliases`. + +## [1.5.0] - 2015-02-20 (backfilled 2016-04-25) +### Added +- `After` hook func support at app and command level. + +### Fixed +- Use parsed context when running command as subcommand. +- Docs prose. + +## [1.4.1] - 2015-01-09 (backfilled 2016-04-25) +### Added +- Support for hiding `-h / --help` flags, but not `help` subcommand. +- Stop flag parsing after `--`. + +### Fixed +- Help text for generic flags to specify single value. +- Use double quotes in output for defaults. +- Use `ParseInt` instead of `ParseUint` for int environment var values. +- Use `0` as base when parsing int environment var values. + +## [1.4.0] - 2014-12-12 (backfilled 2016-04-25) +### Added +- Support for environment variable lookup "cascade". +- Support for `Stdout` on app for output redirection. + +### Fixed +- Print command help instead of app help in `ShowCommandHelp`. + +## [1.3.1] - 2014-11-13 (backfilled 2016-04-25) +### Added +- Docs and example code updates. + +### Changed +- Default `-v / --version` flag made optional. + +## [1.3.0] - 2014-08-10 (backfilled 2016-04-25) +### Added +- `FlagNames` at context level. +- Exposed `VersionPrinter` var for more control over version output. +- Zsh completion hook. +- `AUTHOR` section in default app help template. +- Contribution guidelines. +- `DurationFlag` type. + +## [1.2.0] - 2014-08-02 +### Added +- Support for environment variable defaults on flags plus tests. + +## [1.1.0] - 2014-07-15 +### Added +- Bash completion. +- Optional hiding of built-in help command. 
+- Optional skipping of flag parsing at command level. +- `Author`, `Email`, and `Compiled` metadata on app. +- `Before` hook func support at app and command level. +- `CommandNotFound` func support at app level. +- Command reference available on context. +- `GenericFlag` type. +- `Float64Flag` type. +- `BoolTFlag` type. +- `IsSet` flag helper on context. +- More flag lookup funcs at context level. +- More tests & docs. + +### Changed +- Help template updates to account for presence/absence of flags. +- Separated subcommand help template. +- Exposed `HelpPrinter` var for more control over help output. + +## [1.0.0] - 2013-11-01 +### Added +- `help` flag in default app flag set and each command flag set. +- Custom handling of argument parsing errors. +- Command lookup by name at app level. +- `StringSliceFlag` type and supporting `StringSlice` type. +- `IntSliceFlag` type and supporting `IntSlice` type. +- Slice type flag lookups by name at context level. +- Export of app and command help functions. +- More tests & docs. + +## 0.1.0 - 2013-07-22 +### Added +- Initial implementation. + +[Unreleased]: https://github.com/codegangsta/cli/compare/v1.17.0...HEAD +[1.17.0]: https://github.com/codegangsta/cli/compare/v1.16.0...v1.17.0 +[1.16.0]: https://github.com/codegangsta/cli/compare/v1.15.0...v1.16.0 +[1.15.0]: https://github.com/codegangsta/cli/compare/v1.14.0...v1.15.0 +[1.14.0]: https://github.com/codegangsta/cli/compare/v1.13.0...v1.14.0 +[1.13.0]: https://github.com/codegangsta/cli/compare/v1.12.0...v1.13.0 +[1.12.0]: https://github.com/codegangsta/cli/compare/v1.11.1...v1.12.0 +[1.11.1]: https://github.com/codegangsta/cli/compare/v1.11.0...v1.11.1 +[1.11.0]: https://github.com/codegangsta/cli/compare/v1.10.2...v1.11.0 +[1.10.2]: https://github.com/codegangsta/cli/compare/v1.10.1...v1.10.2 +[1.10.1]: https://github.com/codegangsta/cli/compare/v1.10.0...v1.10.1 +[1.10.0]: https://github.com/codegangsta/cli/compare/v1.9.0...v1.10.0 +[1.9.0]: https://github.com/codegangsta/cli/compare/v1.8.0...v1.9.0 +[1.8.0]: https://github.com/codegangsta/cli/compare/v1.7.1...v1.8.0 +[1.7.1]: https://github.com/codegangsta/cli/compare/v1.7.0...v1.7.1 +[1.7.0]: https://github.com/codegangsta/cli/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/codegangsta/cli/compare/v1.5.0...v1.6.0 +[1.5.0]: https://github.com/codegangsta/cli/compare/v1.4.1...v1.5.0 +[1.4.1]: https://github.com/codegangsta/cli/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/codegangsta/cli/compare/v1.3.1...v1.4.0 +[1.3.1]: https://github.com/codegangsta/cli/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/codegangsta/cli/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/codegangsta/cli/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/codegangsta/cli/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/codegangsta/cli/compare/v0.1.0...v1.0.0 diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/LICENSE diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/README.md b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/README.md similarity index 52% rename from Godeps/_workspace/src/github.com/codegangsta/cli/README.md rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/README.md index ae0a4ca3ac7f9..c1709cef82f08 100644 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/README.md +++ 
b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/README.md @@ -1,22 +1,24 @@ [![Coverage](http://gocover.io/_badge/github.com/codegangsta/cli?0)](http://gocover.io/github.com/codegangsta/cli) [![Build Status](https://travis-ci.org/codegangsta/cli.svg?branch=master)](https://travis-ci.org/codegangsta/cli) [![GoDoc](https://godoc.org/github.com/codegangsta/cli?status.svg)](https://godoc.org/github.com/codegangsta/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-codegangsta-cli) +[![Go Report Card](https://goreportcard.com/badge/codegangsta/cli)](https://goreportcard.com/report/codegangsta/cli) -# cli.go +# cli -`cli.go` is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. +cli is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. ## Overview Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app. -**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive! +**This is where cli comes into play.** cli makes command line programming fun, organized, and expressive! ## Installation Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html). -To install `cli.go`, simply run: +To install cli, simply run: ``` $ go get github.com/codegangsta/cli ``` @@ -28,7 +30,7 @@ export PATH=$PATH:$GOPATH/bin ## Getting Started -One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`. +One of the philosophies behind cli is that an API should be playful and full of discovery. So a cli app can be as little as one line of code in `main()`. ``` go package main @@ -45,11 +47,16 @@ func main() { This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation: + ``` go package main import ( + "fmt" "os" + "github.com/codegangsta/cli" ) @@ -57,10 +64,11 @@ func main() { app := cli.NewApp() app.Name = "boom" app.Usage = "make an explosive entrance" - app.Action = func(c *cli.Context) { - println("boom! I say!") + app.Action = func(c *cli.Context) error { + fmt.Println("boom! I say!") + return nil } - + app.Run(os.Args) } ``` @@ -73,11 +81,16 @@ Being a programmer can be a lonely job. Thankfully by the power of automation th Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it: + ``` go package main import ( + "fmt" "os" + "github.com/codegangsta/cli" ) @@ -85,8 +98,9 @@ func main() { app := cli.NewApp() app.Name = "greet" app.Usage = "fight the loneliness!" - app.Action = func(c *cli.Context) { - println("Hello friend!") + app.Action = func(c *cli.Context) error { + fmt.Println("Hello friend!") + return nil } app.Run(os.Args) @@ -106,7 +120,7 @@ $ greet Hello friend! 
``` -`cli.go` also generates neat help text: +cli also generates neat help text: ``` $ greet help @@ -132,8 +146,9 @@ You can lookup arguments by calling the `Args` function on `cli.Context`. ``` go ... -app.Action = func(c *cli.Context) { - println("Hello", c.Args()[0]) +app.Action = func(c *cli.Context) error { + fmt.Println("Hello", c.Args()[0]) + return nil } ... ``` @@ -151,16 +166,17 @@ app.Flags = []cli.Flag { Usage: "language for the greeting", }, } -app.Action = func(c *cli.Context) { +app.Action = func(c *cli.Context) error { name := "someone" - if len(c.Args()) > 0 { + if c.NArg() > 0 { name = c.Args()[0] } if c.String("lang") == "spanish" { - println("Hola", name) + fmt.Println("Hola", name) } else { - println("Hello", name) + fmt.Println("Hello", name) } + return nil } ... ``` @@ -178,22 +194,45 @@ app.Flags = []cli.Flag { Destination: &language, }, } -app.Action = func(c *cli.Context) { +app.Action = func(c *cli.Context) error { name := "someone" - if len(c.Args()) > 0 { + if c.NArg() > 0 { name = c.Args()[0] } if language == "spanish" { - println("Hola", name) + fmt.Println("Hola", name) } else { - println("Hello", name) + fmt.Println("Hello", name) } + return nil } ... ``` See full list of flags at http://godoc.org/github.com/codegangsta/cli +#### Placeholder Values + +Sometimes it's useful to specify a flag's value within the usage string itself. Such placeholders are +indicated with back quotes. + +For example this: + +```go +cli.StringFlag{ + Name: "config, c", + Usage: "Load configuration from `FILE`", +} +``` + +Will result in help output like: + +``` +--config FILE, -c FILE Load configuration from FILE +``` + +Note that only the first placeholder is used. Subsequent back-quoted words will be left as-is. + #### Alternate Names You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g. @@ -238,6 +277,49 @@ app.Flags = []cli.Flag { } ``` +#### Values from alternate input sources (YAML and others) + +There is a separate package altsrc that adds support for getting flag values from other input sources like YAML. + +In order to get values for a flag from an alternate input source the following code would be added to wrap an existing cli.Flag like below: + +``` go + altsrc.NewIntFlag(cli.IntFlag{Name: "test"}) +``` + +Initialization must also occur for these flags. Below is an example initializing getting data from a yaml file below. + +``` go + command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) +``` + +The code above will use the "load" string as a flag name to get the file name of a yaml file from the cli.Context. +It will then use that file name to initialize the yaml input source for any flags that are defined on that command. +As a note the "load" flag used would also have to be defined on the command flags in order for this code snipped to work. + +Currently only YAML files are supported but developers can add support for other input sources by implementing the +altsrc.InputSourceContext for their given sources. 
+ +Here is a more complete sample of a command using YAML support: + +``` go + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + // Action to run + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + err := command.Run(c) +``` + ### Subcommands Subcommands can be defined for a more git-like command line app. @@ -249,16 +331,18 @@ app.Commands = []cli.Command{ Name: "add", Aliases: []string{"a"}, Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("added task: ", c.Args().First()) + return nil }, }, { Name: "complete", Aliases: []string{"c"}, Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil }, }, { @@ -269,15 +353,17 @@ app.Commands = []cli.Command{ { Name: "add", Usage: "add a new template", - Action: func(c *cli.Context) { - println("new task template: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("new task template: ", c.Args().First()) + return nil }, }, { Name: "remove", Usage: "remove an existing template", - Action: func(c *cli.Context) { - println("removed task template: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("removed task template: ", c.Args().First()) + return nil }, }, }, @@ -286,6 +372,80 @@ app.Commands = []cli.Command{ ... ``` +### Subcommands categories + +For additional organization in apps that have many subcommands, you can +associate a category for each command to group them together in the help +output. + +E.g. + +```go +... + app.Commands = []cli.Command{ + { + Name: "noop", + }, + { + Name: "add", + Category: "template", + }, + { + Name: "remove", + Category: "template", + }, + } +... +``` + +Will include: + +``` +... +COMMANDS: + noop + + Template actions: + add + remove +... +``` + +### Exit code + +Calling `App.Run` will not automatically call `os.Exit`, which means that by +default the exit code will "fall through" to being `0`. 
An explicit exit code +may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a +`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: + +``` go +package main + +import ( + "os" + + "github.com/codegangsta/cli" +) + +func main() { + app := cli.NewApp() + app.Flags = []cli.Flag{ + cli.BoolTFlag{ + Name: "ginger-crouton", + Usage: "is it in the soup?", + }, + } + app.Action = func(ctx *cli.Context) error { + if !ctx.Bool("ginger-crouton") { + return cli.NewExitError("it is not in the soup", 86) + } + return nil + } + + app.Run(os.Args) +} +``` + ### Bash Completion You can enable completion commands by setting the `EnableBashCompletion` @@ -303,12 +463,13 @@ app.Commands = []cli.Command{ Name: "complete", Aliases: []string{"c"}, Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil }, BashComplete: func(c *cli.Context) { // This will complete if no args are passed - if len(c.Args()) > 0 { + if c.NArg() > 0 { return } for _, t := range tasks { @@ -343,6 +504,72 @@ Alternatively, you can just document that users should source the generic `autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set to the name of their program (as above). +### Generated Help Text Customization + +All of the help text generation may be customized, and at multiple levels. The +templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and +`SubcommandHelpTemplate` which may be reassigned or augmented, and full override +is possible by assigning a compatible func to the `cli.HelpPrinter` variable, +e.g.: + + +``` go +package main + +import ( + "fmt" + "io" + "os" + + "github.com/codegangsta/cli" +) + +func main() { + // EXAMPLE: Append to an existing template + cli.AppHelpTemplate = fmt.Sprintf(`%s + +WEBSITE: http://awesometown.example.com + +SUPPORT: support@awesometown.example.com + +`, cli.AppHelpTemplate) + + // EXAMPLE: Override a template + cli.AppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command +[command options]{{end}} {{if +.ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{if len .Authors}} +AUTHOR(S): + {{range .Authors}}{{ . }}{{end}} + {{end}}{{if .Commands}} +COMMANDS: +{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t" +}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}} +GLOBAL OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright }} +COPYRIGHT: + {{.Copyright}} + {{end}}{{if .Version}} +VERSION: + {{.Version}} + {{end}} +` + + // EXAMPLE: Replace the `HelpPrinter` func + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Println("Ha HA. I pwnd the help!!1") + } + + cli.NewApp().Run(os.Args) +} +``` + ## Contribution Guidelines Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch. 
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/app.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/app.go similarity index 53% rename from Godeps/_workspace/src/github.com/codegangsta/cli/app.go rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/app.go index 1ea3fd0b17df8..7c9b95804217e 100644 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/app.go +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/app.go @@ -5,10 +5,27 @@ import ( "io" "io/ioutil" "os" - "path" + "path/filepath" + "reflect" + "sort" "time" ) +var ( + changeLogURL = "https://github.com/codegangsta/cli/blob/master/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) + + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + + errNonFuncAction = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be a func of type `cli.ActionFunc`. %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) + errInvalidActionSignature = NewExitError("ERROR invalid Action signature. "+ + fmt.Sprintf("Must be `cli.ActionFunc`. %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) +) + // App is the main structure of a cli application. It is recommended that // an app be created with the cli.NewApp() function type App struct { @@ -32,24 +49,27 @@ type App struct { EnableBashCompletion bool // Boolean to hide built-in help command HideHelp bool - // Boolean to hide built-in version flag + // Boolean to hide built-in version flag and the VERSION section of help HideVersion bool + // Populate on app startup, only gettable through method Categories() + categories CommandCategories // An action to execute when the bash-completion flag is set - BashComplete func(context *Context) + BashComplete BashCompleteFunc // An action to execute before any subcommands are run, but after the context is ready // If a non-nil error is returned, no subcommands are run - Before func(context *Context) error + Before BeforeFunc // An action to execute after any subcommands are run, but after the subcommand has finished // It is run even if Action() panics - After func(context *Context) error + After AfterFunc // The action to execute when no subcommands are specified - Action func(context *Context) + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + // Execute this function if the proper command cannot be found - CommandNotFound func(context *Context, command string) - // Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages. - // This function is able to replace the original error messages. - // If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted. 
- OnUsageError func(context *Context, err error, isSubcommand bool) error + CommandNotFound CommandNotFoundFunc + // Execute this function if an usage error occurs + OnUsageError OnUsageErrorFunc // Compilation date Compiled time.Time // List of all authors who contributed @@ -62,6 +82,12 @@ type App struct { Email string // Writer writer to write output to Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // Other custom info + Metadata map[string]interface{} + + didSetup bool } // Tries to find out when this binary was compiled. @@ -74,11 +100,12 @@ func compileTime() time.Time { return info.ModTime() } -// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action. +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. func NewApp() *App { return &App{ - Name: path.Base(os.Args[0]), - HelpName: path.Base(os.Args[0]), + Name: filepath.Base(os.Args[0]), + HelpName: filepath.Base(os.Args[0]), Usage: "A new cli application", UsageText: "", Version: "0.0.0", @@ -89,8 +116,16 @@ func NewApp() *App { } } -// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination -func (a *App) Run(arguments []string) (err error) { +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. +func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + if a.Author != "" || a.Email != "" { a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) } @@ -104,6 +139,12 @@ func (a *App) Run(arguments []string) (err error) { } a.Commands = newCmds + a.categories = CommandCategories{} + for _, command := range a.Commands { + a.categories = a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories) + // append help to commands if a.Command(helpCommand.Name) == nil && !a.HideHelp { a.Commands = append(a.Commands, helpCommand) @@ -120,6 +161,12 @@ func (a *App) Run(arguments []string) (err error) { if !a.HideVersion { a.appendFlag(VersionFlag) } +} + +// Run is the entry point to the cli app. 
Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() // parse flags set := flagSet(a.Name, a.Flags) @@ -140,12 +187,12 @@ func (a *App) Run(arguments []string) (err error) { if err != nil { if a.OnUsageError != nil { err := a.OnUsageError(context, err, false) - return err - } else { - fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") - ShowAppHelp(context) + HandleExitCoder(err) return err } + fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") + ShowAppHelp(context) + return err } if !a.HideHelp && checkHelp(context) { @@ -171,10 +218,12 @@ func (a *App) Run(arguments []string) (err error) { } if a.Before != nil { - err = a.Before(context) - if err != nil { - fmt.Fprintf(a.Writer, "%v\n\n", err) + beforeErr := a.Before(context) + if beforeErr != nil { + fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) ShowAppHelp(context) + HandleExitCoder(beforeErr) + err = beforeErr return err } } @@ -189,19 +238,25 @@ func (a *App) Run(arguments []string) (err error) { } // Run default Action - a.Action(context) - return nil + err = HandleAction(a.Action, context) + + HandleExitCoder(err) + return err } -// Another entry point to the cli app, takes care of passing arguments and error handling +// DEPRECATED: Another entry point to the cli app, takes care of passing arguments and error handling func (a *App) RunAndExitOnError() { + fmt.Fprintf(a.errWriter(), + "DEPRECATED cli.App.RunAndExitOnError. %s See %s\n", + contactSysadmin, runAndExitOnErrorDeprecationURL) if err := a.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) + fmt.Fprintln(a.errWriter(), err) + OsExiter(1) } } -// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags func (a *App) RunAsSubcommand(ctx *Context) (err error) { // append help to commands if len(a.Commands) > 0 { @@ -252,12 +307,12 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { if err != nil { if a.OnUsageError != nil { err = a.OnUsageError(context, err, true) - return err - } else { - fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") - ShowSubcommandHelp(context) + HandleExitCoder(err) return err } + fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") + ShowSubcommandHelp(context) + return err } if len(a.Commands) > 0 { @@ -274,6 +329,7 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { defer func() { afterErr := a.After(context) if afterErr != nil { + HandleExitCoder(err) if err != nil { err = NewMultiError(err, afterErr) } else { @@ -284,8 +340,10 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } if a.Before != nil { - err := a.Before(context) - if err != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + HandleExitCoder(beforeErr) + err = beforeErr return err } } @@ -300,12 +358,13 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } // Run default Action - a.Action(context) + err = HandleAction(a.Action, context) - return nil + HandleExitCoder(err) + return err } -// Returns the named command on App. Returns nil if the command does not exist +// Command returns the named command on App. 
Returns nil if the command does not exist func (a *App) Command(name string) *Command { for _, c := range a.Commands { if c.HasName(name) { @@ -316,6 +375,46 @@ func (a *App) Command(name string) *Command { return nil } +// Categories returns a slice containing all the categories with the commands they contain +func (a *App) Categories() CommandCategories { + return a.categories +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []*CommandCategory { + ret := []*CommandCategory{} + for _, category := range a.categories { + if visible := func() *CommandCategory { + for _, command := range category.Commands { + if !command.Hidden { + return category + } + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []Command { + ret := []Command{} + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + func (a *App) hasFlag(flag Flag) bool { for _, f := range a.Flags { if flag == f { @@ -326,6 +425,16 @@ func (a *App) hasFlag(flag Flag) bool { return false } +func (a *App) errWriter() io.Writer { + + // When the app ErrWriter is nil use the package level one. + if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + func (a *App) appendFlag(flag Flag) { if !a.hasFlag(flag) { a.Flags = append(a.Flags, flag) @@ -347,3 +456,43 @@ func (a Author) String() string { return fmt.Sprintf("%v %v", a.Name, e) } + +// HandleAction uses ✧✧✧reflection✧✧✧ to figure out if the given Action is an +// ActionFunc, a func with the legacy signature for Action, or some other +// invalid thing. If it's an ActionFunc or a func with the legacy signature for +// Action, the func is run! +func HandleAction(action interface{}, context *Context) (err error) { + defer func() { + if r := recover(); r != nil { + switch r.(type) { + case error: + err = r.(error) + default: + err = NewExitError(fmt.Sprintf("ERROR unknown Action error: %v. See %s", r, appActionDeprecationURL), 2) + } + } + }() + + if reflect.TypeOf(action).Kind() != reflect.Func { + return errNonFuncAction + } + + vals := reflect.ValueOf(action).Call([]reflect.Value{reflect.ValueOf(context)}) + + if len(vals) == 0 { + fmt.Fprintf(ErrWriter, + "DEPRECATED Action signature. Must be `cli.ActionFunc`. %s See %s\n", + contactSysadmin, appActionDeprecationURL) + return nil + } + + if len(vals) > 1 { + return errInvalidActionSignature + } + + if retErr, ok := vals[0].Interface().(error); vals[0].IsValid() && ok { + return retErr + } + + return err +} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/appveyor.yml b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/appveyor.yml similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/appveyor.yml rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/appveyor.yml diff --git a/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/category.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/category.go new file mode 100644 index 0000000000000..1a6055023e72f --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/category.go @@ -0,0 +1,44 @@ +package cli + +// CommandCategories is a slice of *CommandCategory. 
+type CommandCategories []*CommandCategory + +// CommandCategory is a category containing commands. +type CommandCategory struct { + Name string + Commands Commands +} + +func (c CommandCategories) Less(i, j int) bool { + return c[i].Name < c[j].Name +} + +func (c CommandCategories) Len() int { + return len(c) +} + +func (c CommandCategories) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// AddCommand adds a command to a category. +func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { + for _, commandCategory := range c { + if commandCategory.Name == category { + commandCategory.Commands = append(commandCategory.Commands, command) + return c + } + } + return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *CommandCategory) VisibleCommands() []Command { + ret := []Command{} + for _, command := range c.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/cli.go similarity index 60% rename from Godeps/_workspace/src/github.com/codegangsta/cli/cli.go rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/cli.go index 31dc9124d1eb6..f0440c563ff8f 100644 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/cli.go @@ -10,31 +10,10 @@ // app := cli.NewApp() // app.Name = "greet" // app.Usage = "say a greeting" -// app.Action = func(c *cli.Context) { +// app.Action = func(c *cli.Context) error { // println("Greetings") // } // // app.Run(os.Args) // } package cli - -import ( - "strings" -) - -type MultiError struct { - Errors []error -} - -func NewMultiError(err ...error) MultiError { - return MultiError{Errors: err} -} - -func (m MultiError) Error() string { - errs := make([]string, len(m.Errors)) - for i, err := range m.Errors { - errs[i] = err.Error() - } - - return strings.Join(errs, "\n") -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/command.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/command.go similarity index 77% rename from Godeps/_workspace/src/github.com/codegangsta/cli/command.go rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/command.go index bbf42ae4069b5..8950ccae43c06 100644 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/command.go +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/command.go @@ -3,6 +3,7 @@ package cli import ( "fmt" "io/ioutil" + "sort" "strings" ) @@ -22,35 +23,40 @@ type Command struct { Description string // A short description of the arguments of this command ArgsUsage string + // The category the command is part of + Category string // The function to call when checking for bash command completions - BashComplete func(context *Context) + BashComplete BashCompleteFunc // An action to execute before any sub-subcommands are run, but after the context is ready // If a non-nil error is returned, no sub-subcommands are run - Before func(context *Context) error + Before BeforeFunc // An action to execute after any subcommands are run, but after the subcommand has finished // It is run even if Action() panics - After func(context *Context) error + After AfterFunc // The function to call when this command is invoked - Action func(context *Context) - // Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages. 
- // This function is able to replace the original error messages. - // If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted. - OnUsageError func(context *Context, err error) error + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc // List of child commands - Subcommands []Command + Subcommands Commands // List of flags to parse Flags []Flag // Treat all flags as normal arguments if true SkipFlagParsing bool // Boolean to hide built-in help command HideHelp bool + // Boolean to hide this command from help or completion + Hidden bool // Full name of command for help, defaults to full command name, including parent commands. HelpName string commandNamePath []string } -// Returns the full name of the command. +// FullName returns the full name of the command. // For subcommands this ensures that parent commands are part of the command path func (c Command) FullName() string { if c.commandNamePath == nil { @@ -59,7 +65,10 @@ func (c Command) FullName() string { return strings.Join(c.commandNamePath, " ") } -// Invokes the command given the context, parses ctx.Args() to generate command-specific flags +// Commands is a slice of Command +type Commands []Command + +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags func (c Command) Run(ctx *Context) (err error) { if len(c.Subcommands) > 0 { return c.startApp(ctx) @@ -120,14 +129,14 @@ func (c Command) Run(ctx *Context) (err error) { if err != nil { if c.OnUsageError != nil { - err := c.OnUsageError(ctx, err) - return err - } else { - fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) + err := c.OnUsageError(ctx, err, false) + HandleExitCoder(err) return err } + fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return err } nerr := normalizeFlags(c.Flags, set) @@ -137,6 +146,7 @@ func (c Command) Run(ctx *Context) (err error) { ShowCommandHelp(ctx, c.Name) return nerr } + context := NewContext(ctx.App, set, ctx) if checkCommandCompletions(context, c.Name) { @@ -151,6 +161,7 @@ func (c Command) Run(ctx *Context) (err error) { defer func() { afterErr := c.After(context) if afterErr != nil { + HandleExitCoder(err) if err != nil { err = NewMultiError(err, afterErr) } else { @@ -161,20 +172,26 @@ func (c Command) Run(ctx *Context) (err error) { } if c.Before != nil { - err := c.Before(context) + err = c.Before(context) if err != nil { fmt.Fprintln(ctx.App.Writer, err) fmt.Fprintln(ctx.App.Writer) ShowCommandHelp(ctx, c.Name) + HandleExitCoder(err) return err } } context.Command = c - c.Action(context) - return nil + err = HandleAction(c.Action, context) + + if err != nil { + HandleExitCoder(err) + } + return err } +// Names returns the names including short names and aliases. func (c Command) Names() []string { names := []string{c.Name} @@ -185,7 +202,7 @@ func (c Command) Names() []string { return append(names, c.Aliases...) 
} -// Returns true if Command.Name or Command.ShortName matches given name +// HasName returns true if Command.Name or Command.ShortName matches given name func (c Command) HasName(name string) bool { for _, n := range c.Names() { if n == name { @@ -197,7 +214,7 @@ func (c Command) HasName(name string) bool { func (c Command) startApp(ctx *Context) error { app := NewApp() - + app.Metadata = ctx.App.Metadata // set the name and usage app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) if c.HelpName == "" { @@ -227,6 +244,13 @@ func (c Command) startApp(ctx *Context) error { app.Email = ctx.App.Email app.Writer = ctx.App.Writer + app.categories = CommandCategories{} + for _, command := range c.Subcommands { + app.categories = app.categories.AddCommand(command.Category, command) + } + + sort.Sort(app.categories) + // bash completion app.EnableBashCompletion = ctx.App.EnableBashCompletion if c.BashComplete != nil { @@ -248,3 +272,8 @@ func (c Command) startApp(ctx *Context) error { return app.RunAsSubcommand(ctx) } + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/context.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/context.go similarity index 66% rename from Godeps/_workspace/src/github.com/codegangsta/cli/context.go rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/context.go index 0513d34f61ec9..c34246369005c 100644 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/context.go +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/context.go @@ -21,57 +21,62 @@ type Context struct { parentContext *Context } -// Creates a new context. For use in when invoking an App or Command action. +// NewContext creates a new context. For use in when invoking an App or Command action. 
func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { return &Context{App: app, flagSet: set, parentContext: parentCtx} } -// Looks up the value of a local int flag, returns 0 if no int flag exists +// Int looks up the value of a local int flag, returns 0 if no int flag exists func (c *Context) Int(name string) int { return lookupInt(name, c.flagSet) } -// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists +// Duration looks up the value of a local time.Duration flag, returns 0 if no +// time.Duration flag exists func (c *Context) Duration(name string) time.Duration { return lookupDuration(name, c.flagSet) } -// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists +// Float64 looks up the value of a local float64 flag, returns 0 if no float64 +// flag exists func (c *Context) Float64(name string) float64 { return lookupFloat64(name, c.flagSet) } -// Looks up the value of a local bool flag, returns false if no bool flag exists +// Bool looks up the value of a local bool flag, returns false if no bool flag exists func (c *Context) Bool(name string) bool { return lookupBool(name, c.flagSet) } -// Looks up the value of a local boolT flag, returns false if no bool flag exists +// BoolT looks up the value of a local boolT flag, returns false if no bool flag exists func (c *Context) BoolT(name string) bool { return lookupBoolT(name, c.flagSet) } -// Looks up the value of a local string flag, returns "" if no string flag exists +// String looks up the value of a local string flag, returns "" if no string flag exists func (c *Context) String(name string) string { return lookupString(name, c.flagSet) } -// Looks up the value of a local string slice flag, returns nil if no string slice flag exists +// StringSlice looks up the value of a local string slice flag, returns nil if no +// string slice flag exists func (c *Context) StringSlice(name string) []string { return lookupStringSlice(name, c.flagSet) } -// Looks up the value of a local int slice flag, returns nil if no int slice flag exists +// IntSlice looks up the value of a local int slice flag, returns nil if no int +// slice flag exists func (c *Context) IntSlice(name string) []int { return lookupIntSlice(name, c.flagSet) } -// Looks up the value of a local generic flag, returns nil if no generic flag exists +// Generic looks up the value of a local generic flag, returns nil if no generic +// flag exists func (c *Context) Generic(name string) interface{} { return lookupGeneric(name, c.flagSet) } -// Looks up the value of a global int flag, returns 0 if no int flag exists +// GlobalInt looks up the value of a global int flag, returns 0 if no int flag exists func (c *Context) GlobalInt(name string) int { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupInt(name, fs) @@ -79,7 +84,17 @@ func (c *Context) GlobalInt(name string) int { return 0 } -// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists +// GlobalFloat64 looks up the value of a global float64 flag, returns float64(0) +// if no float64 flag exists +func (c *Context) GlobalFloat64(name string) float64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return float64(0) +} + +// GlobalDuration looks up the value of a global time.Duration flag, returns 0 +// if no time.Duration flag exists func (c *Context) GlobalDuration(name string) time.Duration { if fs := lookupGlobalFlagSet(name, c); fs != nil { 
return lookupDuration(name, fs) @@ -87,7 +102,8 @@ func (c *Context) GlobalDuration(name string) time.Duration { return 0 } -// Looks up the value of a global bool flag, returns false if no bool flag exists +// GlobalBool looks up the value of a global bool flag, returns false if no bool +// flag exists func (c *Context) GlobalBool(name string) bool { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupBool(name, fs) @@ -95,7 +111,17 @@ func (c *Context) GlobalBool(name string) bool { return false } -// Looks up the value of a global string flag, returns "" if no string flag exists +// GlobalBoolT looks up the value of a global bool flag, returns true if no bool +// flag exists +func (c *Context) GlobalBoolT(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBoolT(name, fs) + } + return false +} + +// GlobalString looks up the value of a global string flag, returns "" if no +// string flag exists func (c *Context) GlobalString(name string) string { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupString(name, fs) @@ -103,7 +129,8 @@ func (c *Context) GlobalString(name string) string { return "" } -// Looks up the value of a global string slice flag, returns nil if no string slice flag exists +// GlobalStringSlice looks up the value of a global string slice flag, returns +// nil if no string slice flag exists func (c *Context) GlobalStringSlice(name string) []string { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupStringSlice(name, fs) @@ -111,7 +138,8 @@ func (c *Context) GlobalStringSlice(name string) []string { return nil } -// Looks up the value of a global int slice flag, returns nil if no int slice flag exists +// GlobalIntSlice looks up the value of a global int slice flag, returns nil if +// no int slice flag exists func (c *Context) GlobalIntSlice(name string) []int { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupIntSlice(name, fs) @@ -119,7 +147,8 @@ func (c *Context) GlobalIntSlice(name string) []int { return nil } -// Looks up the value of a global generic flag, returns nil if no generic flag exists +// GlobalGeneric looks up the value of a global generic flag, returns nil if no +// generic flag exists func (c *Context) GlobalGeneric(name string) interface{} { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupGeneric(name, fs) @@ -127,12 +156,22 @@ func (c *Context) GlobalGeneric(name string) interface{} { return nil } -// Returns the number of flags set +// NumFlags returns the number of flags set func (c *Context) NumFlags() int { return c.flagSet.NFlag() } -// Determines if the flag was actually set +// Set sets a context flag to a value. 
+func (c *Context) Set(name, value string) error { + return c.flagSet.Set(name, value) +} + +// GlobalSet sets a context flag to a value on the global flagset +func (c *Context) GlobalSet(name, value string) error { + return globalContext(c).flagSet.Set(name, value) +} + +// IsSet determines if the flag was actually set func (c *Context) IsSet(name string) bool { if c.setFlags == nil { c.setFlags = make(map[string]bool) @@ -143,7 +182,7 @@ func (c *Context) IsSet(name string) bool { return c.setFlags[name] == true } -// Determines if the global flag was actually set +// GlobalIsSet determines if the global flag was actually set func (c *Context) GlobalIsSet(name string) bool { if c.globalSetFlags == nil { c.globalSetFlags = make(map[string]bool) @@ -160,7 +199,7 @@ func (c *Context) GlobalIsSet(name string) bool { return c.globalSetFlags[name] } -// Returns a slice of flag names used in this context. +// FlagNames returns a slice of flag names used in this context. func (c *Context) FlagNames() (names []string) { for _, flag := range c.Command.Flags { name := strings.Split(flag.GetName(), ",")[0] @@ -172,7 +211,7 @@ func (c *Context) FlagNames() (names []string) { return } -// Returns a slice of global flag names used by the app. +// GlobalFlagNames returns a slice of global flag names used by the app. func (c *Context) GlobalFlagNames() (names []string) { for _, flag := range c.App.Flags { name := strings.Split(flag.GetName(), ",")[0] @@ -184,20 +223,26 @@ func (c *Context) GlobalFlagNames() (names []string) { return } -// Returns the parent context, if any +// Parent returns the parent context, if any func (c *Context) Parent() *Context { return c.parentContext } +// Args contains apps console arguments type Args []string -// Returns the command line arguments associated with the context. +// Args returns the command line arguments associated with the context. func (c *Context) Args() Args { args := Args(c.flagSet.Args()) return args } -// Returns the nth argument, or else a blank string +// NArg returns the number of the command line arguments. 
+func (c *Context) NArg() int {
+	return len(c.Args())
+}
+
+// Get returns the nth argument, or else a blank string
 func (a Args) Get(n int) string {
 	if len(a) > n {
 		return a[n]
@@ -205,12 +250,12 @@ func (a Args) Get(n int) string {
 	return ""
 }
 
-// Returns the first argument, or else a blank string
+// First returns the first argument, or else a blank string
 func (a Args) First() string {
 	return a.Get(0)
 }
 
-// Return the rest of the arguments (not the first one)
+// Tail returns the rest of the arguments (not the first one)
 // or else an empty string slice
 func (a Args) Tail() []string {
 	if len(a) >= 2 {
@@ -219,12 +264,12 @@ func (a Args) Tail() []string {
 	return []string{}
 }
 
-// Checks if there are any arguments present
+// Present checks if there are any arguments present
 func (a Args) Present() bool {
 	return len(a) != 0
 }
 
-// Swaps arguments at the given indexes
+// Swap swaps arguments at the given indexes
 func (a Args) Swap(from, to int) error {
 	if from >= len(a) || to >= len(a) {
 		return errors.New("index out of range")
@@ -233,6 +278,19 @@ func (a Args) Swap(from, to int) error {
 	return nil
 }
 
+func globalContext(ctx *Context) *Context {
+	if ctx == nil {
+		return nil
+	}
+
+	for {
+		if ctx.parentContext == nil {
+			return ctx
+		}
+		ctx = ctx.parentContext
+	}
+}
+
 func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet {
 	if ctx.parentContext != nil {
 		ctx = ctx.parentContext
diff --git a/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/errors.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/errors.go
new file mode 100644
index 0000000000000..ea551be16ac81
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/errors.go
@@ -0,0 +1,92 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// OsExiter is the function used when the app exits. If not set defaults to os.Exit.
+var OsExiter = os.Exit
+
+// ErrWriter is used to write errors to the user. This can be anything
+// implementing the io.Writer interface and defaults to os.Stderr.
+var ErrWriter io.Writer = os.Stderr
+
+// MultiError is an error that wraps multiple errors.
+type MultiError struct {
+	Errors []error
+}
+
+// NewMultiError creates a new MultiError. Pass in one or more errors.
+func NewMultiError(err ...error) MultiError {
+	return MultiError{Errors: err}
+}
+
+// Error implements the error interface.
+func (m MultiError) Error() string {
+	errs := make([]string, len(m.Errors))
+	for i, err := range m.Errors {
+		errs[i] = err.Error()
+	}
+
+	return strings.Join(errs, "\n")
+}
+
+// ExitCoder is the interface checked by `App` and `Command` for a custom exit
+// code
+type ExitCoder interface {
+	error
+	ExitCode() int
+}
+
+// ExitError fulfills both the builtin `error` interface and `ExitCoder`
+type ExitError struct {
+	exitCode int
+	message  string
+}
+
+// NewExitError makes a new *ExitError
+func NewExitError(message string, exitCode int) *ExitError {
+	return &ExitError{
+		exitCode: exitCode,
+		message:  message,
+	}
+}
+
+// Error returns the string message, fulfilling the interface required by
+// `error`
+func (ee *ExitError) Error() string {
+	return ee.message
+}
+
+// ExitCode returns the exit code, fulfilling the interface required by
+// `ExitCoder`
+func (ee *ExitError) ExitCode() int {
+	return ee.exitCode
+}
+
+// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if
+// so prints the error to stderr (if it is non-empty) and calls OsExiter with the
+// given exit code.
If the given error is a MultiError, then this func is +// called on all members of the Errors slice. +func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + fmt.Fprintln(ErrWriter, err) + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + for _, merr := range multiErr.Errors { + HandleExitCoder(merr) + } + } +} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/flag.go similarity index 71% rename from Godeps/_workspace/src/github.com/codegangsta/cli/flag.go rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/flag.go index e951c2df7736d..1e8112e7e3736 100644 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/flag.go @@ -4,24 +4,28 @@ import ( "flag" "fmt" "os" + "reflect" "runtime" "strconv" "strings" "time" ) -// This flag enables bash-completion for all commands and subcommands +const defaultPlaceholder = "value" + +// BashCompletionFlag enables bash-completion for all commands and subcommands var BashCompletionFlag = BoolFlag{ - Name: "generate-bash-completion", + Name: "generate-bash-completion", + Hidden: true, } -// This flag prints the version for the application +// VersionFlag prints the version for the application var VersionFlag = BoolFlag{ Name: "version, v", Usage: "print the version", } -// This flag prints the help for all commands and subcommands +// HelpFlag prints the help for all commands and subcommands // Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand // unless HideHelp is set to true) var HelpFlag = BoolFlag{ @@ -29,6 +33,10 @@ var HelpFlag = BoolFlag{ Usage: "show help", } +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + // Flag is a common interface related to parsing flags in cli. // For more advanced flag parsing techniques, it is recommended that // this interface be implemented. @@ -68,24 +76,14 @@ type GenericFlag struct { Value Generic Usage string EnvVar string + Hidden bool } // String returns the string representation of the generic flag to display the // help text to the user (uses the String() method of the generic flag to show // the value) func (f GenericFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage)) -} - -func (f GenericFlag) FormatValueHelp() string { - if f.Value == nil { - return "" - } - s := f.Value.String() - if len(s) == 0 { - return "" - } - return fmt.Sprintf("\"%s\"", s) + return FlagStringer(f) } // Apply takes the flagset and calls Set on the generic flag with the value @@ -107,6 +105,7 @@ func (f GenericFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of a flag. 
func (f GenericFlag) GetName() string { return f.Name } @@ -130,20 +129,19 @@ func (f *StringSlice) Value() []string { return *f } -// StringSlice is a string flag that can be specified multiple times on the +// StringSliceFlag is a string flag that can be specified multiple times on the // command-line type StringSliceFlag struct { Name string Value *StringSlice Usage string EnvVar string + Hidden bool } // String returns the usage func (f StringSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) + return FlagStringer(f) } // Apply populates the flag given the flag set and environment @@ -171,11 +169,12 @@ func (f StringSliceFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of a flag. func (f StringSliceFlag) GetName() string { return f.Name } -// StringSlice is an opaque type for []int to satisfy flag.Value +// IntSlice is an opaque type for []int to satisfy flag.Value type IntSlice []int // Set parses the value into an integer and appends it to the list of values @@ -183,9 +182,8 @@ func (f *IntSlice) Set(value string) error { tmp, err := strconv.Atoi(value) if err != nil { return err - } else { - *f = append(*f, tmp) } + *f = append(*f, tmp) return nil } @@ -206,13 +204,12 @@ type IntSliceFlag struct { Value *IntSlice Usage string EnvVar string + Hidden bool } // String returns the usage func (f IntSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) + return FlagStringer(f) } // Apply populates the flag given the flag set and environment @@ -226,7 +223,7 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) { s = strings.TrimSpace(s) err := newVal.Set(s) if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) + fmt.Fprintf(ErrWriter, err.Error()) } } f.Value = newVal @@ -243,6 +240,7 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f IntSliceFlag) GetName() string { return f.Name } @@ -253,11 +251,12 @@ type BoolFlag struct { Usage string EnvVar string Destination *bool + Hidden bool } // String returns a readable representation of this value (for usage defaults) func (f BoolFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) + return FlagStringer(f) } // Apply populates the flag given the flag set and environment @@ -285,6 +284,7 @@ func (f BoolFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f BoolFlag) GetName() string { return f.Name } @@ -296,11 +296,12 @@ type BoolTFlag struct { Usage string EnvVar string Destination *bool + Hidden bool } // String returns a readable representation of this value (for usage defaults) func (f BoolTFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) + return FlagStringer(f) } // Apply populates the flag given the flag set and environment @@ -328,6 +329,7 @@ func (f BoolTFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. 
func (f BoolTFlag) GetName() string { return f.Name } @@ -339,19 +341,12 @@ type StringFlag struct { Usage string EnvVar string Destination *string + Hidden bool } // String returns the usage func (f StringFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage)) -} - -func (f StringFlag) FormatValueHelp() string { - s := f.Value - if len(s) == 0 { - return "" - } - return fmt.Sprintf("\"%s\"", s) + return FlagStringer(f) } // Apply populates the flag given the flag set and environment @@ -375,6 +370,7 @@ func (f StringFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f StringFlag) GetName() string { return f.Name } @@ -387,11 +383,12 @@ type IntFlag struct { Usage string EnvVar string Destination *int + Hidden bool } // String returns the usage func (f IntFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) + return FlagStringer(f) } // Apply populates the flag given the flag set and environment @@ -418,6 +415,7 @@ func (f IntFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f IntFlag) GetName() string { return f.Name } @@ -430,11 +428,12 @@ type DurationFlag struct { Usage string EnvVar string Destination *time.Duration + Hidden bool } // String returns a readable representation of this value (for usage defaults) func (f DurationFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) + return FlagStringer(f) } // Apply populates the flag given the flag set and environment @@ -461,6 +460,7 @@ func (f DurationFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f DurationFlag) GetName() string { return f.Name } @@ -473,11 +473,12 @@ type Float64Flag struct { Usage string EnvVar string Destination *float64 + Hidden bool } // String returns the usage func (f Float64Flag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) + return FlagStringer(f) } // Apply populates the flag given the flag set and environment @@ -503,10 +504,21 @@ func (f Float64Flag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f Float64Flag) GetName() string { return f.Name } +func visibleFlags(fl []Flag) []Flag { + visible := []Flag{} + for _, flag := range fl { + if !reflect.ValueOf(flag).FieldByName("Hidden").Bool() { + visible = append(visible, flag) + } + } + return visible +} + func prefixFor(name string) (prefix string) { if len(name) == 1 { prefix = "-" @@ -517,16 +529,37 @@ func prefixFor(name string) (prefix string) { return } -func prefixedNames(fullName string) (prefixed string) { +// Returns the placeholder, if any, and the unquoted usage string. 
+func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(fullName, placeholder string) string { + var prefixed string parts := strings.Split(fullName, ",") for i, name := range parts { name = strings.Trim(name, " ") prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } if i < len(parts)-1 { prefixed += ", " } } - return + return prefixed } func withEnvHint(envVar, str string) string { @@ -544,3 +577,83 @@ func withEnvHint(envVar, str string) string { } return str + envText } + +func stringifyFlag(f Flag) string { + fv := reflect.ValueOf(f) + + switch f.(type) { + case IntSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyIntSliceFlag(f.(IntSliceFlag))) + case StringSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyStringSliceFlag(f.(StringSliceFlag))) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + val := fv.FieldByName("Value") + + if val.IsValid() { + needsPlaceholder = true + defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(" (default: %q)", val.String()) + } + } + + if defaultValueString == " (default: )" { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) + + return withEnvHint(fv.FieldByName("EnvVar").String(), + fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f IntSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyStringSliceFlag(f StringSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifySliceFlag(usage, name string, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault) +} diff --git a/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/funcs.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/funcs.go new file mode 100644 index 0000000000000..cba5e6cb0ccc6 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/funcs.go @@ -0,0 +1,28 @@ +package cli + +// BashCompleteFunc is an action to execute when the bash-completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are 
run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. +type FlagStringFunc func(Flag) string diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/help.go b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/help.go similarity index 57% rename from Godeps/_workspace/src/github.com/codegangsta/cli/help.go rename to Godeps/_workspace/src/gopkg.in/urfave/cli.v1/help.go index 15916f86aabae..801d2b167dd7d 100644 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/help.go +++ b/Godeps/_workspace/src/gopkg.in/urfave/cli.v1/help.go @@ -3,68 +3,74 @@ package cli import ( "fmt" "io" + "os" "strings" "text/tabwriter" "text/template" ) -// The text template for the Default help topic. +// AppHelpTemplate is the text template for the Default help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var AppHelpTemplate = `NAME: {{.Name}} - {{.Usage}} USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} - {{if .Version}} + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} + {{if .Version}}{{if not .HideVersion}} VERSION: {{.Version}} - {{end}}{{if len .Authors}} + {{end}}{{end}}{{if len .Authors}} AUTHOR(S): - {{range .Authors}}{{ . }}{{end}} - {{end}}{{if .Commands}} -COMMANDS: - {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} - {{end}}{{end}}{{if .Flags}} + {{range .Authors}}{{.}}{{end}} + {{end}}{{if .VisibleCommands}} +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{.Name}}{{with .ShortName}}, {{.}}{{end}}{{"\t"}}{{.Usage}}{{end}} +{{end}}{{end}}{{if .VisibleFlags}} GLOBAL OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{end}}{{if .Copyright }} + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright}} COPYRIGHT: {{.Copyright}} {{end}} ` -// The text template for the command help topic. +// CommandHelpTemplate is the text template for the command help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. 
var CommandHelpTemplate = `NAME: {{.HelpName}} - {{.Usage}} USAGE: - {{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Description}} + {{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} DESCRIPTION: - {{.Description}}{{end}}{{if .Flags}} + {{.Description}}{{end}}{{if .VisibleFlags}} OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{ end }} + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} ` -// The text template for the subcommand help topic. +// SubcommandHelpTemplate is the text template for the subcommand help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var SubcommandHelpTemplate = `NAME: {{.HelpName}} - {{.Usage}} USAGE: - {{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} -COMMANDS: - {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{.Name}}{{with .ShortName}}, {{.}}{{end}}{{"\t"}}{{.Usage}}{{end}} +{{end}}{{if .VisibleFlags}} OPTIONS: - {{range .Flags}}{{.}} + {{range .VisibleFlags}}{{.}} {{end}}{{end}} ` @@ -73,13 +79,14 @@ var helpCommand = Command{ Aliases: []string{"h"}, Usage: "Shows a list of commands or help for one command", ArgsUsage: "[command]", - Action: func(c *Context) { + Action: func(c *Context) error { args := c.Args() if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowAppHelp(c) + return ShowCommandHelp(c, args.First()) } + + ShowAppHelp(c) + return nil }, } @@ -88,65 +95,73 @@ var helpSubcommand = Command{ Aliases: []string{"h"}, Usage: "Shows a list of commands or help for one command", ArgsUsage: "[command]", - Action: func(c *Context) { + Action: func(c *Context) error { args := c.Args() if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowSubcommandHelp(c) + return ShowCommandHelp(c, args.First()) } + + return ShowSubcommandHelp(c) }, } // Prints help for the App or Command type helpPrinter func(w io.Writer, templ string, data interface{}) +// HelpPrinter is a function that writes the help output. If not set a default +// is used. The function signature is: +// func(w io.Writer, templ string, data interface{}) var HelpPrinter helpPrinter = printHelp -// Prints version for the App +// VersionPrinter prints the version for the App var VersionPrinter = printVersion +// ShowAppHelp is an action that displays the help. 
func ShowAppHelp(c *Context) { HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) } -// Prints the list of subcommands as the default app completion method +// DefaultAppComplete prints the list of subcommands as the default app completion method func DefaultAppComplete(c *Context) { for _, command := range c.App.Commands { + if command.Hidden { + continue + } for _, name := range command.Names() { fmt.Fprintln(c.App.Writer, name) } } } -// Prints help for the given command -func ShowCommandHelp(ctx *Context, command string) { +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { // show the subcommand help for a command with subcommands if command == "" { HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) - return + return nil } for _, c := range ctx.App.Commands { if c.HasName(command) { HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) - return + return nil } } - if ctx.App.CommandNotFound != nil { - ctx.App.CommandNotFound(ctx, command) - } else { - fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command) + if ctx.App.CommandNotFound == nil { + return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) } + + ctx.App.CommandNotFound(ctx, command) + return nil } -// Prints help for the given subcommand -func ShowSubcommandHelp(c *Context) { - ShowCommandHelp(c, c.Command.Name) +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + return ShowCommandHelp(c, c.Command.Name) } -// Prints the version number of the App +// ShowVersion prints the version number of the App func ShowVersion(c *Context) { VersionPrinter(c) } @@ -155,7 +170,7 @@ func printVersion(c *Context) { fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) } -// Prints the lists of commands within a given context +// ShowCompletions prints the lists of commands within a given context func ShowCompletions(c *Context) { a := c.App if a != nil && a.BashComplete != nil { @@ -163,7 +178,7 @@ func ShowCompletions(c *Context) { } } -// Prints the custom completions for a given command +// ShowCommandCompletions prints the custom completions for a given command func ShowCommandCompletions(ctx *Context, command string) { c := ctx.App.Command(command) if c != nil && c.BashComplete != nil { @@ -181,7 +196,10 @@ func printHelp(out io.Writer, templ string, data interface{}) { err := t.Execute(w, data) if err != nil { // If the writer is closed, t.Execute will fail, and there's nothing - // we can do to recover. We could send this to os.Stderr if we need. + // we can do to recover. 
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } return } w.Flush() diff --git a/cmd/ethtest/main.go b/cmd/ethtest/main.go index d8f9696360de3..450dadcbb790d 100644 --- a/cmd/ethtest/main.go +++ b/cmd/ethtest/main.go @@ -25,10 +25,10 @@ import ( "path/filepath" "strings" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/tests" + "gopkg.in/urfave/cli.v1" ) var ( diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 7d9b3a6c33719..ce8e171bde733 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -24,7 +24,6 @@ import ( "runtime" "time" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" @@ -33,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/logger/glog" + "gopkg.in/urfave/cli.v1" ) var ( diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go index 2c2308514f0bc..fd5a4bcd444b2 100644 --- a/cmd/geth/accountcmd.go +++ b/cmd/geth/accountcmd.go @@ -20,13 +20,13 @@ import ( "fmt" "io/ioutil" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" + "gopkg.in/urfave/cli.v1" ) var ( diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 4f47de5d7095c..3355b7a6ad7da 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -23,7 +23,6 @@ import ( "strconv" "time" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/console" @@ -32,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/logger/glog" + "gopkg.in/urfave/cli.v1" ) var ( diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 8bfe27fef38f4..cc7a40fd99392 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -20,9 +20,9 @@ import ( "os" "os/signal" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/console" + "gopkg.in/urfave/cli.v1" ) var ( diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 2639147c44868..0c782680b1130 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -28,7 +28,6 @@ import ( "strings" "time" - "github.com/codegangsta/cli" "github.com/ethereum/ethash" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" @@ -44,6 +43,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/release" "github.com/ethereum/go-ethereum/rlp" + "gopkg.in/urfave/cli.v1" ) const ( diff --git a/cmd/geth/monitorcmd.go b/cmd/geth/monitorcmd.go index 5d839b5a379d6..7058b432f0de2 100644 --- a/cmd/geth/monitorcmd.go +++ b/cmd/geth/monitorcmd.go @@ -26,11 +26,11 @@ import ( "sort" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rpc" "github.com/gizak/termui" + "gopkg.in/urfave/cli.v1" ) var ( diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 01a71c1f65a0b..e7ef9e2c7b4b9 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ 
-21,9 +21,9 @@ package main import ( "io" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/internal/debug" + "gopkg.in/urfave/cli.v1" ) // AppHelpTemplate is the test template for the default, global app help topic. diff --git a/cmd/utils/client.go b/cmd/utils/client.go index ec72a1a4b3fe4..cc964758007ac 100644 --- a/cmd/utils/client.go +++ b/cmd/utils/client.go @@ -20,9 +20,9 @@ import ( "fmt" "strings" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rpc" + "gopkg.in/urfave/cli.v1" ) // NewRemoteRPCClient returns a RPC client which connects to a running geth instance. diff --git a/cmd/utils/customflags.go b/cmd/utils/customflags.go index 4450065c14117..5cbccfe98b2a0 100644 --- a/cmd/utils/customflags.go +++ b/cmd/utils/customflags.go @@ -24,7 +24,7 @@ import ( "path" "strings" - "github.com/codegangsta/cli" + "gopkg.in/urfave/cli.v1" ) // Custom type which is registered in the flags library which cli uses for diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c476e1c779fe5..d2ba42801bf21 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -30,7 +30,6 @@ import ( "strings" "time" - "github.com/codegangsta/cli" "github.com/ethereum/ethash" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" @@ -51,6 +50,7 @@ import ( "github.com/ethereum/go-ethereum/release" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/whisper" + "gopkg.in/urfave/cli.v1" ) func init() { diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 5b1a9b23c46a7..9fc5fc4fe26c4 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -22,9 +22,9 @@ import ( _ "net/http/pprof" "runtime" - "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" + "gopkg.in/urfave/cli.v1" ) var ( From 63c5a46b8258762eb94953b71271fd54ee045457 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 10 Jun 2016 11:23:00 +0300 Subject: [PATCH 36/44] [release/1.4.7] cmd: fix CLI package deprecation warnings (cherry picked from commit 90e07b19abaa950eaaff2eecc4918b1d16ebbcaf) --- cmd/ethtest/main.go | 4 ++-- cmd/evm/main.go | 3 ++- cmd/geth/accountcmd.go | 15 ++++++++++----- cmd/geth/chaincmd.go | 16 ++++++++++------ cmd/geth/consolecmd.go | 16 +++++++++++----- cmd/geth/main.go | 20 ++++++++++++++------ cmd/geth/monitorcmd.go | 3 ++- cmd/geth/run_test.go | 5 ++++- 8 files changed, 55 insertions(+), 27 deletions(-) diff --git a/cmd/ethtest/main.go b/cmd/ethtest/main.go index 450dadcbb790d..e0ad0a7ea4416 100644 --- a/cmd/ethtest/main.go +++ b/cmd/ethtest/main.go @@ -183,7 +183,7 @@ func runSuite(test, file string) { } } -func setupApp(c *cli.Context) { +func setupApp(c *cli.Context) error { flagTest := c.GlobalString(TestFlag.Name) flagFile := c.GlobalString(FileFlag.Name) continueOnError = c.GlobalBool(ContinueOnErrorFlag.Name) @@ -196,8 +196,8 @@ func setupApp(c *cli.Context) { if err := runTestWithReader(flagTest, os.Stdin); err != nil { glog.Fatalln(err) } - } + return nil } func main() { diff --git a/cmd/evm/main.go b/cmd/evm/main.go index ce8e171bde733..e7b266d4e4efc 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -104,7 +104,7 @@ func init() { app.Action = run } -func run(ctx *cli.Context) { +func run(ctx *cli.Context) error { glog.SetToStderr(true) glog.SetV(ctx.GlobalInt(VerbosityFlag.Name)) @@ -154,6 +154,7 @@ num gc: %d fmt.Printf(" error: %v", e) } 
fmt.Println() + return nil } func main() { diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go index fd5a4bcd444b2..1415240ebd5b4 100644 --- a/cmd/geth/accountcmd.go +++ b/cmd/geth/accountcmd.go @@ -167,11 +167,12 @@ nodes. } ) -func accountList(ctx *cli.Context) { +func accountList(ctx *cli.Context) error { accman := utils.MakeAccountManager(ctx) for i, acct := range accman.Accounts() { fmt.Printf("Account #%d: {%x} %s\n", i, acct.Address, acct.File) } + return nil } // tries unlocking the specified account a few times. @@ -259,7 +260,7 @@ func ambiguousAddrRecovery(am *accounts.Manager, err *accounts.AmbiguousAddrErro } // accountCreate creates a new account into the keystore defined by the CLI flags. -func accountCreate(ctx *cli.Context) { +func accountCreate(ctx *cli.Context) error { accman := utils.MakeAccountManager(ctx) password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) @@ -268,11 +269,12 @@ func accountCreate(ctx *cli.Context) { utils.Fatalf("Failed to create account: %v", err) } fmt.Printf("Address: {%x}\n", account.Address) + return nil } // accountUpdate transitions an account from a previous format to the current // one, also providing the possibility to change the pass-phrase. -func accountUpdate(ctx *cli.Context) { +func accountUpdate(ctx *cli.Context) error { if len(ctx.Args()) == 0 { utils.Fatalf("No accounts specified to update") } @@ -283,9 +285,10 @@ func accountUpdate(ctx *cli.Context) { if err := accman.Update(account, oldPassword, newPassword); err != nil { utils.Fatalf("Could not update the account: %v", err) } + return nil } -func importWallet(ctx *cli.Context) { +func importWallet(ctx *cli.Context) error { keyfile := ctx.Args().First() if len(keyfile) == 0 { utils.Fatalf("keyfile must be given as argument") @@ -303,9 +306,10 @@ func importWallet(ctx *cli.Context) { utils.Fatalf("%v", err) } fmt.Printf("Address: {%x}\n", acct.Address) + return nil } -func accountImport(ctx *cli.Context) { +func accountImport(ctx *cli.Context) error { keyfile := ctx.Args().First() if len(keyfile) == 0 { utils.Fatalf("keyfile must be given as argument") @@ -321,4 +325,5 @@ func accountImport(ctx *cli.Context) { utils.Fatalf("Could not create the account: %v", err) } fmt.Printf("Address: {%x}\n", acct.Address) + return nil } diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 3355b7a6ad7da..076852ff22b31 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -72,7 +72,7 @@ Use "ethereum dump 0" to dump the genesis block. 
} ) -func importChain(ctx *cli.Context) { +func importChain(ctx *cli.Context) error { if len(ctx.Args()) != 1 { utils.Fatalf("This command requires an argument.") } @@ -84,9 +84,10 @@ func importChain(ctx *cli.Context) { utils.Fatalf("Import error: %v", err) } fmt.Printf("Import done in %v", time.Since(start)) + return nil } -func exportChain(ctx *cli.Context) { +func exportChain(ctx *cli.Context) error { if len(ctx.Args()) < 1 { utils.Fatalf("This command requires an argument.") } @@ -114,9 +115,10 @@ func exportChain(ctx *cli.Context) { utils.Fatalf("Export error: %v\n", err) } fmt.Printf("Export done in %v", time.Since(start)) + return nil } -func removeDB(ctx *cli.Context) { +func removeDB(ctx *cli.Context) error { confirm, err := console.Stdin.PromptConfirm("Remove local database?") if err != nil { utils.Fatalf("%v", err) @@ -132,9 +134,10 @@ func removeDB(ctx *cli.Context) { } else { fmt.Println("Operation aborted") } + return nil } -func upgradeDB(ctx *cli.Context) { +func upgradeDB(ctx *cli.Context) error { glog.Infoln("Upgrading blockchain database") chain, chainDb := utils.MakeChain(ctx) @@ -163,9 +166,10 @@ func upgradeDB(ctx *cli.Context) { os.Remove(exportFile) glog.Infoln("Import finished") } + return nil } -func dump(ctx *cli.Context) { +func dump(ctx *cli.Context) error { chain, chainDb := utils.MakeChain(ctx) for _, arg := range ctx.Args() { var block *types.Block @@ -182,12 +186,12 @@ func dump(ctx *cli.Context) { state, err := state.New(block.Root(), chainDb) if err != nil { utils.Fatalf("could not create new state: %v", err) - return } fmt.Printf("%s\n", state.Dump()) } } chainDb.Close() + return nil } // hashish returns true for strings that look like hashes. diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index cc7a40fd99392..257050a627398 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -60,7 +60,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso // localConsole starts a new geth node, attaching a JavaScript console to it at the // same time. -func localConsole(ctx *cli.Context) { +func localConsole(ctx *cli.Context) error { // Create and start the node based on the CLI flags node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx) startNode(ctx, node) @@ -86,16 +86,18 @@ func localConsole(ctx *cli.Context) { // If only a short execution was requested, evaluate and return if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { console.Evaluate(script) - return + return nil } // Otherwise print the welcome screen and enter interactive mode console.Welcome() console.Interactive() + + return nil } // remoteConsole will connect to a remote geth instance, attaching a JavaScript // console to it. -func remoteConsole(ctx *cli.Context) { +func remoteConsole(ctx *cli.Context) error { // Attach to a remotely running geth instance and start the JavaScript console client, err := utils.NewRemoteRPCClient(ctx) if err != nil { @@ -116,17 +118,19 @@ func remoteConsole(ctx *cli.Context) { // If only a short execution was requested, evaluate and return if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { console.Evaluate(script) - return + return nil } // Otherwise print the welcome screen and enter interactive mode console.Welcome() console.Interactive() + + return nil } // ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript // console to it, and each of the files specified as arguments and tears the // everything down. 
-func ephemeralConsole(ctx *cli.Context) { +func ephemeralConsole(ctx *cli.Context) error { // Create and start the node based on the CLI flags node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx) startNode(ctx, node) @@ -164,4 +168,6 @@ func ephemeralConsole(ctx *cli.Context) { os.Exit(0) }() console.Stop(true) + + return nil } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 0c782680b1130..9ee1055bf20a4 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -271,15 +271,17 @@ func makeDefaultExtra() []byte { // geth is the main entry point into the system if no special subcommand is ran. // It creates a default node based on the command line arguments and runs it in // blocking mode, waiting for it to be shut down. -func geth(ctx *cli.Context) { +func geth(ctx *cli.Context) error { node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx) startNode(ctx, node) node.Wait() + + return nil } // initGenesis will initialise the given JSON format genesis file and writes it as // the zero'd block (i.e. genesis) or will fail hard if it can't succeed. -func initGenesis(ctx *cli.Context) { +func initGenesis(ctx *cli.Context) error { genesisPath := ctx.Args().First() if len(genesisPath) == 0 { utils.Fatalf("must supply path to genesis JSON file") @@ -300,6 +302,7 @@ func initGenesis(ctx *cli.Context) { utils.Fatalf("failed to write genesis block: %v", err) } glog.V(logger.Info).Infof("successfully wrote genesis block and/or chain rule set: %x", block.Hash()) + return nil } // startNode boots up the system node and all registered protocols, after which @@ -331,7 +334,7 @@ func startNode(ctx *cli.Context, stack *node.Node) { } } -func makedag(ctx *cli.Context) { +func makedag(ctx *cli.Context) error { args := ctx.Args() wrongArgs := func() { utils.Fatalf(`Usage: geth makedag `) @@ -358,13 +361,15 @@ func makedag(ctx *cli.Context) { default: wrongArgs() } + return nil } -func gpuinfo(ctx *cli.Context) { +func gpuinfo(ctx *cli.Context) error { eth.PrintOpenCLDevices() + return nil } -func gpubench(ctx *cli.Context) { +func gpubench(ctx *cli.Context) error { args := ctx.Args() wrongArgs := func() { utils.Fatalf(`Usage: geth gpubench `) @@ -381,9 +386,10 @@ func gpubench(ctx *cli.Context) { default: wrongArgs() } + return nil } -func version(c *cli.Context) { +func version(c *cli.Context) error { fmt.Println(clientIdentifier) fmt.Println("Version:", verString) fmt.Println("Protocol Versions:", eth.ProtocolVersions) @@ -392,4 +398,6 @@ func version(c *cli.Context) { fmt.Println("OS:", runtime.GOOS) fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) fmt.Printf("GOROOT=%s\n", runtime.GOROOT()) + + return nil } diff --git a/cmd/geth/monitorcmd.go b/cmd/geth/monitorcmd.go index 7058b432f0de2..11fdca89c2dd5 100644 --- a/cmd/geth/monitorcmd.go +++ b/cmd/geth/monitorcmd.go @@ -67,7 +67,7 @@ to display multiple metrics simultaneously. ) // monitor starts a terminal UI based monitoring tool for the requested metrics. 
-func monitor(ctx *cli.Context) {
+func monitor(ctx *cli.Context) error {
 	var (
 		client rpc.Client
 		err    error
@@ -154,6 +154,7 @@ func monitor(ctx *cli.Context) {
 		}
 	}()
 	termui.Loop()
+	return nil
 }
 
 // retrieveMetrics contacts the attached geth node and retrieves the entire set
diff --git a/cmd/geth/run_test.go b/cmd/geth/run_test.go
index f6bc3f869ddeb..e26b4509a3c87 100644
--- a/cmd/geth/run_test.go
+++ b/cmd/geth/run_test.go
@@ -58,7 +58,10 @@ type testgeth struct {
 func init() {
 	// Run the app if we're the child process for runGeth.
 	if os.Getenv("GETH_TEST_CHILD") != "" {
-		app.RunAndExitOnError()
+		if err := app.Run(os.Args); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			os.Exit(1)
+		}
 		os.Exit(0)
 	}
 }

From 0405f728c632b99ad6b755ff68019c6ae7ab0cec Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Mon, 13 Jun 2016 13:01:19 +0300
Subject: [PATCH 37/44] [release/1.4.7] eth/downloader: fix occasional fast
 sync critical section test fails

(cherry picked from commit 783289068a63b3accbc6d69cb0ecc0f2c39c5f54)
---
 eth/downloader/downloader_test.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index a9c069a926bc6..e9e051ded40bc 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -1824,13 +1824,15 @@ func testFastCriticalRestarts(t *testing.T, protocol int) {
 	for i := 0; i < fsPivotInterval; i++ {
 		tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
 	}
+	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
+
 	// Synchronise with the peer a few times and make sure they fail until the retry limit
 	for i := 0; i < fsCriticalTrials; i++ {
 		// Attempt a sync and ensure it fails properly
 		if err := tester.sync("peer", nil, FastSync); err == nil {
 			t.Fatalf("failing fast sync succeeded: %v", err)
 		}
-		time.Sleep(500 * time.Millisecond) // Make sure no in-flight requests remain
+		time.Sleep(100 * time.Millisecond) // Make sure no in-flight requests remain
 
 		// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
 		if i == 0 {

From f7fdfa4eace4f0ab1dd820398efa0f2f44bd1eca Mon Sep 17 00:00:00 2001
From: Jeffrey Wilcke
Date: Mon, 13 Jun 2016 11:56:42 +0200
Subject: [PATCH 38/44] [release/1.4.7] core/state, eth: Updated suicides
 objects when tracing transactions

Consensus rules dictate that objects can only be removed during the
finalisation of the transaction (i.e. after all calls have finished).
Thus calling a suicided contract twice from the same transaction:
A->B(S)->ret(A)->B(S) results in 2 suicides. Calling the suicided object
twice from two transactions: A->B(S), A->B, results in only one suicide
and a call to an empty object.

Our current debug tracing functionality replays all transactions that
were executed prior to the targeted transaction in order to provide the
user with an accurate trace.

As a side effect of calling StateDB.IntermediateRoot it also deletes any
suicided objects. Our tracing code never calls this function because it
isn't interested in the intermediate root. Because of this it caused a
bug in the tracing code where transactions that were sent to previously
deleted objects resulted in two suicides rather than one suicide and a
call to an empty object.
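As an illustration of the rule described above, the following is a small, self-contained Go sketch. It is not go-ethereum code: the types, fields, and method names in it are invented for this example, with deleteSuicides standing in for the StateDB.DeleteSuicides helper introduced by this patch. It shows why a suicided object must be flagged as deleted between replayed transactions so that a later call hits an empty object instead of recording a second suicide.

package main

import "fmt"

// stateObject is a toy stand-in for a contract account in the state database.
type stateObject struct {
	suicided bool // set when the contract executes SUICIDE
	deleted  bool // set once finalisation flags the object for removal
}

// stateDB is a toy state database that counts the suicides a tracer would report.
type stateDB struct {
	objects  map[string]*stateObject
	suicides int
}

// call models a transaction that invokes the self-destructing contract addr.
func (s *stateDB) call(addr string) {
	obj, ok := s.objects[addr]
	if !ok || obj.deleted {
		return // a call to an empty object, no suicide recorded
	}
	obj.suicided = true
	s.suicides++
}

// deleteSuicides plays the role of StateDB.DeleteSuicides in this patch: it
// flags suicided objects as deleted without computing an intermediate root.
func (s *stateDB) deleteSuicides() {
	for _, obj := range s.objects {
		if obj.suicided {
			obj.deleted = true
		}
	}
}

func main() {
	s := &stateDB{objects: map[string]*stateObject{"B": {}}}
	s.call("B")        // transaction 1: A->B(S), B suicides
	s.deleteSuicides() // finalisation between transactions (what the tracer was missing)
	s.call("B")        // transaction 2: A->B, now a call to an empty object
	fmt.Println("suicides:", s.suicides) // prints 1; without deleteSuicides it would print 2
}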
Fixes #2542

(cherry picked from commit bb3651abc865c6f6babec0d357afa85f5a539d83)
---
 core/state/statedb.go | 21 +++++++++++++++++++++
 eth/api.go            |  1 +
 2 files changed, 22 insertions(+)

diff --git a/core/state/statedb.go b/core/state/statedb.go
index 70673799ed24b..3e25e0c1609e4 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -370,6 +370,27 @@ func (s *StateDB) IntermediateRoot() common.Hash {
 	return s.trie.Hash()
 }
 
+// DeleteSuicides flags the suicided objects for deletion so that they
+// won't be referenced again when called / queried upon.
+//
+// DeleteSuicides should not be used for consensus related updates
+// under any circumstances.
+func (s *StateDB) DeleteSuicides() {
+	// Reset refund so that any used-gas calculations can use
+	// this method.
+	s.refund = new(big.Int)
+	for _, stateObject := range s.stateObjects {
+		if stateObject.dirty {
+			// If the object has been removed by a suicide
+			// flag the object as deleted.
+			if stateObject.remove {
+				stateObject.deleted = true
+			}
+			stateObject.dirty = false
+		}
+	}
+}
+
 // Commit commits all state changes to the database.
 func (s *StateDB) Commit() (root common.Hash, err error) {
 	root, batch := s.CommitBatch()
diff --git a/eth/api.go b/eth/api.go
index f5f942c27d62a..a2be81428525d 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -1876,6 +1876,7 @@ func (api *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogC
 			if err != nil {
 				return nil, fmt.Errorf("mutation failed: %v", err)
 			}
+			stateDb.DeleteSuicides()
 			continue
 		}
 		// Otherwise trace the transaction and return

From bc6c4a337cb831a2d6121201141fbd324d1b3d06 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Fri, 10 Jun 2016 10:35:10 +0300
Subject: [PATCH 39/44] [release/1.4.7] accounts/abi: fix uint64 upper range
 encoding.

(cherry picked from commit 0f9539e1e3e77bb181d67591cfbb77f6a17e5537)
---
 accounts/abi/numbers.go      | 12 +++--------
 accounts/abi/numbers_test.go | 42 +++++++++++++++++++++++++-----------
 2 files changed, 33 insertions(+), 21 deletions(-)

diff --git a/accounts/abi/numbers.go b/accounts/abi/numbers.go
index 06c4422f93d23..3d58422925c36 100644
--- a/accounts/abi/numbers.go
+++ b/accounts/abi/numbers.go
@@ -56,27 +56,21 @@ var (
 	big_ts    = reflect.TypeOf([]*big.Int(nil))
 )
 
-// U256 will ensure unsigned 256bit on big nums
+// U256 converts a big Int into a 256bit EVM number.
func U256(n *big.Int) []byte { return common.LeftPadBytes(common.U256(n).Bytes(), 32) } -// S256 will ensure signed 256bit on big nums -func U2U256(n uint64) []byte { - return U256(big.NewInt(int64(n))) -} - // packNum packs the given number (using the reflect value) and will cast it to appropriate number representation func packNum(value reflect.Value) []byte { switch kind := value.Kind(); kind { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return U2U256(value.Uint()) + return U256(new(big.Int).SetUint64(value.Uint())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return U2U256(uint64(value.Int())) + return U256(big.NewInt(value.Int())) case reflect.Ptr: return U256(value.Interface().(*big.Int)) } - return nil } diff --git a/accounts/abi/numbers_test.go b/accounts/abi/numbers_test.go index f409aa60fb79c..44afe8647898b 100644 --- a/accounts/abi/numbers_test.go +++ b/accounts/abi/numbers_test.go @@ -18,6 +18,7 @@ package abi import ( "bytes" + "math" "math/big" "reflect" "testing" @@ -34,21 +35,38 @@ func TestNumberTypes(t *testing.T) { } func TestPackNumber(t *testing.T) { - ubytes := make([]byte, 32) - ubytes[31] = 1 - maxunsigned := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255} + tests := []struct { + value reflect.Value + packed []byte + }{ + // Protocol limits + {reflect.ValueOf(0), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {reflect.ValueOf(1), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + {reflect.ValueOf(-1), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}, + + // Type corner cases + {reflect.ValueOf(uint8(math.MaxUint8)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255}}, + {reflect.ValueOf(uint16(math.MaxUint16)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255}}, + {reflect.ValueOf(uint32(math.MaxUint32)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255}}, + {reflect.ValueOf(uint64(math.MaxUint64)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255}}, - packed := packNum(reflect.ValueOf(1)) - if !bytes.Equal(packed, ubytes) { - t.Errorf("expected %x got %x", ubytes, packed) + {reflect.ValueOf(int8(math.MaxInt8)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127}}, + {reflect.ValueOf(int16(math.MaxInt16)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255}}, + {reflect.ValueOf(int32(math.MaxInt32)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255}}, + {reflect.ValueOf(int64(math.MaxInt64)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255}}, + + {reflect.ValueOf(int8(math.MinInt8)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128}}, + 
{reflect.ValueOf(int16(math.MinInt16)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0}}, + {reflect.ValueOf(int32(math.MinInt32)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0, 0, 0}}, + {reflect.ValueOf(int64(math.MinInt64)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0, 0, 0, 0, 0, 0, 0}}, } - packed = packNum(reflect.ValueOf(-1)) - if !bytes.Equal(packed, maxunsigned) { - t.Errorf("expected %x got %x", maxunsigned, packed) + for i, tt := range tests { + packed := packNum(tt.value) + if !bytes.Equal(packed, tt.packed) { + t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed) + } } - - packed = packNum(reflect.ValueOf("string")) - if packed != nil { + if packed := packNum(reflect.ValueOf("string")); packed != nil { t.Errorf("expected 'string' to pack to nil. got %x instead", packed) } } From 77b280115bcfc8d1fa5af46b2c196531f7b03c44 Mon Sep 17 00:00:00 2001 From: Franko Date: Tue, 14 Jun 2016 19:19:09 -0400 Subject: [PATCH 40/44] cleanup rebase 1.4.6 --- .gitmodules | 2 +- cmd/gexp/js.go | 426 ------------------------------ cmd/gexp/js_test.go | 504 ------------------------------------ cmd/gexp/main.go | 2 +- exp/bad_block.go | 2 +- internal/jsre/expanse_js.go | 2 +- jsre/expanse_js.go | 4 +- 7 files changed, 6 insertions(+), 936 deletions(-) delete mode 100644 cmd/gexp/js.go delete mode 100644 cmd/gexp/js_test.go diff --git a/.gitmodules b/.gitmodules index 5bb332a5c048b..fbd82891d081b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "cmd/mist/assets/ext/expanse.js"] path = cmd/mist/assets/ext/expanse.js - url = https://github.com/expanse-org/web3.js + url = https://github.com/expanse-project/web3.js diff --git a/cmd/gexp/js.go b/cmd/gexp/js.go deleted file mode 100644 index b1fb495bf637f..0000000000000 --- a/cmd/gexp/js.go +++ /dev/null @@ -1,426 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// Copyright 2015 go-expanse Authors -// This file is part of go-expanse. -// -// go-expanse is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-expanse is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-expanse. If not, see . 
- -package main - -import ( - "fmt" - "math/big" - "os" - "os/signal" - "path/filepath" - "regexp" - "sort" - "strings" - - "github.com/codegangsta/cli" - "github.com/expanse-project/go-expanse/accounts" - "github.com/expanse-project/go-expanse/cmd/utils" - "github.com/expanse-project/go-expanse/common" - "github.com/expanse-project/go-expanse/common/registrar" - "github.com/expanse-project/go-expanse/exp" - "github.com/expanse-project/go-expanse/internal/web3ext" - re "github.com/expanse-project/go-expanse/jsre" - "github.com/expanse-project/go-expanse/node" - "github.com/expanse-project/go-expanse/rpc" - "github.com/peterh/liner" - "github.com/robertkrimen/otto" -) - -var ( - passwordRegexp = regexp.MustCompile("personal.[nus]") - onlyws = regexp.MustCompile("^\\s*$") - exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$") -) - -type jsre struct { - re *re.JSRE - stack *node.Node - wait chan *big.Int - ps1 string - atexit func() - corsDomain string - client rpc.Client -} - -func makeCompleter(re *jsre) liner.WordCompleter { - return func(line string, pos int) (head string, completions []string, tail string) { - if len(line) == 0 || pos == 0 { - return "", nil, "" - } - // chuck data to relevant part for autocompletion, e.g. in case of nested lines exp.getBalance(exp.coinb - i := 0 - for i = pos - 1; i > 0; i-- { - if line[i] == '.' || (line[i] >= 'a' && line[i] <= 'z') || (line[i] >= 'A' && line[i] <= 'Z') { - continue - } - if i >= 3 && line[i] == '3' && line[i-3] == 'w' && line[i-2] == 'e' && line[i-1] == 'b' { - continue - } - i += 1 - break - } - return line[:i], re.re.CompleteKeywords(line[i:pos]), line[pos:] - } -} - -func newLightweightJSRE(docRoot string, client rpc.Client, datadir string, interactive bool) *jsre { - js := &jsre{ps1: "> "} - js.wait = make(chan *big.Int) - js.client = client - js.re = re.New(docRoot) - if err := js.apiBindings(); err != nil { - utils.Fatalf("Unable to initialize console - %v", err) - } - js.setupInput(datadir) - return js -} - -func newJSRE(stack *node.Node, docRoot, corsDomain string, client rpc.Client, interactive bool) *jsre { - js := &jsre{stack: stack, ps1: "> "} - // set default cors domain used by startRpc from CLI flag - js.corsDomain = corsDomain - js.wait = make(chan *big.Int) - js.client = client - js.re = re.New(docRoot) - if err := js.apiBindings(); err != nil { - utils.Fatalf("Unable to connect - %v", err) - } - js.setupInput(stack.DataDir()) - return js -} - -func (self *jsre) setupInput(datadir string) { - self.withHistory(datadir, func(hist *os.File) { utils.Stdin.ReadHistory(hist) }) - utils.Stdin.SetCtrlCAborts(true) - utils.Stdin.SetWordCompleter(makeCompleter(self)) - utils.Stdin.SetTabCompletionStyle(liner.TabPrints) - self.atexit = func() { - self.withHistory(datadir, func(hist *os.File) { - hist.Truncate(0) - utils.Stdin.WriteHistory(hist) - }) - utils.Stdin.Close() - close(self.wait) - } -} - -func (self *jsre) batch(statement string) { - err := self.re.EvalAndPrettyPrint(statement) - - if err != nil { - fmt.Printf("%v", jsErrorString(err)) - } - - if self.atexit != nil { - self.atexit() - } - - self.re.Stop(false) -} - -// show summary of current gexp instance -func (self *jsre) welcome() { - self.re.Run(` - (function () { - console.log('instance: ' + web3.version.node); - console.log("coinbase: " + exp.coinbase); - var ts = 1000 * exp.getBlock(exp.blockNumber).timestamp; - console.log("at block: " + exp.blockNumber + " (" + new Date(ts) + ")"); - console.log(' datadir: ' + admin.datadir); - })(); - `) - if modules, err := 
self.supportedApis(); err == nil { - loadedModules := make([]string, 0) - for api, version := range modules { - loadedModules = append(loadedModules, fmt.Sprintf("%s:%s", api, version)) - } - sort.Strings(loadedModules) - } -} - -func (self *jsre) supportedApis() (map[string]string, error) { - return self.client.SupportedModules() -} - -func (js *jsre) apiBindings() error { - apis, err := js.supportedApis() - if err != nil { - return err - } - - apiNames := make([]string, 0, len(apis)) - for a, _ := range apis { - apiNames = append(apiNames, a) - } - - jeth := utils.NewJeth(js.re, js.client) - js.re.Set("jeth", struct{}{}) - t, _ := js.re.Get("jeth") - jethObj := t.Object() - - jethObj.Set("send", jeth.Send) - jethObj.Set("sendAsync", jeth.Send) - - err = js.re.Compile("bignumber.js", re.BigNumber_JS) - if err != nil { - utils.Fatalf("Error loading bignumber.js: %v", err) - } - - err = js.re.Compile("web3.js", re.Web3_JS) - if err != nil { - utils.Fatalf("Error loading web3.js: %v", err) - } - - _, err = js.re.Run("var Web3 = require('web3');") - if err != nil { - utils.Fatalf("Error requiring web3: %v", err) - } - - _, err = js.re.Run("var web3 = new Web3(jeth);") - if err != nil { - utils.Fatalf("Error setting web3 provider: %v", err) - } - - // load only supported API's in javascript runtime - shortcuts := "var exp = web3.exp; var personal = web3.personal; " - for _, apiName := range apiNames { - if apiName == "web3" || apiName == "rpc" { - continue // manually mapped or ignore - } - - if jsFile, ok := web3ext.Modules[apiName]; ok { - if err = js.re.Compile(fmt.Sprintf("%s.js", apiName), jsFile); err == nil { - shortcuts += fmt.Sprintf("var %s = web3.%s; ", apiName, apiName) - } else { - utils.Fatalf("Error loading %s.js: %v", apiName, err) - } - } - } - - _, err = js.re.Run(shortcuts) - if err != nil { - utils.Fatalf("Error setting namespaces: %v", err) - } - - js.re.Run(`var GlobalRegistrar = exp.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`) - - // overrule some of the methods that require password as input and ask for it interactively - p, err := js.re.Get("personal") - if err != nil { - fmt.Println("Unable to overrule sensitive methods in personal module") - return nil - } - - // Override the unlockAccount and newAccount methods on the personal object since these require user interaction. - // Assign the jeth.unlockAccount and jeth.newAccount in the jsre the original web3 callbacks. These will be called - // by the jeth.* methods after they got the password from the user and send the original web3 request to the backend. - if persObj := p.Object(); persObj != nil { // make sure the personal api is enabled over the interface - js.re.Run(`jeth.unlockAccount = personal.unlockAccount;`) - persObj.Set("unlockAccount", jeth.UnlockAccount) - js.re.Run(`jeth.newAccount = personal.newAccount;`) - persObj.Set("newAccount", jeth.NewAccount) - } - - // The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer. - // Bind these if the admin module is available. 
- if a, err := js.re.Get("admin"); err == nil { - if adminObj := a.Object(); adminObj != nil { - adminObj.Set("sleepBlocks", jeth.SleepBlocks) - adminObj.Set("sleep", jeth.Sleep) - } - } - - return nil -} - -func (self *jsre) AskPassword() (string, bool) { - pass, err := utils.Stdin.PasswordPrompt("Passphrase: ") - if err != nil { - return "", false - } - return pass, true -} - -func (self *jsre) ConfirmTransaction(tx string) bool { - // Retrieve the Expanse instance from the node - var expanse *exp.Expanse - if err := self.stack.Service(&expanse); err != nil { - return false - } - // If natspec is enabled, ask for permission - if expanse.NatSpec && false /* disabled for now */ { - // notice := natspec.GetNotice(self.xeth, tx, expanse.HTTPClient()) - // fmt.Println(notice) - // answer, _ := self.Prompt("Confirm Transaction [y/n]") - // return strings.HasPrefix(strings.Trim(answer, " "), "y") - } - return true -} - -func (self *jsre) UnlockAccount(addr []byte) bool { - fmt.Printf("Please unlock account %x.\n", addr) - pass, err := utils.Stdin.PasswordPrompt("Passphrase: ") - if err != nil { - return false - } - // TODO: allow retry - var expanse *exp.Expanse - if err := self.stack.Service(&expanse); err != nil { - return false - } - a := accounts.Account{Address: common.BytesToAddress(addr)} - if err := expanse.AccountManager().Unlock(a, pass); err != nil { - return false - } else { - fmt.Println("Account is now unlocked for this session.") - return true - } -} - -// preloadJSFiles loads JS files that the user has specified with ctx.PreLoadJSFlag into -// the JSRE. If not all files could be loaded it will return an error describing the error. -func (self *jsre) preloadJSFiles(ctx *cli.Context) error { - if ctx.GlobalString(utils.PreLoadJSFlag.Name) != "" { - assetPath := ctx.GlobalString(utils.JSpathFlag.Name) - jsFiles := strings.Split(ctx.GlobalString(utils.PreLoadJSFlag.Name), ",") - for _, file := range jsFiles { - filename := common.AbsolutePath(assetPath, strings.TrimSpace(file)) - if err := self.re.Exec(filename); err != nil { - return fmt.Errorf("%s: %v", file, jsErrorString(err)) - } - } - } - return nil -} - -// jsErrorString adds a backtrace to errors generated by otto. -func jsErrorString(err error) string { - if ottoErr, ok := err.(*otto.Error); ok { - return ottoErr.String() - } - return err.Error() -} - -func (self *jsre) interactive() { - // Read input lines. - prompt := make(chan string) - inputln := make(chan string) - go func() { - defer close(inputln) - for { - line, err := utils.Stdin.Prompt(<-prompt) - if err != nil { - if err == liner.ErrPromptAborted { // ctrl-C - self.resetPrompt() - inputln <- "" - continue - } - return - } - inputln <- line - } - }() - // Wait for Ctrl-C, too. 
- sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt) - - defer func() { - if self.atexit != nil { - self.atexit() - } - self.re.Stop(false) - }() - for { - prompt <- self.ps1 - select { - case <-sig: - fmt.Println("caught interrupt, exiting") - return - case input, ok := <-inputln: - if !ok || indentCount <= 0 && exit.MatchString(input) { - return - } - if onlyws.MatchString(input) { - continue - } - str += input + "\n" - self.setIndent() - if indentCount <= 0 { - if !excludeFromHistory(str) { - utils.Stdin.AppendHistory(str[:len(str)-1]) - } - self.parseInput(str) - str = "" - } - } - } -} - -func excludeFromHistory(input string) bool { - return len(input) == 0 || input[0] == ' ' || passwordRegexp.MatchString(input) -} - - -func (self *jsre) withHistory(datadir string, op func(*os.File)) { - hist, err := os.OpenFile(filepath.Join(datadir, "history"), os.O_RDWR|os.O_CREATE, os.ModePerm) - if err != nil { - fmt.Printf("unable to open history file: %v\n", err) - return - } - op(hist) - hist.Close() -} - -func (self *jsre) parseInput(code string) { - defer func() { - if r := recover(); r != nil { - fmt.Println("[native] error", r) - } - }() - if err := self.re.EvalAndPrettyPrint(code); err != nil { - if ottoErr, ok := err.(*otto.Error); ok { - fmt.Println(ottoErr.String()) - } else { - fmt.Println(err) - } - return - } -} - -var indentCount = 0 -var str = "" - -func (self *jsre) resetPrompt() { - indentCount = 0 - str = "" - self.ps1 = "> " -} - -func (self *jsre) setIndent() { - open := strings.Count(str, "{") - open += strings.Count(str, "(") - closed := strings.Count(str, "}") - closed += strings.Count(str, ")") - indentCount = open - closed - if indentCount <= 0 { - self.ps1 = "> " - } else { - self.ps1 = strings.Join(make([]string, indentCount*2), "..") - self.ps1 += " " - } -} diff --git a/cmd/gexp/js_test.go b/cmd/gexp/js_test.go deleted file mode 100644 index e648b9dc25879..0000000000000 --- a/cmd/gexp/js_test.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of go-expanse. -// -// go-expanse is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-expanse is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-expanse. If not, see . 
- -package main - -import ( - "fmt" - "io/ioutil" - "math/big" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "testing" - "time" - - "github.com/expanse-project/go-expanse/accounts" - "github.com/expanse-project/go-expanse/common" - "github.com/expanse-project/go-expanse/common/compiler" - "github.com/expanse-project/go-expanse/common/httpclient" - "github.com/expanse-project/go-expanse/core" - "github.com/expanse-project/go-expanse/crypto" - "github.com/expanse-project/go-expanse/eth" - "github.com/expanse-project/go-expanse/ethdb" - "github.com/expanse-project/go-expanse/node" - -) - -const ( - testSolcPath = "" - solcVersion = "0.9.23" - testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - testBalance = "10000000000000000000" - // of empty string - testHash = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" -) - -var ( - versionRE = regexp.MustCompile(strconv.Quote(`"compilerVersion":"` + solcVersion + `"`)) - testNodeKey, _ = crypto.HexToECDSA("4b50fa71f5c3eeb8fdc452224b2395af2fcc3d125e06c32c82e048c0559db03f") - testAccount, _ = crypto.HexToECDSA("e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674") - testGenesis = `{"` + testAddress[2:] + `": {"balance": "` + testBalance + `"}}` -) - -type testjethre struct { - *jsre - lastConfirm string - client *httpclient.HTTPClient -} - -// Temporary disabled while natspec hasn't been migrated -//func (self *testjethre) ConfirmTransaction(tx string) bool { -// var expanse *exp.Expanse -// self.stack.Service(&expanse) -// -// if expanse.NatSpec { -// self.lastConfirm = natspec.GetNotice(self.xeth, tx, self.client) -// } -// return true -//} - -func testJEthRE(t *testing.T) (string, *testjethre, *node.Node) { - return testREPL(t, nil) -} - -func testREPL(t *testing.T, config func(*exp.Config)) (string, *testjethre, *node.Node) { - tmp, err := ioutil.TempDir("", "gexp-test") - - if err != nil { - t.Fatal(err) - } - // Create a networkless protocol stack - stack, err := node.New(&node.Config{DataDir: tmp, PrivateKey: testNodeKey, Name: "test", NoDiscovery: true}) - if err != nil { - t.Fatalf("failed to create node: %v", err) - } - // Initialize and register the Expanse protocol - accman := accounts.NewPlaintextManager(filepath.Join(tmp, "keystore")) - db, _ := ethdb.NewMemDatabase() - core.WriteGenesisBlockForTesting(db, core.GenesisAccount{ - Address: common.HexToAddress(testAddress), - Balance: common.String2Big(testBalance), - }) - ethConf := &exp.Config{ - ChainConfig: &core.ChainConfig{HomesteadBlock: new(big.Int)}, - TestGenesisState: db, - AccountManager: accman, - DocRoot: "/", - SolcPath: testSolcPath, - PowTest: true, - - } - if config != nil { - config(ethConf) - } - - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - return exp.New(ctx, ethConf) - }); err != nil { - t.Fatalf("failed to register expanse protocol: %v", err) - } - // Initialize all the keys for testing - a, err := accman.ImportECDSA(testAccount, "") - if err != nil { - t.Fatal(err) - } - if err := accman.Unlock(a, ""); err != nil { - t.Fatal(err) - } - // Start the node and assemble the REPL tester - if err := stack.Start(); err != nil { - t.Fatalf("failed to start test stack: %v", err) - } - var expanse *exp.Expanse - stack.Service(&expanse) - - assetPath := filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "expanse", "go-expanse", "cmd", "mist", "assets", "ext") - client, err := stack.Attach() - if err != nil { - t.Fatalf("failed to attach to node: %v", err) - } - tf := 
&testjethre{client: expanse.HTTPClient()} - repl := newJSRE(stack, assetPath, "", client, false) - tf.jsre = repl - return tmp, tf, stack -} - -func TestNodeInfo(t *testing.T) { - t.Skip("broken after p2p update") - tmp, repl, expanse := testJEthRE(t) - defer expanse.Stop() - defer os.RemoveAll(tmp) - - want := `{"DiscPort":0,"IP":"0.0.0.0","ListenAddr":"","Name":"test","NodeID":"4cb2fc32924e94277bf94b5e4c983beedb2eabd5a0bc941db32202735c6625d020ca14a5963d1738af43b6ac0a711d61b1a06de931a499fe2aa0b1a132a902b5","NodeUrl":"enode://4cb2fc32924e94277bf94b5e4c983beedb2eabd5a0bc941db32202735c6625d020ca14a5963d1738af43b6ac0a711d61b1a06de931a499fe2aa0b1a132a902b5@0.0.0.0:0","TCPPort":0,"Td":"131072"}` - checkEvalJSON(t, repl, `admin.nodeInfo`, want) -} - -func TestAccounts(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - - checkEvalJSON(t, repl, `exp.accounts`, `["`+testAddress+`"]`) - checkEvalJSON(t, repl, `exp.coinbase`, `"`+testAddress+`"`) - val, err := repl.re.Run(`jexp.newAccount("password")`) - if err != nil { - t.Errorf("expected no error, got %v", err) - } - addr := val.String() - if !regexp.MustCompile(`0x[0-9a-f]{40}`).MatchString(addr) { - t.Errorf("address not hex: %q", addr) - } - - checkEvalJSON(t, repl, `exp.accounts`, `["`+testAddress+`","`+addr+`"]`) - -} - -func TestBlockChain(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - // get current block dump before export/import. - val, err := repl.re.Run("JSON.stringify(debug.dumpBlock(exp.blockNumber))") - if err != nil { - t.Errorf("expected no error, got %v", err) - } - beforeExport := val.String() - - // do the export - extmp, err := ioutil.TempDir("", "gexp-test-export") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(extmp) - tmpfile := filepath.Join(extmp, "export.chain") - tmpfileq := strconv.Quote(tmpfile) - - var expanse *exp.Expanse - node.Service(&expanse) - expanse.BlockChain().Reset() - - checkEvalJSON(t, repl, `admin.exportChain(`+tmpfileq+`)`, `true`) - if _, err := os.Stat(tmpfile); err != nil { - t.Fatal(err) - } - - // check import, verify that dumpBlock gives the same result. - checkEvalJSON(t, repl, `admin.importChain(`+tmpfileq+`)`, `true`) - checkEvalJSON(t, repl, `debug.dumpBlock(exp.blockNumber)`, beforeExport) -} - -func TestMining(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - checkEvalJSON(t, repl, `exp.mining`, `false`) -} - -func TestRPC(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - - checkEvalJSON(t, repl, `admin.startRPC("127.0.0.1", 5004, "*", "web3,exp,net")`, `true`) -} - -func TestCheckTestAccountBalance(t *testing.T) { - t.Skip() // i don't think it tests the correct behaviour here. it's actually testing - // internals which shouldn't be tested. 
This now fails because of a change in the core - // and i have no means to fix this, sorry - @obscuren - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - - repl.re.Run(`primary = "` + testAddress + `"`) - checkEvalJSON(t, repl, `exp.getBalance(primary)`, `"`+testBalance+`"`) -} - -func TestSignature(t *testing.T) { - tmp, repl, node := testJEthRE(t) - defer node.Stop() - defer os.RemoveAll(tmp) - - val, err := repl.re.Run(`exp.sign("` + testAddress + `", "` + testHash + `")`) - - // This is a very preliminary test, lacking actual signature verification - if err != nil { - t.Errorf("Error running js: %v", err) - return - } - output := val.String() - t.Logf("Output: %v", output) - - regex := regexp.MustCompile(`^0x[0-9a-f]{130}$`) - if !regex.MatchString(output) { - t.Errorf("Signature is not 65 bytes represented in hexadecimal.") - return - } -} - -func TestContract(t *testing.T) { - t.Skip("contract testing is implemented with mining in ethash test mode. This takes about 7seconds to run. Unskip and run on demand") - coinbase := common.HexToAddress(testAddress) - tmp, repl, expanse := testREPL(t, func(conf *exp.Config) { - conf.Etherbase = coinbase - conf.PowTest = true - }) - if err := expanse.Start(); err != nil { - t.Errorf("error starting expanse: %v", err) - return - } - defer expanse.Stop() - defer os.RemoveAll(tmp) - - // Temporary disabled while registrar isn't migrated - //reg := registrar.New(repl.xeth) - //_, err := reg.SetGlobalRegistrar("", coinbase) - //if err != nil { - // t.Errorf("error setting HashReg: %v", err) - //} - //_, err = reg.SetHashReg("", coinbase) - //if err != nil { - // t.Errorf("error setting HashReg: %v", err) - //} - //_, err = reg.SetUrlHint("", coinbase) - //if err != nil { - // t.Errorf("error setting HashReg: %v", err) - //} - /* TODO: - * lookup receipt and contract addresses by tx hash - * name registration for HashReg and UrlHint addresses - * mine those transactions - * then set once more SetHashReg SetUrlHint - */ - - source := `contract test {\n` + - " /// @notice Will multiply `a` by 7." 
+ `\n` + - ` function multiply(uint a) returns(uint d) {\n` + - ` return a * 7;\n` + - ` }\n` + - `}\n` - - if checkEvalJSON(t, repl, `admin.stopNatSpec()`, `true`) != nil { - return - } - - contractInfo, err := ioutil.ReadFile("info_test.json") - if err != nil { - t.Fatalf("%v", err) - } - if checkEvalJSON(t, repl, `primary = exp.accounts[0]`, `"`+testAddress+`"`) != nil { - return - } - if checkEvalJSON(t, repl, `source = "`+source+`"`, `"`+source+`"`) != nil { - return - } - - // if solc is found with right version, test it, otherwise read from file - sol, err := compiler.New("") - if err != nil { - t.Logf("solc not found: mocking contract compilation step") - } else if sol.Version() != solcVersion { - t.Logf("WARNING: solc different version found (%v, test written for %v, may need to update)", sol.Version(), solcVersion) - } - - if err != nil { - info, err := ioutil.ReadFile("info_test.json") - if err != nil { - t.Fatalf("%v", err) - } - _, err = repl.re.Run(`contract = JSON.parse(` + strconv.Quote(string(info)) + `)`) - if err != nil { - t.Errorf("%v", err) - } - } else { - if checkEvalJSON(t, repl, `contract = exp.compile.solidity(source).test`, string(contractInfo)) != nil { - return - } - } - - if checkEvalJSON(t, repl, `contract.code`, `"0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"`) != nil { - return - } - - if checkEvalJSON( - t, repl, - `contractaddress = exp.sendTransaction({from: primary, data: contract.code})`, - `"0x46d69d55c3c4b86a924a92c9fc4720bb7bce1d74"`, - ) != nil { - return - } - - if !processTxs(repl, t, 8) { - return - } - - callSetup := `abiDef = JSON.parse('[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}]'); -Multiply7 = exp.contract(abiDef); -multiply7 = Multiply7.at(contractaddress); -` - _, err = repl.re.Run(callSetup) - if err != nil { - t.Errorf("unexpected error setting up contract, got %v", err) - return - } - - expNotice := "" - if repl.lastConfirm != expNotice { - t.Errorf("incorrect confirmation message: expected %v, got %v", expNotice, repl.lastConfirm) - return - } - - if checkEvalJSON(t, repl, `admin.startNatSpec()`, `true`) != nil { - return - } - if checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary })`, `"0x4ef9088431a8033e4580d00e4eb2487275e031ff4163c7529df0ef45af17857b"`) != nil { - return - } - - if !processTxs(repl, t, 1) { - return - } - - expNotice = `About to submit transaction (no NatSpec info found for contract: content hash not found for '0x87e2802265838c7f14bb69eecd2112911af6767907a702eeaa445239fb20711b'): {"params":[{"to":"0x46d69d55c3c4b86a924a92c9fc4720bb7bce1d74","data": "0xc6888fa10000000000000000000000000000000000000000000000000000000000000006"}]}` - if repl.lastConfirm != expNotice { - t.Errorf("incorrect confirmation message: expected\n%v, got\n%v", expNotice, repl.lastConfirm) - return - } - - var contentHash = `"0x86d2b7cf1e72e9a7a3f8d96601f0151742a2f780f1526414304fbe413dc7f9bd"` - if sol != nil && solcVersion != sol.Version() { - modContractInfo := versionRE.ReplaceAll(contractInfo, []byte(`"compilerVersion":"`+sol.Version()+`"`)) - fmt.Printf("modified contractinfo:\n%s\n", modContractInfo) - contentHash = `"` + common.ToHex(crypto.Keccak256([]byte(modContractInfo))) + `"` - } - if checkEvalJSON(t, repl, `filename = "/tmp/info.json"`, 
`"/tmp/info.json"`) != nil { - return - } - if checkEvalJSON(t, repl, `contentHash = admin.saveInfo(contract.info, filename)`, contentHash) != nil { - return - } - if checkEvalJSON(t, repl, `admin.register(primary, contractaddress, contentHash)`, `true`) != nil { - return - } - if checkEvalJSON(t, repl, `admin.registerUrl(primary, contentHash, "file://"+filename)`, `true`) != nil { - return - } - - if checkEvalJSON(t, repl, `admin.startNatSpec()`, `true`) != nil { - return - } - - if !processTxs(repl, t, 3) { - return - } - - if checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary })`, `"0x66d7635c12ad0b231e66da2f987ca3dfdca58ffe49c6442aa55960858103fd0c"`) != nil { - return - } - - if !processTxs(repl, t, 1) { - return - } - - expNotice = "Will multiply 6 by 7." - if repl.lastConfirm != expNotice { - t.Errorf("incorrect confirmation message: expected\n%v, got\n%v", expNotice, repl.lastConfirm) - return - } -} - -func pendingTransactions(repl *testjethre, t *testing.T) (txc int64, err error) { - var expanse *exp.Expanse - repl.stack.Service(&expanse) - - txs := expanse.TxPool().GetTransactions() - return int64(len(txs)), nil -} - -func processTxs(repl *testjethre, t *testing.T, expTxc int) bool { - var txc int64 - var err error - for i := 0; i < 50; i++ { - txc, err = pendingTransactions(repl, t) - if err != nil { - t.Errorf("unexpected error checking pending transactions: %v", err) - return false - } - if expTxc < int(txc) { - t.Errorf("too many pending transactions: expected %v, got %v", expTxc, txc) - return false - } else if expTxc == int(txc) { - break - } - time.Sleep(100 * time.Millisecond) - } - if int(txc) != expTxc { - t.Errorf("incorrect number of pending transactions, expected %v, got %v", expTxc, txc) - return false - } - var expanse *exp.Expanse - repl.stack.Service(&expanse) - - err = expanse.StartMining(runtime.NumCPU(), "") - if err != nil { - t.Errorf("unexpected error mining: %v", err) - return false - } - defer expanse.StopMining() - - timer := time.NewTimer(100 * time.Second) - blockNr := expanse.BlockChain().CurrentBlock().Number() - height := new(big.Int).Add(blockNr, big.NewInt(1)) - repl.wait <- height - select { - case <-timer.C: - // if times out make sure the xeth loop does not block - go func() { - select { - case repl.wait <- nil: - case <-repl.wait: - } - }() - case <-repl.wait: - } - txc, err = pendingTransactions(repl, t) - if err != nil { - t.Errorf("unexpected error checking pending transactions: %v", err) - return false - } - if txc != 0 { - t.Errorf("%d trasactions were not mined", txc) - return false - } - return true -} - -func checkEvalJSON(t *testing.T, re *testjethre, expr, want string) error { - val, err := re.re.Run("JSON.stringify(" + expr + ")") - if err == nil && val.String() != want { - err = fmt.Errorf("Output mismatch for `%s`:\ngot: %s\nwant: %s", expr, val.String(), want) - } - if err != nil { - _, file, line, _ := runtime.Caller(1) - file = filepath.Base(file) - fmt.Printf("\t%s:%d: %v\n", file, line, err) - t.Fail() - } - return err -} diff --git a/cmd/gexp/main.go b/cmd/gexp/main.go index 15d6224439d44..66679131ce147 100644 --- a/cmd/gexp/main.go +++ b/cmd/gexp/main.go @@ -221,7 +221,7 @@ participating. // because it is not intended to run while testing. // In addition to this check, bad block reports are sent only // for chains with the main network genesis block and network id 1. 
- eth.EnableBadBlockReporting = true + exp.EnableBadBlockReporting = true utils.SetupNetwork(ctx) diff --git a/exp/bad_block.go b/exp/bad_block.go index 6e4d2a6044bac..9f19f81485af7 100644 --- a/exp/bad_block.go +++ b/exp/bad_block.go @@ -27,7 +27,7 @@ import ( "github.com/expanse-project/go-expanse/core/types" "github.com/expanse-project/go-expanse/logger" "github.com/expanse-project/go-expanse/logger/glog" - "github.comexpanse-project/go-expanse/rlp" + "github.com/expanse-project/go-expanse/rlp" ) const ( diff --git a/internal/jsre/expanse_js.go b/internal/jsre/expanse_js.go index 3ba0847c80346..59df5574b3cdd 100644 --- a/internal/jsre/expanse_js.go +++ b/internal/jsre/expanse_js.go @@ -2642,7 +2642,7 @@ Web3.prototype.createBatch = function () { module.exports = Web3; -},{"./utils/sha3":19,"./utils/utils":20,"./version.json":21,"./web3/batch":24,"./web3/extend":28,"./web3/httpprovider":32,"./web3/iban":33,"./web3/ipcprovider":34,"./web3/methods/db":37,"./web3/methods/eth":38,"./web3/methods/net":39,"./web3/methods/personal":40,"./web3/methods/shh":41,"./web3/property":44,"./web3/requestmanager":45,"./web3/settings":46}],23:[function(require,module,exports){ +},{"./utils/sha3":19,"./utils/utils":20,"./version.json":21,"./web3/batch":24,"./web3/extend":28,"./web3/httpprovider":32,"./web3/iban":33,"./web3/ipcprovider":34,"./web3/methods/db":37,"./web3/methods/exp":38,"./web3/methods/net":39,"./web3/methods/personal":40,"./web3/methods/shh":41,"./web3/property":44,"./web3/requestmanager":45,"./web3/settings":46}],23:[function(require,module,exports){ /* This file is part of web3.js. diff --git a/jsre/expanse_js.go b/jsre/expanse_js.go index d3cf48b285185..b24d3eb1b9bf1 100644 --- a/jsre/expanse_js.go +++ b/jsre/expanse_js.go @@ -3088,7 +3088,7 @@ ContractFactory.prototype.getData = function () { * @param {Address} contract address */ var Contract = function (exp, abi, address) { - this._eth = exp; + this._exp = exp; this.transactionHash = null; this.address = address; this.abi = abi; @@ -3965,7 +3965,7 @@ var sha3 = require('../utils/sha3'); * This prototype should be used to call/sendTransaction to solidity functions */ var SolidityFunction = function (exp, json, address) { - this._eth = exp; + this._exp = exp; this._inputTypes = json.inputs.map(function (i) { return i.type; }); From 47965930a138480b51d7250f21977ffec35f1e79 Mon Sep 17 00:00:00 2001 From: Tosh Camille Date: Mon, 13 Jun 2016 14:08:12 +0200 Subject: [PATCH 41/44] [release/1.4.7] cmd/utils: add space between "to" and filename (cherry picked from commit ac66d96c5acd4661253aad8015aa245c6788cd45) --- cmd/utils/cmd.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 9e2b14f567f20..3b521a0e127e7 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -120,7 +120,7 @@ func ImportChain(chain *core.BlockChain, fn string) error { } } - glog.Infoln("Importing blockchain", fn) + glog.Infoln("Importing blockchain ", fn) fh, err := os.Open(fn) if err != nil { return err @@ -182,7 +182,7 @@ func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool { } func ExportChain(blockchain *core.BlockChain, fn string) error { - glog.Infoln("Exporting blockchain to", fn) + glog.Infoln("Exporting blockchain to ", fn) fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) if err != nil { return err @@ -191,12 +191,12 @@ func ExportChain(blockchain *core.BlockChain, fn string) error { if err := blockchain.Export(fh); err != nil { return err } - 
glog.Infoln("Exported blockchain to", fn) + glog.Infoln("Exported blockchain to ", fn) return nil } func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error { - glog.Infoln("Exporting blockchain to", fn) + glog.Infoln("Exporting blockchain to ", fn) // TODO verify mode perms fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) if err != nil { @@ -206,6 +206,6 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las if err := blockchain.ExportN(fh, first, last); err != nil { return err } - glog.Infoln("Exported blockchain to", fn) + glog.Infoln("Exported blockchain to ", fn) return nil } From be29e41334fd831d18fc6acdadeb2e0e31eb9c2a Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Tue, 14 Jun 2016 16:09:27 +0200 Subject: [PATCH 42/44] [release/1.4.7] cmd/evm: added --create flag indicating the exec code is to be created This fixes an issue if you wanted to test out code deployment rather than running a piece of code with an argument. This solves it by adding a --create flag that indicates the Create function should be used rather than the Call function. This also adds a statedb.commit call so that the proper state can be dumped when requested using the --dump flag. (cherry picked from commit e5165aeb277b1f1ae8d5bf60c22716ccd874a8e6) --- cmd/evm/main.go | 45 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/cmd/evm/main.go b/cmd/evm/main.go index e7b266d4e4efc..aa48f6edeced6 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -84,11 +84,16 @@ var ( Name: "verbosity", Usage: "sets the verbosity level", } + CreateFlag = cli.BoolFlag{ + Name: "create", + Usage: "indicates the action should be create rather than call", + } ) func init() { app = utils.NewApp("0.2", "the evm command line interface") app.Flags = []cli.Flag{ + CreateFlag, DebugFlag, VerbosityFlag, ForceJitFlag, @@ -111,8 +116,6 @@ func run(ctx *cli.Context) error { db, _ := ethdb.NewMemDatabase() statedb, _ := state.New(common.Hash{}, db) sender := statedb.CreateAccount(common.StringToAddress("sender")) - receiver := statedb.CreateAccount(common.StringToAddress("receiver")) - receiver.SetCode(common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))) vmenv := NewEnv(statedb, common.StringToAddress("evmuser"), common.Big(ctx.GlobalString(ValueFlag.Name)), vm.Config{ Debug: ctx.GlobalBool(DebugFlag.Name), @@ -121,17 +124,37 @@ func run(ctx *cli.Context) error { }) tstart := time.Now() - ret, e := vmenv.Call( - sender, - receiver.Address(), - common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), - common.Big(ctx.GlobalString(GasFlag.Name)), - common.Big(ctx.GlobalString(PriceFlag.Name)), - common.Big(ctx.GlobalString(ValueFlag.Name)), + + var ( + ret []byte + err error ) + + if ctx.GlobalBool(CreateFlag.Name) { + input := append(common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name)), common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...) 
+ ret, _, err = vmenv.Create( + sender, + input, + common.Big(ctx.GlobalString(GasFlag.Name)), + common.Big(ctx.GlobalString(PriceFlag.Name)), + common.Big(ctx.GlobalString(ValueFlag.Name)), + ) + } else { + receiver := statedb.CreateAccount(common.StringToAddress("receiver")) + receiver.SetCode(common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))) + ret, err = vmenv.Call( + sender, + receiver.Address(), + common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), + common.Big(ctx.GlobalString(GasFlag.Name)), + common.Big(ctx.GlobalString(PriceFlag.Name)), + common.Big(ctx.GlobalString(ValueFlag.Name)), + ) + } vmdone := time.Since(tstart) if ctx.GlobalBool(DumpFlag.Name) { + statedb.Commit() fmt.Println(string(statedb.Dump())) } vm.StdErrFormat(vmenv.StructLogs()) @@ -150,8 +173,8 @@ num gc: %d } fmt.Printf("OUT: 0x%x", ret) - if e != nil { - fmt.Printf(" error: %v", e) + if err != nil { + fmt.Printf(" error: %v", err) } fmt.Println() return nil From d2089e46f875cbfe3ac31ed8b78cb482ba4952f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 14 Jun 2016 17:12:44 +0300 Subject: [PATCH 43/44] VERSION, cmd/geth: bumped version 1.4.7 --- VERSION | 2 +- cmd/geth/main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index c514bd85c2ece..be05bba982958 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.4.6 +1.4.7 diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 9ee1055bf20a4..54e2ca6dde508 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -50,7 +50,7 @@ const ( clientIdentifier = "Geth" // Client identifier to advertise over the network versionMajor = 1 // Major version component of the current release versionMinor = 4 // Minor version component of the current release - versionPatch = 6 // Patch version component of the current release + versionPatch = 7 // Patch version component of the current release versionMeta = "stable" // Version metadata to append to the version string versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle From 246215bcb265bc43dee67e62ecfd4178abaa0045 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Sun, 19 Jun 2016 02:24:17 +0000 Subject: [PATCH 44/44] merge 1.4.7 cleanup --- cmd/gexp/main.go | 2 +- cmd/utils/flags.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/gexp/main.go b/cmd/gexp/main.go index 5830302266fa6..73de09c8d8e2d 100644 --- a/cmd/gexp/main.go +++ b/cmd/gexp/main.go @@ -33,7 +33,7 @@ import ( "github.com/expanse-project/go-expanse/common" "github.com/expanse-project/go-expanse/console" "github.com/expanse-project/go-expanse/core" - "github.com/expanse-project/go-expanse/eth" + "github.com/expanse-project/go-expanse/exp" "github.com/expanse-project/go-expanse/ethdb" "github.com/expanse-project/go-expanse/internal/debug" "github.com/expanse-project/go-expanse/logger" diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 9bd33c64dc0c1..c4f8aac658f4d 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -30,14 +30,13 @@ import ( "strings" "time" - "github.com/codegangsta/cli" "github.com/expanse-project/ethash" "github.com/expanse-project/go-expanse/accounts" "github.com/expanse-project/go-expanse/common" "github.com/expanse-project/go-expanse/core" "github.com/expanse-project/go-expanse/core/state" "github.com/expanse-project/go-expanse/crypto" - "github.com/expanse-project/go-expanse/eth" + "github.com/expanse-project/go-expanse/exp" "github.com/expanse-project/go-expanse/ethdb" 
"github.com/expanse-project/go-expanse/event" "github.com/expanse-project/go-expanse/logger"