diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 5bb332a5c048b..0000000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "cmd/mist/assets/ext/expanse.js"] - path = cmd/mist/assets/ext/expanse.js - url = https://github.com/expanse-org/web3.js diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json deleted file mode 100644 index 86b50271ed9f1..0000000000000 --- a/Godeps/Godeps.json +++ /dev/null @@ -1,329 +0,0 @@ -{ - "ImportPath": "github.com/expanse-org/go-expanse", - "GoVersion": "go1.5.2", - "GodepVersion": "v74", - "Packages": [ - "./..." - ], - "Deps": [ - { - "ImportPath": "github.com/Gustav-Simonsson/go-opencl/cl", - "Rev": "593e01cfc4f3353585015321e01951d4a907d3ef" - }, - { - "ImportPath": "github.com/cespare/cp", - "Rev": "165db2f241fd235aec29ba6d9b1ccd5f1c14637c" - }, - { - "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" - }, - { - "ImportPath": "github.com/expanse-org/ethash", - "Comment": "v23.1-247-g2e80de5", - "Rev": "2e80de5022370cfe632195b1720db52d07ff8a77" - }, - { - "ImportPath": "github.com/fatih/color", - "Comment": "v0.1-12-g9aae6aa", - "Rev": "9aae6aaa22315390f03959adca2c4d395b02fcef" - }, - { - "ImportPath": "github.com/gizak/termui", - "Rev": "08a5d3f67b7d9ec87830ea39c48e570a1f18531f" - }, - { - "ImportPath": "github.com/golang/snappy", - "Rev": "799c780093d646c1b79d30894e22512c319fa137" - }, - { - "ImportPath": "github.com/hashicorp/golang-lru", - "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" - }, - { - "ImportPath": "github.com/hashicorp/golang-lru/simplelru", - "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" - }, - { - "ImportPath": "github.com/huin/goupnp", - "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8" - }, - { - "ImportPath": "github.com/huin/goupnp/dcps/internetgateway1", - "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8" - }, - { - "ImportPath": "github.com/huin/goupnp/dcps/internetgateway2", - "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8" - }, - { - "ImportPath": "github.com/huin/goupnp/httpu", - "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8" - }, - { - "ImportPath": "github.com/huin/goupnp/scpd", - "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8" - }, - { - "ImportPath": "github.com/huin/goupnp/soap", - "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8" - }, - { - "ImportPath": "github.com/huin/goupnp/ssdp", - "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8" - }, - { - "ImportPath": "github.com/jackpal/gateway", - "Rev": "192609c58b8985e645cbe82ddcb28a4362ca0fdc" - }, - { - "ImportPath": "github.com/jackpal/go-nat-pmp", - "Rev": "46523a463303c6ede3ddfe45bde1c7ed52ebaacd" - }, - { - "ImportPath": "github.com/mattn/go-colorable", - "Rev": "9fdad7c47650b7d2e1da50644c1f4ba7f172f252" - }, - { - "ImportPath": "github.com/mattn/go-isatty", - "Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639" - }, - { - "ImportPath": "github.com/mattn/go-runewidth", - "Comment": "travisish-44-ge882a96", - "Rev": "e882a96ec18dd43fa283187b66af74497c9101c0" - }, - { - "ImportPath": "github.com/microsoft/go-winio", - "Comment": "v0.2.0", - "Rev": "9e2895e5f6c3f16473b91d37fae6e89990a4520c" - }, - { - "ImportPath": "github.com/nsf/termbox-go", - "Rev": "362329b0aa6447eadd52edd8d660ec1dff470295" - }, - { - "ImportPath": "github.com/pborman/uuid", - "Comment": "v1.0-6-g0f1a469", - "Rev": "0f1a46960a86dcdf5dd30d3e6568a497a997909f" - }, - { - "ImportPath": "github.com/peterh/liner", - "Rev": "ad1edfd30321d8f006ccf05f1e0524adeb943060" - }, - { - "ImportPath": 
"github.com/rcrowley/go-metrics", - "Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d" - }, - { - "ImportPath": "github.com/rjeczalik/notify", - "Rev": "f627deca7a510d96f0ef9388f2d0e8b16d21f87f" - }, - { - "ImportPath": "github.com/robertkrimen/otto", - "Rev": "53221230c215611a90762720c9042ac782ef74ee" - }, - { - "ImportPath": "github.com/robertkrimen/otto/ast", - "Rev": "53221230c215611a90762720c9042ac782ef74ee" - }, - { - "ImportPath": "github.com/robertkrimen/otto/dbg", - "Rev": "53221230c215611a90762720c9042ac782ef74ee" - }, - { - "ImportPath": "github.com/robertkrimen/otto/file", - "Rev": "53221230c215611a90762720c9042ac782ef74ee" - }, - { - "ImportPath": "github.com/robertkrimen/otto/parser", - "Rev": "53221230c215611a90762720c9042ac782ef74ee" - }, - { - "ImportPath": "github.com/robertkrimen/otto/registry", - "Rev": "53221230c215611a90762720c9042ac782ef74ee" - }, - { - "ImportPath": "github.com/robertkrimen/otto/token", - "Rev": "53221230c215611a90762720c9042ac782ef74ee" - }, - { - "ImportPath": "github.com/rs/cors", - "Rev": "5950cf11d77f8a61b432a25dd4d444b4ced01379" - }, - { - "ImportPath": "github.com/rs/xhandler", - "Rev": "d9d9599b6aaf6a058cb7b1f48291ded2cbd13390" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/cache", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/errors", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/filter", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/journal", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/opt", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/storage", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/table", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb/util", - "Rev": "917f41c560270110ceb73c5b38be2a9127387071" - }, - { - "ImportPath": "golang.org/x/crypto/pbkdf2", - "Rev": "1f22c0103821b9390939b6776727195525381532" - }, - { - "ImportPath": "golang.org/x/crypto/ripemd160", - "Rev": "1f22c0103821b9390939b6776727195525381532" - }, - { - "ImportPath": "golang.org/x/crypto/scrypt", - "Rev": "1f22c0103821b9390939b6776727195525381532" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634" - }, - { - "ImportPath": "golang.org/x/net/html", - "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634" - }, - { - "ImportPath": "golang.org/x/net/html/atom", - "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634" - }, - { - "ImportPath": "golang.org/x/net/html/charset", - "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634" - }, - { - "ImportPath": "golang.org/x/net/websocket", - "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634" - }, - { - "ImportPath": 
"golang.org/x/sys/unix", - "Rev": "50c6bc5e4292a1d4e65c6e9be5f53be28bcbe28e" - }, - { - "ImportPath": "golang.org/x/text/encoding", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/charmap", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/htmlindex", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/internal", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/internal/identifier", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/japanese", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/korean", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/simplifiedchinese", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/traditionalchinese", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/encoding/unicode", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/internal/utf8internal", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/text/transform", - "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e" - }, - { - "ImportPath": "golang.org/x/tools/go/ast/astutil", - "Rev": "758728c4b28cfbac299730969ef8f655c4761283" - }, - { - "ImportPath": "golang.org/x/tools/imports", - "Rev": "758728c4b28cfbac299730969ef8f655c4761283" - }, - { - "ImportPath": "gopkg.in/check.v1", - "Rev": "4f90aeace3a26ad7021961c297b22c42160c7b25" - }, - { - "ImportPath": "gopkg.in/fatih/set.v0", - "Comment": "v0.1.0-3-g27c4092", - "Rev": "27c40922c40b43fe04554d8223a402af3ea333f3" - }, - { - "ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque", - "Rev": "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57" - }, - { - "ImportPath": "gopkg.in/urfave/cli.v1", - "Comment": "v1.17.0", - "Rev": "01857ac33766ce0c93856370626f9799281c14f4" - } - ] -} diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go b/Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go deleted file mode 100644 index 0ca7dbe75e044..0000000000000 --- a/Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go +++ /dev/null @@ -1,628 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// +build opencl - -package ethash - -//#cgo LDFLAGS: -w -//#include <stdint.h> -//#include <string.h> -//#include "src/libethash/internal.h" -import "C" - -import ( - crand "crypto/rand" - "encoding/binary" - "fmt" - "math" - "math/big" - mrand "math/rand" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - "unsafe" - - "github.com/Gustav-Simonsson/go-opencl/cl" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/pow" -) - -/* - - This code has two main entry points: - - 1. The initCL(...) function configures one or more OpenCL devices - (for now only GPUs) and loads the Ethash DAG onto device memory - - 2. The Search(...) function loads an Ethash nonce into device(s) memory and - executes the Ethash OpenCL kernel. - - Throughout the code, we refer to "host memory" and "device memory". - For most systems (e.g. a regular PC GPU miner) the host memory is RAM and - device memory is the GPU global memory (e.g. GDDR5). - - References mentioned in code comments: - - 1. https://github.com/ethereum/wiki/wiki/Ethash - 2. https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner.cpp - 3. https://www.khronos.org/registry/cl/sdk/1.2/docs/man/xhtml/ - 4. http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_OpenCL_Programming_User_Guide.pdf - -*/ - -type OpenCLDevice struct { - deviceId int - device *cl.Device - openCL11 bool // OpenCL versions 1.1 and 1.2 are handled a bit differently - openCL12 bool - - dagBuf *cl.MemObject // Ethash full DAG in device mem - headerBuf *cl.MemObject // Hash of block-to-mine in device mem - searchBuffers []*cl.MemObject - - searchKernel *cl.Kernel - hashKernel *cl.Kernel - - queue *cl.CommandQueue - ctx *cl.Context - workGroupSize int - - nonceRand *mrand.Rand // seeded by crypto/rand, see comments where it's initialised - result common.Hash -} - -type OpenCLMiner struct { - mu sync.Mutex - - ethash *Ethash // Ethash full DAG & cache in host mem - - deviceIds []int - devices []*OpenCLDevice - - dagSize uint64 - - hashRate int32 // Go atomics & uint64 have some issues; int32 is supported on all platforms -} - -type pendingSearch struct { - bufIndex uint32 - startNonce uint64 -} - -const ( - SIZEOF_UINT32 = 4 - - // See [1] - ethashMixBytesLen = 128 - ethashAccesses = 64 - - // See [4] - workGroupSize = 32 // must be a multiple of 8 - maxSearchResults = 63 - searchBufSize = 2 - globalWorkSize = 1024 * 256 -) - -func NewCL(deviceIds []int) *OpenCLMiner { - ids := make([]int, len(deviceIds)) - copy(ids, deviceIds) - return &OpenCLMiner{ - ethash: New(), - dagSize: 0, // to see if we need to update the DAG.
- deviceIds: ids, - } -} - -func PrintDevices() { - fmt.Println("=============================================") - fmt.Println("============ OpenCL Device Info =============") - fmt.Println("=============================================") - - var found []*cl.Device - - platforms, err := cl.GetPlatforms() - if err != nil { - fmt.Println("Platform error (check your OpenCL installation):", err) - return - } - - for i, p := range platforms { - fmt.Println("Platform id ", i) - fmt.Println("Platform Name ", p.Name()) - fmt.Println("Platform Vendor ", p.Vendor()) - fmt.Println("Platform Version ", p.Version()) - fmt.Println("Platform Extensions ", p.Extensions()) - fmt.Println("Platform Profile ", p.Profile()) - fmt.Println("") - - devices, err := cl.GetDevices(p, cl.DeviceTypeGPU) - if err != nil { - fmt.Println("Device error (check your GPU drivers):", err) - return - } - - for _, d := range devices { - fmt.Println("Device OpenCL id ", i) - fmt.Println("Device id for mining ", len(found)) - fmt.Println("Device Name ", d.Name()) - fmt.Println("Vendor ", d.Vendor()) - fmt.Println("Version ", d.Version()) - fmt.Println("Driver version ", d.DriverVersion()) - fmt.Println("Address bits ", d.AddressBits()) - fmt.Println("Max clock freq ", d.MaxClockFrequency()) - fmt.Println("Global mem size ", d.GlobalMemSize()) - fmt.Println("Max constant buffer size", d.MaxConstantBufferSize()) - fmt.Println("Max mem alloc size ", d.MaxMemAllocSize()) - fmt.Println("Max compute units ", d.MaxComputeUnits()) - fmt.Println("Max work group size ", d.MaxWorkGroupSize()) - fmt.Println("Max work item sizes ", d.MaxWorkItemSizes()) - fmt.Println("=============================================") - - found = append(found, d) - } - } - if len(found) == 0 { - fmt.Println("Found no GPU(s). Check that your OS can see the GPU(s)") - } else { - var idsFormat string - for i := 0; i < len(found); i++ { - idsFormat += strconv.Itoa(i) - if i != len(found)-1 { - idsFormat += "," - } - } - fmt.Printf("Found %v devices. Benchmark first GPU: gexp gpubench 0\n", len(found)) - fmt.Printf("Mine using all GPUs: gexp --minegpu %v\n", idsFormat) - } -} - -// See [2]. We basically do the same here, but the Go OpenCL bindings -// are at a slightly higher abstraction level. -func InitCL(blockNum uint64, c *OpenCLMiner) error { - platforms, err := cl.GetPlatforms() - if err != nil { - return fmt.Errorf("Platform error: %v\nCheck your OpenCL installation and then run gexp gpuinfo", err) - } - - var devices []*cl.Device - for _, p := range platforms { - ds, err := cl.GetDevices(p, cl.DeviceTypeGPU) - if err != nil { - return fmt.Errorf("Devices error: %v\nCheck your GPU drivers and then run gexp gpuinfo", err) - } - for _, d := range ds { - devices = append(devices, d) - } - } - - pow := New() - _ = pow.getDAG(blockNum) // generates DAG if we don't have it - pow.Light.getCache(blockNum) // and cache - - c.ethash = pow - dagSize := uint64(C.ethash_get_datasize(C.uint64_t(blockNum))) - c.dagSize = dagSize - - for _, id := range c.deviceIds { - if id > len(devices)-1 { - return fmt.Errorf("Device id not found.
See available device ids with: gexp gpuinfo") - } else { - err := initCLDevice(id, devices[id], c) - if err != nil { - return err - } - } - } - if len(c.devices) == 0 { - return fmt.Errorf("No GPU devices found") - } - return nil -} - -func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error { - devMaxAlloc := uint64(device.MaxMemAllocSize()) - devGlobalMem := uint64(device.GlobalMemSize()) - - // TODO: more fine-grained version logic - if device.Version() == "OpenCL 1.0" { - fmt.Println("Device OpenCL version not supported: ", device.Version()) - return fmt.Errorf("opencl version not supported") - } - - var cl11, cl12 bool - if device.Version() == "OpenCL 1.1" { - cl11 = true - } - if device.Version() == "OpenCL 1.2" { - cl12 = true - } - - // log warnings but carry on; some device drivers report inaccurate values - if c.dagSize > devGlobalMem { - fmt.Printf("WARNING: device memory may be insufficient: %v. DAG size: %v.\n", devGlobalMem, c.dagSize) - } - - if c.dagSize > devMaxAlloc { - fmt.Printf("WARNING: DAG size (%v) larger than device max memory allocation size (%v).\n", c.dagSize, devMaxAlloc) - fmt.Printf("You probably have to export GPU_MAX_ALLOC_PERCENT=95\n") - } - - fmt.Printf("Initialising device %v: %v\n", deviceId, device.Name()) - - context, err := cl.CreateContext([]*cl.Device{device}) - if err != nil { - return fmt.Errorf("failed creating context: %v", err) - } - - // TODO: test running with CL_QUEUE_PROFILING_ENABLE for profiling? - queue, err := context.CreateCommandQueue(device, 0) - if err != nil { - return fmt.Errorf("command queue err: %v", err) - } - - // See [4] section 3.2 and [3] "clBuildProgram". - // The OpenCL kernel code is compiled at run-time. - kvs := make(map[string]string, 4) - kvs["GROUP_SIZE"] = strconv.FormatUint(workGroupSize, 10) - kvs["DAG_SIZE"] = strconv.FormatUint(c.dagSize/ethashMixBytesLen, 10) - kvs["ACCESSES"] = strconv.FormatUint(ethashAccesses, 10) - kvs["MAX_OUTPUTS"] = strconv.FormatUint(maxSearchResults, 10) - kernelCode := replaceWords(kernel, kvs) - - program, err := context.CreateProgramWithSource([]string{kernelCode}) - if err != nil { - return fmt.Errorf("program err: %v", err) - } - - /* if using the AMD OpenCL implementation, you can set this to debug on an x86 CPU device. - see AMD OpenCL programming guide section 4.2 - - export in shell before running: - export AMD_OCL_BUILD_OPTIONS_APPEND="-g -O0" - export CPU_MAX_COMPUTE_UNITS=1 - - buildOpts := "-g -cl-opt-disable" - - */ - buildOpts := "" - err = program.BuildProgram([]*cl.Device{device}, buildOpts) - if err != nil { - return fmt.Errorf("program build err: %v", err) - } - - var searchKernelName, hashKernelName string - searchKernelName = "ethash_search" - hashKernelName = "ethash_hash" - - searchKernel, err := program.CreateKernel(searchKernelName) - hashKernel, err := program.CreateKernel(hashKernelName) - if err != nil { - return fmt.Errorf("kernel err: %v", err) - } - - // TODO: when this DAG size appears, patch the Go bindings - // (context.go) to work with uint64 as size_t - if c.dagSize > math.MaxInt32 { - fmt.Println("DAG too large for allocation.") - return fmt.Errorf("DAG too large for alloc") - } - - // TODO: patch up Go bindings to work with size_t, will overflow if > maxint32 - // TODO: the int(c.dagSize) cast below
overflows around 2017-06-09 12:17:02 - dagBuf := *(new(*cl.MemObject)) - dagBuf, err = context.CreateEmptyBuffer(cl.MemReadOnly, int(c.dagSize)) - if err != nil { - return fmt.Errorf("allocating dag buf failed: %v", err) - } - - // write DAG to device mem - dagPtr := unsafe.Pointer(c.ethash.Full.current.ptr.data) - _, err = queue.EnqueueWriteBuffer(dagBuf, true, 0, int(c.dagSize), dagPtr, nil) - if err != nil { - return fmt.Errorf("writing to dag buf failed: %v", err) - } - - searchBuffers := make([]*cl.MemObject, searchBufSize) - for i := 0; i < searchBufSize; i++ { - searchBuff, err := context.CreateEmptyBuffer(cl.MemWriteOnly, (1+maxSearchResults)*SIZEOF_UINT32) - if err != nil { - return fmt.Errorf("search buffer err: %v", err) - } - searchBuffers[i] = searchBuff - } - - headerBuf, err := context.CreateEmptyBuffer(cl.MemReadOnly, 32) - if err != nil { - return fmt.Errorf("header buffer err: %v", err) - } - - // Unique, random nonces are crucial for mining efficiency. - // While we do not need a cryptographically secure PRNG for nonces, - // we want to have uniform distribution and minimal repetition of nonces. - // We could guarantee strict uniqueness of nonces by generating unique ranges, - // but an int64 seed from crypto/rand should be good enough. - // We then use math/rand for speed and to avoid draining the OS entropy pool. - seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) - if err != nil { - return err - } - nonceRand := mrand.New(mrand.NewSource(seed.Int64())) - - deviceStruct := &OpenCLDevice{ - deviceId: deviceId, - device: device, - openCL11: cl11, - openCL12: cl12, - - dagBuf: dagBuf, - headerBuf: headerBuf, - searchBuffers: searchBuffers, - - searchKernel: searchKernel, - hashKernel: hashKernel, - - queue: queue, - ctx: context, - - workGroupSize: workGroupSize, - - nonceRand: nonceRand, - } - c.devices = append(c.devices, deviceStruct) - - return nil -} - -func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) { - c.mu.Lock() - newDagSize := uint64(C.ethash_get_datasize(C.uint64_t(block.NumberU64()))) - if newDagSize > c.dagSize { - // TODO: clean up buffers from previous DAG?
- err := InitCL(block.NumberU64(), c) - if err != nil { - fmt.Println("OpenCL init error: ", err) - return 0, []byte{0} - } - } - defer c.mu.Unlock() - - // Avoid unneeded OpenCL initialisation if we received stop while running InitCL - select { - case <-stop: - return 0, []byte{0} - default: - } - - headerHash := block.HashNoNonce() - diff := block.Difficulty() - target256 := new(big.Int).Div(maxUint256, diff) - target64 := new(big.Int).Rsh(target256, 192).Uint64() - var zero uint32 = 0 - - d := c.devices[index] - - _, err := d.queue.EnqueueWriteBuffer(d.headerBuf, false, 0, 32, unsafe.Pointer(&headerHash[0]), nil) - if err != nil { - fmt.Println("Error in Search clEnqueueWriteBuffer : ", err) - return 0, []byte{0} - } - - for i := 0; i < searchBufSize; i++ { - _, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[i], false, 0, 4, unsafe.Pointer(&zero), nil) - if err != nil { - fmt.Println("Error in Search clEnqueueWriteBuffer : ", err) - return 0, []byte{0} - } - } - - // wait for all search buffers to complete - err = d.queue.Finish() - if err != nil { - fmt.Println("Error in Search clFinish : ", err) - return 0, []byte{0} - } - - err = d.searchKernel.SetArg(1, d.headerBuf) - if err != nil { - fmt.Println("Error in Search clSetKernelArg : ", err) - return 0, []byte{0} - } - - err = d.searchKernel.SetArg(2, d.dagBuf) - if err != nil { - fmt.Println("Error in Search clSetKernelArg : ", err) - return 0, []byte{0} - } - - err = d.searchKernel.SetArg(4, target64) - if err != nil { - fmt.Println("Error in Search clSetKernelArg : ", err) - return 0, []byte{0} - } - err = d.searchKernel.SetArg(5, uint32(math.MaxUint32)) - if err != nil { - fmt.Println("Error in Search clSetKernelArg : ", err) - return 0, []byte{0} - } - - // wait on this before returning - var preReturnEvent *cl.Event - if d.openCL12 { - preReturnEvent, err = d.ctx.CreateUserEvent() - if err != nil { - fmt.Println("Error in Search create CL user event : ", err) - return 0, []byte{0} - } - } - - pending := make([]pendingSearch, 0, searchBufSize) - var p *pendingSearch - searchBufIndex := uint32(0) - var checkNonce uint64 - loops := int64(0) - prevHashRate := int32(0) - start := time.Now().UnixNano() - // we grab a single random nonce and set it as an argument to the kernel search function - // the device will then add each local thread's gid to the nonce, creating a unique nonce - // for each device compute unit executing in parallel - initNonce := uint64(d.nonceRand.Int63()) - for nonce := initNonce; ; nonce += uint64(globalWorkSize) { - select { - case <-stop: - - /* - if d.openCL12 { - err = cl.WaitForEvents([]*cl.Event{preReturnEvent}) - if err != nil { - fmt.Println("Error in Search WaitForEvents: ", err) - } - } - */ - - atomic.AddInt32(&c.hashRate, -prevHashRate) - return 0, []byte{0} - default: - } - - if (loops % (1 << 7)) == 0 { - elapsed := time.Now().UnixNano() - start - // TODO: verify that this is the correct hash rate calculation - hashes := (float64(1e9) / float64(elapsed)) * float64(loops*1024*256) - hashrateDiff := int32(hashes) - prevHashRate - prevHashRate = int32(hashes) - atomic.AddInt32(&c.hashRate, hashrateDiff) - } - loops++ - - err = d.searchKernel.SetArg(0, d.searchBuffers[searchBufIndex]) - if err != nil { - fmt.Println("Error in Search clSetKernelArg : ", err) - return 0, []byte{0} - } - err = d.searchKernel.SetArg(3, nonce) - if err != nil { - fmt.Println("Error in Search clSetKernelArg : ", err) - return 0, []byte{0} - } - - // execute kernel - _, err := d.queue.EnqueueNDRangeKernel( - d.searchKernel,
- []int{0}, - []int{globalWorkSize}, - []int{d.workGroupSize}, - nil) - if err != nil { - fmt.Println("Error in Search clEnqueueNDRangeKernel : ", err) - return 0, []byte{0} - } - - pending = append(pending, pendingSearch{bufIndex: searchBufIndex, startNonce: nonce}) - searchBufIndex = (searchBufIndex + 1) % searchBufSize - - if len(pending) == searchBufSize { - p = &(pending[searchBufIndex]) - cres, _, err := d.queue.EnqueueMapBuffer(d.searchBuffers[p.bufIndex], true, - cl.MapFlagRead, 0, (1+maxSearchResults)*SIZEOF_UINT32, - nil) - if err != nil { - fmt.Println("Error in Search clEnqueueMapBuffer: ", err) - return 0, []byte{0} - } - - results := cres.ByteSlice() - nfound := binary.LittleEndian.Uint32(results) - nfound = uint32(math.Min(float64(nfound), float64(maxSearchResults))) - // OpenCL returns the offsets from the start nonce - for i := uint32(0); i < nfound; i++ { - lo := (i + 1) * SIZEOF_UINT32 - hi := (i + 2) * SIZEOF_UINT32 - upperNonce := uint64(binary.LittleEndian.Uint32(results[lo:hi])) - checkNonce = p.startNonce + upperNonce - if checkNonce != 0 { - // We verify that the nonce is indeed a solution by - // executing the Ethash verification function (on the CPU). - cache := c.ethash.Light.getCache(block.NumberU64()) - ok, mixDigest, result := cache.compute(c.dagSize, headerHash, checkNonce) - - // TODO: return result first - if ok && result.Big().Cmp(target256) <= 0 { - _, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil) - if err != nil { - fmt.Println("Error in Search clEnqueueUnmapMemObject: ", err) - } - if d.openCL12 { - err = cl.WaitForEvents([]*cl.Event{preReturnEvent}) - if err != nil { - fmt.Println("Error in Search WaitForEvents: ", err) - } - } - return checkNonce, mixDigest.Bytes() - } - _, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[p.bufIndex], false, 0, 4, unsafe.Pointer(&zero), nil) - if err != nil { - fmt.Println("Error in Search cl: EnqueueWriteBuffer", err) - return 0, []byte{0} - } - } - } - _, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil) - if err != nil { - fmt.Println("Error in Search clEnqueueUnMapMemObject: ", err) - return 0, []byte{0} - } - pending = append(pending[:searchBufIndex], pending[searchBufIndex+1:]...) - } - } - if d.openCL12 { - err := cl.WaitForEvents([]*cl.Event{preReturnEvent}) - if err != nil { - fmt.Println("Error in Search clWaitForEvents: ", err) - return 0, []byte{0} - } - } - return 0, []byte{0} -} - -func (c *OpenCLMiner) Verify(block pow.Block) bool { - return c.ethash.Light.Verify(block) -} -func (c *OpenCLMiner) GetHashrate() int64 { - return int64(atomic.LoadInt32(&c.hashRate)) -} -func (c *OpenCLMiner) Turbo(on bool) { - // This is GPU mining. Always be turbo. 
-} - -func replaceWords(text string, kvs map[string]string) string { - for k, v := range kvs { - text = strings.Replace(text, k, v, -1) - } - return text -} - -func logErr(err error) { - if err != nil { - fmt.Println("Error in OpenCL call:", err) - } -} - -func argErr(err error) error { - return fmt.Errorf("arg err: %v", err) -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/.gitignore b/Godeps/_workspace/src/github.com/expanse-org/ethash/.gitignore deleted file mode 100644 index da074e6b3ac3e..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -.idea/ -.DS_Store -*/**/*un~ -.vagrant/ -*.pyc -build/ -pyethash.egg-info/ -*.so -*~ -*.swp -MANIFEST -dist/ diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/.travis.yml b/Godeps/_workspace/src/github.com/expanse-org/ethash/.travis.yml deleted file mode 100644 index cd94016fa04cd..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go -go: - - 1.4.2 - -before_install: - # for g++4.8 and C++11 - - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test - - # Set up go-expanse - - sudo apt-get update -y -qq - - sudo apt-get install -yqq libgmp3-dev - - git clone --depth=10 https://github.com/expanse-org/go-expanse ${GOPATH}/src/github.com/expanse-org/go-expanse - # use canned dependencies from the go-expanse repository - - export GOPATH=$GOPATH:$GOPATH/src/github.com/expanse-org/go-expanse/Godeps/_workspace/ - - echo $GOPATH - -install: - # need to explicitly request version 1.48 since by default we get 1.46 which does not work with C++11 - - sudo apt-get install -qq --yes --force-yes g++-4.8 - - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50 - - sudo apt-get install -qq wget cmake bash libboost-test1.48-dev libboost-system1.48-dev libboost-filesystem1.48-dev nodejs python-pip python-dev valgrind - - sudo pip install virtualenv -q -script: "./test/test.sh" diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/CMakeLists.txt b/Godeps/_workspace/src/github.com/expanse-org/ethash/CMakeLists.txt deleted file mode 100644 index 807c43e963473..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -cmake_minimum_required(VERSION 2.8.7) -project(ethash) - -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/") -set(ETHHASH_LIBS ethash) - -if (WIN32 AND WANT_CRYPTOPP) - add_subdirectory(cryptopp) -endif() - -add_subdirectory(src/libethash) - -add_subdirectory(src/benchmark EXCLUDE_FROM_ALL) -add_subdirectory(test/c) diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/MANIFEST.in b/Godeps/_workspace/src/github.com/expanse-org/ethash/MANIFEST.in deleted file mode 100644 index 74e73c8be48d7..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/MANIFEST.in +++ /dev/null @@ -1,17 +0,0 @@ -include setup.py - -# C sources -include src/libethash/internal.c -include src/libethash/sha3.c -include src/libethash/util.c -include src/python/core.c - -# Headers -include src/libethash/compiler.h -include src/libethash/data_sizes.h -include src/libethash/endian.h -include src/libethash/ethash.h -include src/libethash/fnv.h -include src/libethash/internal.h -include src/libethash/sha3.h -include src/libethash/util.h diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/Makefile b/Godeps/_workspace/src/github.com/expanse-org/ethash/Makefile 
deleted file mode 100644 index 741d3b56dcc76..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -.PHONY: clean test -test: - ./test/test.sh - -clean: - rm -rf *.so pyethash.egg-info/ build/ test/python/python-virtual-env/ test/c/build/ pyethash.so test/python/*.pyc dist/ MANIFEST diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/README.md b/Godeps/_workspace/src/github.com/expanse-org/ethash/README.md deleted file mode 100644 index e8e5ee39a68bd..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/README.md +++ /dev/null @@ -1,22 +0,0 @@ -[![Build Status](https://travis-ci.org/expanse/ethash.svg?branch=master)](https://travis-ci.org/expanse/ethash) -[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/debris/ethash?branch=master&svg=true)](https://ci.appveyor.com/project/debris/ethash-nr37r/branch/master) - -# Ethash - -For details on this project, please see the Expanse wiki: -https://github.com/expanse-org/wiki/wiki/Ethash - -### Coding Style for C++ code: - -Follow the same exact style as in [cpp-expanse](https://github.com/expanse-org/cpp-expanse/blob/develop/CodingStandards.txt) - -### Coding Style for C code: - -The main thing above all is code consistency. - -- Tabs for indentation. A tab is 4 spaces -- Try to stick to the [K&R](http://en.wikipedia.org/wiki/Indent_style#K.26R_style), - especially for the C code. -- Keep the line lengths reasonable. No hard limit on 80 characters but don't go further - than 110. Some people work with multiple buffers next to each other. - Make them like you :) diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/Vagrantfile b/Godeps/_workspace/src/github.com/expanse-org/ethash/Vagrantfile deleted file mode 100644 index 03891653f05a9..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/Vagrantfile +++ /dev/null @@ -1,7 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -Vagrant.configure(2) do |config| - config.vm.box = "Ubuntu 12.04" - config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box" -end diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/appveyor.yml b/Godeps/_workspace/src/github.com/expanse-org/ethash/appveyor.yml deleted file mode 100644 index ac36a06261c2f..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/appveyor.yml +++ /dev/null @@ -1,43 +0,0 @@ -version: 1.0.0.{build} - -environment: - BOOST_ROOT: "c:/projects/ethash/deps/boost" - -branches: - only: - - master - - develop - -os: Windows Server 2012 R2 - -clone_folder: c:\projects\ethash - -#platform: Any CPU -#configuration: Debug - -install: - # by default, all script lines are interpreted as batch - -# scripts to run before build -before_build: - - echo "Downloading boost..." - - mkdir c:\projects\ethash\deps - - cd c:\projects\ethash\deps - - curl -O https://build.ethdev.com/builds/windows-precompiled/boost.tar.gz - - echo "Unzipping boost..." - - 7z x boost.tar.gz > nul - - 7z x boost.tar > nul - - ls - - echo "Running cmake..." - - cd c:\projects\ethash - - cmake . - -build: - project: ALL_BUILD.vcxproj # path to Visual Studio solution or project - -after_build: - - echo "Running tests..." - - cd c:\projects\ethash\test\c\Debug - - Test.exe - - echo "Finished!" 
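Editor's note: the OpenCL miner deleted above seeds a math/rand generator from crypto/rand and then draws its starting nonce from that generator, trading cryptographic strength for speed while still getting a uniform, well-spread starting point per device. Below is a minimal standalone sketch of that seeding pattern; the helper name newNonceSource and the main wrapper are illustrative, not part of the deleted file.

```go
package main

import (
	crand "crypto/rand"
	"fmt"
	"math"
	"math/big"
	mrand "math/rand"
)

// newNonceSource mirrors the deleted miner's approach: draw a single
// cryptographically random int64 seed, then hand nonce generation to the
// fast math/rand PRNG so the OS entropy pool is touched only once.
func newNonceSource() (*mrand.Rand, error) {
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		return nil, err
	}
	return mrand.New(mrand.NewSource(seed.Int64())), nil
}

func main() {
	rng, err := newNonceSource()
	if err != nil {
		panic(err)
	}
	// The miner used Int63() for the initial nonce and then stepped it by
	// globalWorkSize (1024 * 256) after each kernel launch.
	initNonce := uint64(rng.Int63())
	fmt.Printf("starting nonce: %#x\n", initNonce)
}
```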
- diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/CMakeParseArguments.cmake b/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/CMakeParseArguments.cmake deleted file mode 100644 index 8553f38f5f090..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/CMakeParseArguments.cmake +++ /dev/null @@ -1,161 +0,0 @@ -#.rst: -# CMakeParseArguments -# ------------------- -# -# -# -# CMAKE_PARSE_ARGUMENTS(<prefix> <options> <one_value_keywords> <multi_value_keywords> args...) -# -# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions -# for parsing the arguments given to that macro or function. It -# processes the arguments and defines a set of variables which hold the -# values of the respective options. -# -# The <options> argument contains all options for the respective macro, -# i.e. keywords which can be used when calling the macro without any -# value following, like e.g. the OPTIONAL keyword of the install() -# command. -# -# The <one_value_keywords> argument contains all keywords for this macro -# which are followed by one value, like e.g. the DESTINATION keyword of the -# install() command. -# -# The <multi_value_keywords> argument contains all keywords for this -# macro which can be followed by more than one value, like e.g. the -# TARGETS or FILES keywords of the install() command. -# -# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the -# keywords listed in <options>, <one_value_keywords> and <multi_value_keywords> -# a variable composed of the given <prefix> -# followed by "_" and the name of the respective keyword. These -# variables will then hold the respective value from the argument list. -# For the <options> keywords this will be TRUE or FALSE. -# -# All remaining arguments are collected in a variable -# <prefix>_UNPARSED_ARGUMENTS, this can be checked afterwards to see -# whether your macro was called with unrecognized parameters. -# -# As an example here is a my_install() macro, which takes similar arguments -# as the real install() command: -# -# :: -# -# function(MY_INSTALL) -# set(options OPTIONAL FAST) -# set(oneValueArgs DESTINATION RENAME) -# set(multiValueArgs TARGETS CONFIGURATIONS) -# cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" -# "${multiValueArgs}" ${ARGN} ) -# ... -# -# -# -# Assume my_install() has been called like this: -# -# :: -# -# my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub) -# -# -# -# After the cmake_parse_arguments() call the macro will have set the -# following variables: -# -# :: -# -# MY_INSTALL_OPTIONAL = TRUE -# MY_INSTALL_FAST = FALSE (this option was not used when calling my_install()) -# MY_INSTALL_DESTINATION = "bin" -# MY_INSTALL_RENAME = "" (was not used) -# MY_INSTALL_TARGETS = "foo;bar" -# MY_INSTALL_CONFIGURATIONS = "" (was not used) -# MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL") -# -# -# -# You can then continue and process these variables. -# -# Keywords terminate lists of values, e.g. if directly after a -# one_value_keyword another recognized keyword follows, this is -# interpreted as the beginning of the new option. E.g. -# my_install(TARGETS foo DESTINATION OPTIONAL) would not result in -# MY_INSTALL_DESTINATION set to "OPTIONAL"; instead MY_INSTALL_DESTINATION -# would be empty and MY_INSTALL_OPTIONAL would be set to TRUE. - -#============================================================================= -# Copyright 2010 Alexander Neundorf -# -# Distributed under the OSI-approved BSD License (the "License"); -# see accompanying file Copyright.txt for details.
-# -# This software is distributed WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the License for more information. -#============================================================================= -# (To distribute this file outside of CMake, substitute the full -# License text for the above reference.) - - -if(__CMAKE_PARSE_ARGUMENTS_INCLUDED) - return() -endif() -set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE) - - -function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames) - # first set all result variables to empty/FALSE - foreach(arg_name ${_singleArgNames} ${_multiArgNames}) - set(${prefix}_${arg_name}) - endforeach() - - foreach(option ${_optionNames}) - set(${prefix}_${option} FALSE) - endforeach() - - set(${prefix}_UNPARSED_ARGUMENTS) - - set(insideValues FALSE) - set(currentArgName) - - # now iterate over all arguments and fill the result variables - foreach(currentArg ${ARGN}) - list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword - list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword - list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword - - if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1) - if(insideValues) - if("${insideValues}" STREQUAL "SINGLE") - set(${prefix}_${currentArgName} ${currentArg}) - set(insideValues FALSE) - elseif("${insideValues}" STREQUAL "MULTI") - list(APPEND ${prefix}_${currentArgName} ${currentArg}) - endif() - else() - list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg}) - endif() - else() - if(NOT ${optionIndex} EQUAL -1) - set(${prefix}_${currentArg} TRUE) - set(insideValues FALSE) - elseif(NOT ${singleArgIndex} EQUAL -1) - set(currentArgName ${currentArg}) - set(${prefix}_${currentArgName}) - set(insideValues "SINGLE") - elseif(NOT ${multiArgIndex} EQUAL -1) - set(currentArgName ${currentArg}) - set(${prefix}_${currentArgName}) - set(insideValues "MULTI") - endif() - endif() - - endforeach() - - # propagate the result variables to the caller: - foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames}) - set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE) - endforeach() - set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE) - -endfunction() diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindCryptoPP.cmake b/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindCryptoPP.cmake deleted file mode 100644 index 5ca01e4468bcc..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindCryptoPP.cmake +++ /dev/null @@ -1,108 +0,0 @@ -# Module for locating the Crypto++ encryption library. -# -# Customizable variables: -# CRYPTOPP_ROOT_DIR -# This variable points to the CryptoPP root directory. On Windows the -# library location typically will have to be provided explicitly using the -# -D command-line option. The directory should include the include/cryptopp, -# lib and/or bin sub-directories. -# -# Read-only variables: -# CRYPTOPP_FOUND -# Indicates whether the library has been found. -# -# CRYPTOPP_INCLUDE_DIRS -# Points to the CryptoPP include directory. -# -# CRYPTOPP_LIBRARIES -# Points to the CryptoPP libraries that should be passed to -# target_link_libararies. 
-# -# -# Copyright (c) 2012 Sergiu Dotenco -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -INCLUDE (FindPackageHandleStandardArgs) - -FIND_PATH (CRYPTOPP_ROOT_DIR - NAMES cryptopp/cryptlib.h include/cryptopp/cryptlib.h - PATHS ENV CRYPTOPPROOT - DOC "CryptoPP root directory") - -# Re-use the previous path: -FIND_PATH (CRYPTOPP_INCLUDE_DIR - NAMES cryptopp/cryptlib.h - HINTS ${CRYPTOPP_ROOT_DIR} - PATH_SUFFIXES include - DOC "CryptoPP include directory") - -FIND_LIBRARY (CRYPTOPP_LIBRARY_DEBUG - NAMES cryptlibd cryptoppd - HINTS ${CRYPTOPP_ROOT_DIR} - PATH_SUFFIXES lib - DOC "CryptoPP debug library") - -FIND_LIBRARY (CRYPTOPP_LIBRARY_RELEASE - NAMES cryptlib cryptopp - HINTS ${CRYPTOPP_ROOT_DIR} - PATH_SUFFIXES lib - DOC "CryptoPP release library") - -IF (CRYPTOPP_LIBRARY_DEBUG AND CRYPTOPP_LIBRARY_RELEASE) - SET (CRYPTOPP_LIBRARY - optimized ${CRYPTOPP_LIBRARY_RELEASE} - debug ${CRYPTOPP_LIBRARY_DEBUG} CACHE DOC "CryptoPP library") -ELSEIF (CRYPTOPP_LIBRARY_RELEASE) - SET (CRYPTOPP_LIBRARY ${CRYPTOPP_LIBRARY_RELEASE} CACHE DOC - "CryptoPP library") -ENDIF (CRYPTOPP_LIBRARY_DEBUG AND CRYPTOPP_LIBRARY_RELEASE) - -IF (CRYPTOPP_INCLUDE_DIR) - SET (_CRYPTOPP_VERSION_HEADER ${CRYPTOPP_INCLUDE_DIR}/cryptopp/config.h) - - IF (EXISTS ${_CRYPTOPP_VERSION_HEADER}) - FILE (STRINGS ${_CRYPTOPP_VERSION_HEADER} _CRYPTOPP_VERSION_TMP REGEX - "^#define CRYPTOPP_VERSION[ \t]+[0-9]+$") - - STRING (REGEX REPLACE - "^#define CRYPTOPP_VERSION[ \t]+([0-9]+)" "\\1" _CRYPTOPP_VERSION_TMP - ${_CRYPTOPP_VERSION_TMP}) - - STRING (REGEX REPLACE "([0-9]+)[0-9][0-9]" "\\1" CRYPTOPP_VERSION_MAJOR - ${_CRYPTOPP_VERSION_TMP}) - STRING (REGEX REPLACE "[0-9]([0-9])[0-9]" "\\1" CRYPTOPP_VERSION_MINOR - ${_CRYPTOPP_VERSION_TMP}) - STRING (REGEX REPLACE "[0-9][0-9]([0-9])" "\\1" CRYPTOPP_VERSION_PATCH - ${_CRYPTOPP_VERSION_TMP}) - - SET (CRYPTOPP_VERSION_COUNT 3) - SET (CRYPTOPP_VERSION - ${CRYPTOPP_VERSION_MAJOR}.${CRYPTOPP_VERSION_MINOR}.${CRYPTOPP_VERSION_PATCH}) - ENDIF (EXISTS ${_CRYPTOPP_VERSION_HEADER}) -ENDIF (CRYPTOPP_INCLUDE_DIR) - -SET (CRYPTOPP_INCLUDE_DIRS ${CRYPTOPP_INCLUDE_DIR}) -SET (CRYPTOPP_LIBRARIES ${CRYPTOPP_LIBRARY}) - -MARK_AS_ADVANCED (CRYPTOPP_INCLUDE_DIR CRYPTOPP_LIBRARY CRYPTOPP_LIBRARY_DEBUG - CRYPTOPP_LIBRARY_RELEASE) - -FIND_PACKAGE_HANDLE_STANDARD_ARGS (CryptoPP REQUIRED_VARS CRYPTOPP_ROOT_DIR - CRYPTOPP_INCLUDE_DIR CRYPTOPP_LIBRARY VERSION_VAR CRYPTOPP_VERSION) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindOpenCL.cmake 
b/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindOpenCL.cmake deleted file mode 100644 index 415c95dbd0fe7..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindOpenCL.cmake +++ /dev/null @@ -1,148 +0,0 @@ -#.rst: -# FindOpenCL -# ---------- -# -# Try to find OpenCL -# -# Once done this will define:: -# -# OpenCL_FOUND - True if OpenCL was found -# OpenCL_INCLUDE_DIRS - include directories for OpenCL -# OpenCL_LIBRARIES - link against this library to use OpenCL -# OpenCL_VERSION_STRING - Highest supported OpenCL version (eg. 1.2) -# OpenCL_VERSION_MAJOR - The major version of the OpenCL implementation -# OpenCL_VERSION_MINOR - The minor version of the OpenCL implementation -# -# The module will also define two cache variables:: -# -# OpenCL_INCLUDE_DIR - the OpenCL include directory -# OpenCL_LIBRARY - the path to the OpenCL library -# - -#============================================================================= -# Copyright 2014 Matthaeus G. Chajdas -# -# Distributed under the OSI-approved BSD License (the "License"); -# see accompanying file Copyright.txt for details. -# -# This software is distributed WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the License for more information. -#============================================================================= -# (To distribute this file outside of CMake, substitute the full -# License text for the above reference.) - -function(_FIND_OPENCL_VERSION) - include(CheckSymbolExists) - include(CMakePushCheckState) - set(CMAKE_REQUIRED_QUIET ${OpenCL_FIND_QUIETLY}) - - CMAKE_PUSH_CHECK_STATE() - foreach(VERSION "2_0" "1_2" "1_1" "1_0") - set(CMAKE_REQUIRED_INCLUDES "${OpenCL_INCLUDE_DIR}") - - if(APPLE) - CHECK_SYMBOL_EXISTS( - CL_VERSION_${VERSION} - "${OpenCL_INCLUDE_DIR}/OpenCL/cl.h" - OPENCL_VERSION_${VERSION}) - else() - CHECK_SYMBOL_EXISTS( - CL_VERSION_${VERSION} - "${OpenCL_INCLUDE_DIR}/CL/cl.h" - OPENCL_VERSION_${VERSION}) - endif() - - if(OPENCL_VERSION_${VERSION}) - string(REPLACE "_" "." 
VERSION "${VERSION}") - set(OpenCL_VERSION_STRING ${VERSION} PARENT_SCOPE) - string(REGEX MATCHALL "[0-9]+" version_components "${VERSION}") - list(GET version_components 0 major_version) - list(GET version_components 1 minor_version) - set(OpenCL_VERSION_MAJOR ${major_version} PARENT_SCOPE) - set(OpenCL_VERSION_MINOR ${minor_version} PARENT_SCOPE) - break() - endif() - endforeach() - CMAKE_POP_CHECK_STATE() -endfunction() - -find_path(OpenCL_INCLUDE_DIR - NAMES - CL/cl.h OpenCL/cl.h - PATHS - ENV "PROGRAMFILES(X86)" - ENV AMDAPPSDKROOT - ENV INTELOCLSDKROOT - ENV NVSDKCOMPUTE_ROOT - ENV CUDA_PATH - ENV ATISTREAMSDKROOT - PATH_SUFFIXES - include - OpenCL/common/inc - "AMD APP/include") - -_FIND_OPENCL_VERSION() - -if(WIN32) - if(CMAKE_SIZEOF_VOID_P EQUAL 4) - find_library(OpenCL_LIBRARY - NAMES OpenCL - PATHS - ENV "PROGRAMFILES(X86)" - ENV AMDAPPSDKROOT - ENV INTELOCLSDKROOT - ENV CUDA_PATH - ENV NVSDKCOMPUTE_ROOT - ENV ATISTREAMSDKROOT - PATH_SUFFIXES - "AMD APP/lib/x86" - lib/x86 - lib/Win32 - OpenCL/common/lib/Win32) - elseif(CMAKE_SIZEOF_VOID_P EQUAL 8) - find_library(OpenCL_LIBRARY - NAMES OpenCL - PATHS - ENV "PROGRAMFILES(X86)" - ENV AMDAPPSDKROOT - ENV INTELOCLSDKROOT - ENV CUDA_PATH - ENV NVSDKCOMPUTE_ROOT - ENV ATISTREAMSDKROOT - PATH_SUFFIXES - "AMD APP/lib/x86_64" - lib/x86_64 - lib/x64 - OpenCL/common/lib/x64) - endif() -else() - find_library(OpenCL_LIBRARY - NAMES OpenCL - PATHS - ENV "PROGRAMFILES(X86)" - ENV AMDAPPSDKROOT - ENV INTELOCLSDKROOT - ENV CUDA_PATH - ENV NVSDKCOMPUTE_ROOT - ENV ATISTREAMSDKROOT - PATH_SUFFIXES - "AMD APP/lib/x86_64" - lib/x86_64 - lib/x64 - OpenCL/common/lib/x64) -endif() - -set(OpenCL_LIBRARIES ${OpenCL_LIBRARY}) -set(OpenCL_INCLUDE_DIRS ${OpenCL_INCLUDE_DIR}) - -include(${CMAKE_CURRENT_LIST_DIR}/FindPackageHandleStandardArgs.cmake) -find_package_handle_standard_args( - OpenCL - FOUND_VAR OpenCL_FOUND - REQUIRED_VARS OpenCL_LIBRARY OpenCL_INCLUDE_DIR - VERSION_VAR OpenCL_VERSION_STRING) - -mark_as_advanced( - OpenCL_INCLUDE_DIR - OpenCL_LIBRARY) diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindPackageHandleStandardArgs.cmake b/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindPackageHandleStandardArgs.cmake deleted file mode 100644 index 6bcf1e788b8ca..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindPackageHandleStandardArgs.cmake +++ /dev/null @@ -1,382 +0,0 @@ -#.rst: -# FindPackageHandleStandardArgs -# ----------------------------- -# -# -# -# FIND_PACKAGE_HANDLE_STANDARD_ARGS( ... ) -# -# This function is intended to be used in FindXXX.cmake modules files. -# It handles the REQUIRED, QUIET and version-related arguments to -# find_package(). It also sets the _FOUND variable. The -# package is considered found if all variables ... listed contain -# valid results, e.g. valid filepaths. -# -# There are two modes of this function. The first argument in both -# modes is the name of the Find-module where it is called (in original -# casing). -# -# The first simple mode looks like this: -# -# :: -# -# FIND_PACKAGE_HANDLE_STANDARD_ARGS( -# (DEFAULT_MSG|"Custom failure message") ... ) -# -# If the variables to are all valid, then -# _FOUND will be set to TRUE. If DEFAULT_MSG is given -# as second argument, then the function will generate itself useful -# success and error messages. You can also supply a custom error -# message for the failure case. This is not recommended. 
-# -# The second mode is more powerful and also supports version checking: -# -# :: -# -# FIND_PACKAGE_HANDLE_STANDARD_ARGS(NAME -# [FOUND_VAR <resultVar>] -# [REQUIRED_VARS <var1>...<varN>] -# [VERSION_VAR <versionvar>] -# [HANDLE_COMPONENTS] -# [CONFIG_MODE] -# [FAIL_MESSAGE "Custom failure message"] ) -# -# In this mode, the name of the result-variable can be set either to -# <UPPERCASED_NAME>_FOUND or <OriginalCase_Name>_FOUND using the -# FOUND_VAR option. Other names for the result-variable are not -# allowed. So for a Find-module named FindFooBar.cmake, the two -# possible names are FooBar_FOUND and FOOBAR_FOUND. It is recommended -# to use the original case version. If the FOUND_VAR option is not -# used, the default is <UPPERCASED_NAME>_FOUND. -# -# As in the simple mode, if <var1> through <varN> are all valid, -# <packagename>_FOUND will be set to TRUE. After REQUIRED_VARS the -# variables which are required for this package are listed. Following -# VERSION_VAR the name of the variable can be specified which holds the -# version of the package which has been found. If this is done, this -# version will be checked against the (potentially) specified required -# version used in the find_package() call. The EXACT keyword is also -# handled. The default messages include information about the required -# version and the version which has been actually found, both if the -# version is ok or not. If the package supports components, use the -# HANDLE_COMPONENTS option to enable handling them. In this case, -# find_package_handle_standard_args() will report which components have -# been found and which are missing, and the <packagename>_FOUND variable -# will be set to FALSE if any of the required components (i.e. not the -# ones listed after OPTIONAL_COMPONENTS) are missing. Use the option -# CONFIG_MODE if your FindXXX.cmake module is a wrapper for a -# find_package(... NO_MODULE) call. In this case VERSION_VAR will be -# set to <NAME>_VERSION and the macro will automatically check whether -# the Config module was found. Via FAIL_MESSAGE a custom failure -# message can be specified, if this is not used, the default message -# will be displayed. -# -# Example for mode 1: -# -# :: -# -# find_package_handle_standard_args(LibXml2 DEFAULT_MSG -# LIBXML2_LIBRARY LIBXML2_INCLUDE_DIR) -# -# -# -# LibXml2 is considered to be found, if both LIBXML2_LIBRARY and -# LIBXML2_INCLUDE_DIR are valid. Then also LIBXML2_FOUND is set to -# TRUE. If it is not found and REQUIRED was used, it fails with -# FATAL_ERROR, independent whether QUIET was used or not. If it is -# found, success will be reported, including the content of <var1>. On -# repeated CMake runs, the same message won't be printed again. -# -# Example for mode 2: -# -# :: -# -# find_package_handle_standard_args(LibXslt -# FOUND_VAR LibXslt_FOUND -# REQUIRED_VARS LibXslt_LIBRARIES LibXslt_INCLUDE_DIRS -# VERSION_VAR LibXslt_VERSION_STRING) -# -# In this case, LibXslt is considered to be found if the variable(s) -# listed after REQUIRED_VARS are all valid, i.e. LibXslt_LIBRARIES and -# LibXslt_INCLUDE_DIRS in this case. The result will then be stored in -# LibXslt_FOUND. Also the version of LibXslt will be checked by using -# the version contained in LibXslt_VERSION_STRING. Since no -# FAIL_MESSAGE is given, the default messages will be printed. -# -# Another example for mode 2: -# -# :: -# -# find_package(Automoc4 QUIET NO_MODULE HINTS /opt/automoc4) -# find_package_handle_standard_args(Automoc4 CONFIG_MODE) -# -# In this case, FindAutomoc4.cmake wraps a call to find_package(Automoc4 -# NO_MODULE) and adds an additional search directory for automoc4.
Here -# the result will be stored in AUTOMOC4_FOUND. The following -# FIND_PACKAGE_HANDLE_STANDARD_ARGS() call produces a proper -# success/error message. - -#============================================================================= -# Copyright 2007-2009 Kitware, Inc. -# -# Distributed under the OSI-approved BSD License (the "License"); -# see accompanying file Copyright.txt for details. -# -# This software is distributed WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the License for more information. -#============================================================================= -# (To distribute this file outside of CMake, substitute the full -# License text for the above reference.) - -include(${CMAKE_CURRENT_LIST_DIR}/FindPackageMessage.cmake) -include(${CMAKE_CURRENT_LIST_DIR}/CMakeParseArguments.cmake) - -# internal helper macro -macro(_FPHSA_FAILURE_MESSAGE _msg) - if (${_NAME}_FIND_REQUIRED) - message(FATAL_ERROR "${_msg}") - else () - if (NOT ${_NAME}_FIND_QUIETLY) - message(STATUS "${_msg}") - endif () - endif () -endmacro() - - -# internal helper macro to generate the failure message when used in CONFIG_MODE: -macro(_FPHSA_HANDLE_FAILURE_CONFIG_MODE) - # _CONFIG is set, but FOUND is false, this means that some other of the REQUIRED_VARS was not found: - if(${_NAME}_CONFIG) - _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: missing: ${MISSING_VARS} (found ${${_NAME}_CONFIG} ${VERSION_MSG})") - else() - # If _CONSIDERED_CONFIGS is set, the config-file has been found, but no suitable version. - # List them all in the error message: - if(${_NAME}_CONSIDERED_CONFIGS) - set(configsText "") - list(LENGTH ${_NAME}_CONSIDERED_CONFIGS configsCount) - math(EXPR configsCount "${configsCount} - 1") - foreach(currentConfigIndex RANGE ${configsCount}) - list(GET ${_NAME}_CONSIDERED_CONFIGS ${currentConfigIndex} filename) - list(GET ${_NAME}_CONSIDERED_VERSIONS ${currentConfigIndex} version) - set(configsText "${configsText} ${filename} (version ${version})\n") - endforeach() - if (${_NAME}_NOT_FOUND_MESSAGE) - set(configsText "${configsText} Reason given by package: ${${_NAME}_NOT_FOUND_MESSAGE}\n") - endif() - _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} ${VERSION_MSG}, checked the following files:\n${configsText}") - - else() - # Simple case: No Config-file was found at all: - _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: found neither ${_NAME}Config.cmake nor ${_NAME_LOWER}-config.cmake ${VERSION_MSG}") - endif() - endif() -endmacro() - - -function(FIND_PACKAGE_HANDLE_STANDARD_ARGS _NAME _FIRST_ARG) - -# set up the arguments for CMAKE_PARSE_ARGUMENTS and check whether we are in -# new extended or in the "old" mode: - set(options CONFIG_MODE HANDLE_COMPONENTS) - set(oneValueArgs FAIL_MESSAGE VERSION_VAR FOUND_VAR) - set(multiValueArgs REQUIRED_VARS) - set(_KEYWORDS_FOR_EXTENDED_MODE ${options} ${oneValueArgs} ${multiValueArgs} ) - list(FIND _KEYWORDS_FOR_EXTENDED_MODE "${_FIRST_ARG}" INDEX) - - if(${INDEX} EQUAL -1) - set(FPHSA_FAIL_MESSAGE ${_FIRST_ARG}) - set(FPHSA_REQUIRED_VARS ${ARGN}) - set(FPHSA_VERSION_VAR) - else() - - CMAKE_PARSE_ARGUMENTS(FPHSA "${options}" "${oneValueArgs}" "${multiValueArgs}" ${_FIRST_ARG} ${ARGN}) - - if(FPHSA_UNPARSED_ARGUMENTS) - message(FATAL_ERROR "Unknown keywords given to FIND_PACKAGE_HANDLE_STANDARD_ARGS(): \"${FPHSA_UNPARSED_ARGUMENTS}\"") - endif() - - if(NOT FPHSA_FAIL_MESSAGE) - set(FPHSA_FAIL_MESSAGE "DEFAULT_MSG") - endif() - endif() - -# now that we collected all 
arguments, process them - - if("x${FPHSA_FAIL_MESSAGE}" STREQUAL "xDEFAULT_MSG") - set(FPHSA_FAIL_MESSAGE "Could NOT find ${_NAME}") - endif() - - # In config-mode, we rely on the variable _CONFIG, which is set by find_package() - # when it successfully found the config-file, including version checking: - if(FPHSA_CONFIG_MODE) - list(INSERT FPHSA_REQUIRED_VARS 0 ${_NAME}_CONFIG) - list(REMOVE_DUPLICATES FPHSA_REQUIRED_VARS) - set(FPHSA_VERSION_VAR ${_NAME}_VERSION) - endif() - - if(NOT FPHSA_REQUIRED_VARS) - message(FATAL_ERROR "No REQUIRED_VARS specified for FIND_PACKAGE_HANDLE_STANDARD_ARGS()") - endif() - - list(GET FPHSA_REQUIRED_VARS 0 _FIRST_REQUIRED_VAR) - - string(TOUPPER ${_NAME} _NAME_UPPER) - string(TOLOWER ${_NAME} _NAME_LOWER) - - if(FPHSA_FOUND_VAR) - if(FPHSA_FOUND_VAR MATCHES "^${_NAME}_FOUND$" OR FPHSA_FOUND_VAR MATCHES "^${_NAME_UPPER}_FOUND$") - set(_FOUND_VAR ${FPHSA_FOUND_VAR}) - else() - message(FATAL_ERROR "The argument for FOUND_VAR is \"${FPHSA_FOUND_VAR}\", but only \"${_NAME}_FOUND\" and \"${_NAME_UPPER}_FOUND\" are valid names.") - endif() - else() - set(_FOUND_VAR ${_NAME_UPPER}_FOUND) - endif() - - # collect all variables which were not found, so they can be printed, so the - # user knows better what went wrong (#6375) - set(MISSING_VARS "") - set(DETAILS "") - # check if all passed variables are valid - unset(${_FOUND_VAR}) - foreach(_CURRENT_VAR ${FPHSA_REQUIRED_VARS}) - if(NOT ${_CURRENT_VAR}) - set(${_FOUND_VAR} FALSE) - set(MISSING_VARS "${MISSING_VARS} ${_CURRENT_VAR}") - else() - set(DETAILS "${DETAILS}[${${_CURRENT_VAR}}]") - endif() - endforeach() - if(NOT "${${_FOUND_VAR}}" STREQUAL "FALSE") - set(${_FOUND_VAR} TRUE) - endif() - - # component handling - unset(FOUND_COMPONENTS_MSG) - unset(MISSING_COMPONENTS_MSG) - - if(FPHSA_HANDLE_COMPONENTS) - foreach(comp ${${_NAME}_FIND_COMPONENTS}) - if(${_NAME}_${comp}_FOUND) - - if(NOT DEFINED FOUND_COMPONENTS_MSG) - set(FOUND_COMPONENTS_MSG "found components: ") - endif() - set(FOUND_COMPONENTS_MSG "${FOUND_COMPONENTS_MSG} ${comp}") - - else() - - if(NOT DEFINED MISSING_COMPONENTS_MSG) - set(MISSING_COMPONENTS_MSG "missing components: ") - endif() - set(MISSING_COMPONENTS_MSG "${MISSING_COMPONENTS_MSG} ${comp}") - - if(${_NAME}_FIND_REQUIRED_${comp}) - set(${_FOUND_VAR} FALSE) - set(MISSING_VARS "${MISSING_VARS} ${comp}") - endif() - - endif() - endforeach() - set(COMPONENT_MSG "${FOUND_COMPONENTS_MSG} ${MISSING_COMPONENTS_MSG}") - set(DETAILS "${DETAILS}[c${COMPONENT_MSG}]") - endif() - - # version handling: - set(VERSION_MSG "") - set(VERSION_OK TRUE) - set(VERSION ${${FPHSA_VERSION_VAR}}) - - # check with DEFINED here as the requested or found version may be "0" - if (DEFINED ${_NAME}_FIND_VERSION) - if(DEFINED ${FPHSA_VERSION_VAR}) - - if(${_NAME}_FIND_VERSION_EXACT) # exact version required - # count the dots in the version string - string(REGEX REPLACE "[^.]" "" _VERSION_DOTS "${VERSION}") - # add one dot because there is one dot more than there are components - string(LENGTH "${_VERSION_DOTS}." _VERSION_DOTS) - if (_VERSION_DOTS GREATER ${_NAME}_FIND_VERSION_COUNT) - # Because of the C++ implementation of find_package() ${_NAME}_FIND_VERSION_COUNT - # is at most 4 here. Therefore a simple lookup table is used. 
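# [Editorial illustration -- not part of the original module. Example of the
#  truncation performed below: for a find_package(Foo 1.2) call,
#  ${_NAME}_FIND_VERSION_COUNT is 2, so the regex selected from the lookup
#  table reduces a found version such as "1.2.9" to its head "1.2", which
#  is then compared VERSION_EQUAL against the requested "1.2".]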
- if (${_NAME}_FIND_VERSION_COUNT EQUAL 1) - set(_VERSION_REGEX "[^.]*") - elseif (${_NAME}_FIND_VERSION_COUNT EQUAL 2) - set(_VERSION_REGEX "[^.]*\\.[^.]*") - elseif (${_NAME}_FIND_VERSION_COUNT EQUAL 3) - set(_VERSION_REGEX "[^.]*\\.[^.]*\\.[^.]*") - else () - set(_VERSION_REGEX "[^.]*\\.[^.]*\\.[^.]*\\.[^.]*") - endif () - string(REGEX REPLACE "^(${_VERSION_REGEX})\\..*" "\\1" _VERSION_HEAD "${VERSION}") - unset(_VERSION_REGEX) - if (NOT ${_NAME}_FIND_VERSION VERSION_EQUAL _VERSION_HEAD) - set(VERSION_MSG "Found unsuitable version \"${VERSION}\", but required is exact version \"${${_NAME}_FIND_VERSION}\"") - set(VERSION_OK FALSE) - else () - set(VERSION_MSG "(found suitable exact version \"${VERSION}\")") - endif () - unset(_VERSION_HEAD) - else () - if (NOT "${${_NAME}_FIND_VERSION}" VERSION_EQUAL "${VERSION}") - set(VERSION_MSG "Found unsuitable version \"${VERSION}\", but required is exact version \"${${_NAME}_FIND_VERSION}\"") - set(VERSION_OK FALSE) - else () - set(VERSION_MSG "(found suitable exact version \"${VERSION}\")") - endif () - endif () - unset(_VERSION_DOTS) - - else() # minimum version specified: - if ("${${_NAME}_FIND_VERSION}" VERSION_GREATER "${VERSION}") - set(VERSION_MSG "Found unsuitable version \"${VERSION}\", but required is at least \"${${_NAME}_FIND_VERSION}\"") - set(VERSION_OK FALSE) - else () - set(VERSION_MSG "(found suitable version \"${VERSION}\", minimum required is \"${${_NAME}_FIND_VERSION}\")") - endif () - endif() - - else() - - # if the package was not found, but a version was given, add that to the output: - if(${_NAME}_FIND_VERSION_EXACT) - set(VERSION_MSG "(Required is exact version \"${${_NAME}_FIND_VERSION}\")") - else() - set(VERSION_MSG "(Required is at least version \"${${_NAME}_FIND_VERSION}\")") - endif() - - endif() - else () - if(VERSION) - set(VERSION_MSG "(found version \"${VERSION}\")") - endif() - endif () - - if(VERSION_OK) - set(DETAILS "${DETAILS}[v${VERSION}(${${_NAME}_FIND_VERSION})]") - else() - set(${_FOUND_VAR} FALSE) - endif() - - - # print the result: - if (${_FOUND_VAR}) - FIND_PACKAGE_MESSAGE(${_NAME} "Found ${_NAME}: ${${_FIRST_REQUIRED_VAR}} ${VERSION_MSG} ${COMPONENT_MSG}" "${DETAILS}") - else () - - if(FPHSA_CONFIG_MODE) - _FPHSA_HANDLE_FAILURE_CONFIG_MODE() - else() - if(NOT VERSION_OK) - _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: ${VERSION_MSG} (found ${${_FIRST_REQUIRED_VAR}})") - else() - _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} (missing: ${MISSING_VARS}) ${VERSION_MSG}") - endif() - endif() - - endif () - - set(${_FOUND_VAR} ${${_FOUND_VAR}} PARENT_SCOPE) - -endfunction() diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindPackageMessage.cmake b/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindPackageMessage.cmake deleted file mode 100644 index a0349d3db99a3..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/cmake/modules/FindPackageMessage.cmake +++ /dev/null @@ -1,57 +0,0 @@ -#.rst: -# FindPackageMessage -# ------------------ -# -# -# -# FIND_PACKAGE_MESSAGE( "message for user" "find result details") -# -# This macro is intended to be used in FindXXX.cmake modules files. It -# will print a message once for each unique find result. This is useful -# for telling the user where a package was found. The first argument -# specifies the name (XXX) of the package. The second argument -# specifies the message to display. 
The third argument lists details -# about the find result so that if they change the message will be -# displayed again. The macro also obeys the QUIET argument to the -# find_package command. -# -# Example: -# -# :: -# -# if(X11_FOUND) -# FIND_PACKAGE_MESSAGE(X11 "Found X11: ${X11_X11_LIB}" -# "[${X11_X11_LIB}][${X11_INCLUDE_DIR}]") -# else() -# ... -# endif() - -#============================================================================= -# Copyright 2008-2009 Kitware, Inc. -# -# Distributed under the OSI-approved BSD License (the "License"); -# see accompanying file Copyright.txt for details. -# -# This software is distributed WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the License for more information. -#============================================================================= -# (To distribute this file outside of CMake, substitute the full -# License text for the above reference.) - -function(FIND_PACKAGE_MESSAGE pkg msg details) - # Avoid printing a message repeatedly for the same find result. - if(NOT ${pkg}_FIND_QUIETLY) - string(REPLACE "\n" "" details "${details}") - set(DETAILS_VAR FIND_PACKAGE_MESSAGE_DETAILS_${pkg}) - if(NOT "${details}" STREQUAL "${${DETAILS_VAR}}") - # The message has not yet been printed. - message(STATUS "${msg}") - - # Save the find details in the cache to avoid printing the same - # message again. - set("${DETAILS_VAR}" "${details}" - CACHE INTERNAL "Details about finding ${pkg}") - endif() - endif() -endfunction() diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/cryptopp/CMakeLists.txt b/Godeps/_workspace/src/github.com/expanse-org/ethash/cryptopp/CMakeLists.txt deleted file mode 100644 index 6f428053299fa..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/cryptopp/CMakeLists.txt +++ /dev/null @@ -1,13 +0,0 @@ -set(LIBRARY cryptopp) - -include_directories(../../cryptopp) - -# todo, subset -file(GLOB HEADERS "../../cryptopp/*.h") -file(GLOB SOURCE "../../cryptopp/*.cpp") - -add_library(${LIBRARY} ${HEADERS} ${SOURCE}) - -set(CRYPTOPP_INCLUDE_DIRS "../.." "../../../" PARENT_SCOPE) -set(CRYPTOPP_LIBRARIES ${LIBRARY} PARENT_SCOPE) -set(CRYPTOPP_FOUND TRUE PARENT_SCOPE) diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/ethash.go b/Godeps/_workspace/src/github.com/expanse-org/ethash/ethash.go deleted file mode 100644 index a4c0f2116e463..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/ethash.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// Copyright 2015 Lefteris Karapetsas -// Copyright 2015 Matthew Wampler-Doty -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package ethash - -/* -#include "src/libethash/internal.h" - -int ethashGoCallback_cgo(unsigned); -*/ -import "C" - -import ( - "errors" - "fmt" - "io/ioutil" - "math/big" - "math/rand" - "os" - "os/user" - "path/filepath" - "runtime" - "sync" - "sync/atomic" - "time" - "unsafe" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/pow" -) - -var ( - maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) - sharedLight = new(Light) -) - -const ( - epochLength uint64 = 30000 - cacheSizeForTesting C.uint64_t = 1024 - dagSizeForTesting C.uint64_t = 1024 * 32 -) - -var DefaultDir = defaultDir() - -func defaultDir() string { - home := os.Getenv("HOME") - if user, err := user.Current(); err == nil { - home = user.HomeDir - } - if runtime.GOOS == "windows" { - return filepath.Join(home, "AppData", "Ethash") - } - return filepath.Join(home, ".ethash") -} - -// cache wraps an ethash_light_t with some metadata -// and automatic memory management. -type cache struct { - epoch uint64 - used time.Time - test bool - - gen sync.Once // ensures cache is only generated once. - ptr *C.struct_ethash_light -} - -// generate creates the actual cache. it can be called from multiple -// goroutines. the first call will generate the cache, subsequent -// calls wait until it is generated. -func (cache *cache) generate() { - cache.gen.Do(func() { - started := time.Now() - seedHash := makeSeedHash(cache.epoch) - glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash) - size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength)) - if cache.test { - size = cacheSizeForTesting - } - cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0]))) - runtime.SetFinalizer(cache, freeCache) - glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started)) - }) -} - -func freeCache(cache *cache) { - C.ethash_light_delete(cache.ptr) - cache.ptr = nil -} - -func (cache *cache) compute(dagSize uint64, hash common.Hash, nonce uint64) (ok bool, mixDigest, result common.Hash) { - ret := C.ethash_light_compute_internal(cache.ptr, C.uint64_t(dagSize), hashToH256(hash), C.uint64_t(nonce)) - // Make sure cache is live until after the C call. - // This is important because a GC might happen and execute - // the finalizer before the call completes. - _ = cache - return bool(ret.success), h256ToHash(ret.mix_hash), h256ToHash(ret.result) -} - -// Light implements the Verify half of the proof of work. It uses a few small -// in-memory caches to verify the nonces found by Full. -type Light struct { - test bool // If set, use a smaller cache size - - mu sync.Mutex // Protects the per-epoch map of verification caches - caches map[uint64]*cache // Currently maintained verification caches - future *cache // Pre-generated cache for the estimated future DAG - - NumCaches int // Maximum number of caches to keep before eviction (only init, don't modify) -} - -// Verify checks whether the block's nonce is valid. -func (l *Light) Verify(block pow.Block) bool { - // TODO: do ethash_quick_verify before getCache in order - // to prevent DOS attacks. 
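// [Editorial sketch -- not part of the original file. Distilled, the
// acceptance test performed at the end of this function is the usual
// proof-of-work bound:
//
//	target := new(big.Int).Div(maxUint256, block.Difficulty())
//	valid := result.Big().Cmp(target) <= 0 // i.e. result <= 2^256/difficulty
//
// everything before it is input validation and cache bookkeeping.]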
-	blockNum := block.NumberU64()
-	if blockNum >= epochLength*2048 {
-		glog.V(logger.Debug).Infof("block number %d too high, limit is %d", blockNum, epochLength*2048)
-		return false
-	}
-
-	difficulty := block.Difficulty()
-	/* Cannot happen if block header diff is validated prior to PoW, but can
-	   happen if PoW is checked first due to parallel PoW checking.
-	   We could check the minimum valid difficulty but for separation of
-	   concerns we avoid duplicating Expanse protocol consensus rules here,
-	   which are not in scope of Ethash.
-	*/
-	if difficulty.Cmp(common.Big0) == 0 {
-		glog.V(logger.Debug).Infof("invalid block difficulty")
-		return false
-	}
-
-	cache := l.getCache(blockNum)
-	dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
-	if l.test {
-		dagSize = dagSizeForTesting
-	}
-	// Recompute the hash using the cache.
-	ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
-	if !ok {
-		return false
-	}
-
-	// avoid mixDigest malleability as it's not included in a block's "hashNoNonce"
-	if block.MixDigest() != mixDigest {
-		return false
-	}
-
-	// The actual check.
-	target := new(big.Int).Div(maxUint256, difficulty)
-	return result.Big().Cmp(target) <= 0
-}
-
-func h256ToHash(in C.ethash_h256_t) common.Hash {
-	return *(*common.Hash)(unsafe.Pointer(&in.b))
-}
-
-func hashToH256(in common.Hash) C.ethash_h256_t {
-	return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
-}
-
-func (l *Light) getCache(blockNum uint64) *cache {
-	var c *cache
-	epoch := blockNum / epochLength
-
-	// If we have a PoW for that epoch, use that
-	l.mu.Lock()
-	if l.caches == nil {
-		l.caches = make(map[uint64]*cache)
-	}
-	if l.NumCaches == 0 {
-		l.NumCaches = 3
-	}
-	c = l.caches[epoch]
-	if c == nil {
-		// No cached DAG, evict the oldest if the cache limit was reached
-		if len(l.caches) >= l.NumCaches {
-			var evict *cache
-			for _, cache := range l.caches {
-				if evict == nil || evict.used.After(cache.used) {
-					evict = cache
-				}
-			}
-			glog.V(logger.Debug).Infof("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch)
-			delete(l.caches, evict.epoch)
-		}
-		// If we have the new DAG pre-generated, use that, otherwise create a new one
-		if l.future != nil && l.future.epoch == epoch {
-			glog.V(logger.Debug).Infof("Using pre-generated DAG for epoch %d", epoch)
-			c, l.future = l.future, nil
-		} else {
-			glog.V(logger.Debug).Infof("No pre-generated DAG available, creating new for epoch %d", epoch)
-			c = &cache{epoch: epoch, test: l.test}
-		}
-		l.caches[epoch] = c
-
-		// If we just used up the future cache, or need a refresh, regenerate
-		if l.future == nil || l.future.epoch <= epoch {
-			glog.V(logger.Debug).Infof("Pre-generating DAG for epoch %d", epoch+1)
-			l.future = &cache{epoch: epoch + 1, test: l.test}
-			go l.future.generate()
-		}
-	}
-	c.used = time.Now()
-	l.mu.Unlock()
-
-	// Wait for generation to finish and return the cache
-	c.generate()
-	return c
-}
-
-// dag wraps an ethash_full_t with some metadata
-// and automatic memory management.
-type dag struct {
-	epoch uint64
-	test  bool
-	dir   string
-
-	gen sync.Once // ensures DAG is only generated once.
-	ptr *C.struct_ethash_full
-}
-
-// generate creates the actual DAG. It can be called from multiple
-// goroutines; the first call will generate the DAG, subsequent
-// calls wait until it is generated.
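// [Editorial note -- not part of the original file. The once-only behaviour
// promised above is sync.Once in miniature:
//
//	var gen sync.Once
//	gen.Do(buildDAG) // first caller runs buildDAG; later callers block until it returns
//
// which is what makes the unsynchronized c.generate() call in getCache
// above, and d.generate() in getDAG below, safe from any goroutine.]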
-func (d *dag) generate() { - d.gen.Do(func() { - var ( - started = time.Now() - seedHash = makeSeedHash(d.epoch) - blockNum = C.uint64_t(d.epoch * epochLength) - cacheSize = C.ethash_get_cachesize(blockNum) - dagSize = C.ethash_get_datasize(blockNum) - ) - if d.test { - cacheSize = cacheSizeForTesting - dagSize = dagSizeForTesting - } - if d.dir == "" { - d.dir = DefaultDir - } - glog.V(logger.Info).Infof("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash) - // Generate a temporary cache. - // TODO: this could share the cache with Light - cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0]))) - defer C.ethash_light_delete(cache) - // Generate the actual DAG. - d.ptr = C.ethash_full_new_internal( - C.CString(d.dir), - hashToH256(seedHash), - dagSize, - cache, - (C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)), - ) - if d.ptr == nil { - panic("ethash_full_new IO or memory error") - } - runtime.SetFinalizer(d, freeDAG) - glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started)) - }) -} - -func freeDAG(d *dag) { - C.ethash_full_delete(d.ptr) - d.ptr = nil -} - -func (d *dag) Ptr() unsafe.Pointer { - return unsafe.Pointer(d.ptr.data) -} - -//export ethashGoCallback -func ethashGoCallback(percent C.unsigned) C.int { - glog.V(logger.Info).Infof("Generating DAG: %d%%", percent) - return 0 -} - -// MakeDAG pre-generates a DAG file for the given block number in the -// given directory. If dir is the empty string, the default directory -// is used. -func MakeDAG(blockNum uint64, dir string) error { - d := &dag{epoch: blockNum / epochLength, dir: dir} - if blockNum >= epochLength*2048 { - return fmt.Errorf("block number too high, limit is %d", epochLength*2048) - } - d.generate() - if d.ptr == nil { - return errors.New("failed") - } - return nil -} - -// Full implements the Search half of the proof of work. -type Full struct { - Dir string // use this to specify a non-default DAG directory - - test bool // if set use a smaller DAG size - turbo bool - hashRate int32 - - mu sync.Mutex // protects dag - current *dag // current full DAG -} - -func (pow *Full) getDAG(blockNum uint64) (d *dag) { - epoch := blockNum / epochLength - pow.mu.Lock() - if pow.current != nil && pow.current.epoch == epoch { - d = pow.current - } else { - d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir} - pow.current = d - } - pow.mu.Unlock() - // wait for it to finish generating. 
- d.generate() - return d -} - -func (pow *Full) Search(block pow.Block, stop <-chan struct{}, index int) (nonce uint64, mixDigest []byte) { - dag := pow.getDAG(block.NumberU64()) - - r := rand.New(rand.NewSource(time.Now().UnixNano())) - diff := block.Difficulty() - - i := int64(0) - starti := i - start := time.Now().UnixNano() - previousHashrate := int32(0) - - nonce = uint64(r.Int63()) - hash := hashToH256(block.HashNoNonce()) - target := new(big.Int).Div(maxUint256, diff) - for { - select { - case <-stop: - atomic.AddInt32(&pow.hashRate, -previousHashrate) - return 0, nil - default: - i++ - - // we don't have to update hash rate on every nonce, so update after - // first nonce check and then after 2^X nonces - if i == 2 || ((i % (1 << 16)) == 0) { - elapsed := time.Now().UnixNano() - start - hashes := (float64(1e9) / float64(elapsed)) * float64(i-starti) - hashrateDiff := int32(hashes) - previousHashrate - previousHashrate = int32(hashes) - atomic.AddInt32(&pow.hashRate, hashrateDiff) - } - - ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce)) - result := h256ToHash(ret.result).Big() - - // TODO: disagrees with the spec https://github.com/expanse-org/wiki/wiki/Ethash#mining - if ret.success && result.Cmp(target) <= 0 { - mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32)) - atomic.AddInt32(&pow.hashRate, -previousHashrate) - return nonce, mixDigest - } - nonce += 1 - } - - if !pow.turbo { - time.Sleep(20 * time.Microsecond) - } - } -} - -func (pow *Full) GetHashrate() int64 { - return int64(atomic.LoadInt32(&pow.hashRate)) -} - -func (pow *Full) Turbo(on bool) { - // TODO: this needs to use an atomic operation. - pow.turbo = on -} - -// Ethash combines block verification with Light and -// nonce searching with Full into a single proof of work. -type Ethash struct { - *Light - *Full -} - -// New creates an instance of the proof of work. -func New() *Ethash { - return &Ethash{new(Light), &Full{turbo: true}} -} - -// NewShared creates an instance of the proof of work., where a single instance -// of the Light cache is shared across all instances created with NewShared. -func NewShared() *Ethash { - return &Ethash{sharedLight, &Full{turbo: true}} -} - -// NewForTesting creates a proof of work for use in unit tests. -// It uses a smaller DAG and cache size to keep test times low. -// DAG files are stored in a temporary directory. -// -// Nonces found by a testing instance are not verifiable with a -// regular-size cache. -func NewForTesting() (*Ethash, error) { - dir, err := ioutil.TempDir("", "ethash-test") - if err != nil { - return nil, err - } - return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil -} - -func GetSeedHash(blockNum uint64) ([]byte, error) { - if blockNum >= epochLength*2048 { - return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048) - } - sh := makeSeedHash(blockNum / epochLength) - return sh[:], nil -} - -func makeSeedHash(epoch uint64) (sh common.Hash) { - for ; epoch > 0; epoch-- { - sh = crypto.Sha3Hash(sh[:]) - } - return sh -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/ethash_test.go b/Godeps/_workspace/src/github.com/expanse-org/ethash/ethash_test.go deleted file mode 100644 index a35638f041b09..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/ethash_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// Copyright 2015 Lefteris Karapetsas -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package ethash - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "log" - "math/big" - "os" - "sync" - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" -) - -func init() { - // glog.SetV(6) - // glog.SetToStderr(true) -} - -type testBlock struct { - difficulty *big.Int - hashNoNonce common.Hash - nonce uint64 - mixDigest common.Hash - number uint64 -} - -func (b *testBlock) Difficulty() *big.Int { return b.difficulty } -func (b *testBlock) HashNoNonce() common.Hash { return b.hashNoNonce } -func (b *testBlock) Nonce() uint64 { return b.nonce } -func (b *testBlock) MixDigest() common.Hash { return b.mixDigest } -func (b *testBlock) NumberU64() uint64 { return b.number } - -var validBlocks = []*testBlock{ - // from proof of concept nine testnet, epoch 0 - { - number: 22, - hashNoNonce: common.HexToHash("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d"), - difficulty: big.NewInt(132416), - nonce: 0x495732e0ed7a801c, - mixDigest: common.HexToHash("2f74cdeb198af0b9abe65d22d372e22fb2d474371774a9583c1cc427a07939f5"), - }, - // from proof of concept nine testnet, epoch 1 - { - number: 30001, - hashNoNonce: common.HexToHash("7e44356ee3441623bc72a683fd3708fdf75e971bbe294f33e539eedad4b92b34"), - difficulty: big.NewInt(1532671), - nonce: 0x318df1c8adef7e5e, - mixDigest: common.HexToHash("144b180aad09ae3c81fb07be92c8e6351b5646dda80e6844ae1b697e55ddde84"), - }, - // from proof of concept nine testnet, epoch 2 - { - number: 60000, - hashNoNonce: common.HexToHash("5fc898f16035bf5ac9c6d9077ae1e3d5fc1ecc3c9fd5bee8bb00e810fdacbaa0"), - difficulty: big.NewInt(2467358), - nonce: 0x50377003e5d830ca, - mixDigest: common.HexToHash("ab546a5b73c452ae86dadd36f0ed83a6745226717d3798832d1b20b489e82063"), - }, -} - -var invalidZeroDiffBlock = testBlock{ - number: 61440000, - hashNoNonce: crypto.Sha3Hash([]byte("foo")), - difficulty: big.NewInt(0), - nonce: 0xcafebabec00000fe, - mixDigest: crypto.Sha3Hash([]byte("bar")), -} - -func TestEthashVerifyValid(t *testing.T) { - exp := New() - for i, block := range validBlocks { - if !exp.Verify(block) { - t.Errorf("block %d (%x) did not validate.", i, block.hashNoNonce[:6]) - } - } -} - -func TestEthashVerifyInvalid(t *testing.T) { - exp := New() - if exp.Verify(&invalidZeroDiffBlock) { - t.Errorf("should not validate - we just ensure it does not panic on this block") - } -} - -func TestEthashConcurrentVerify(t *testing.T) { - exp, err := NewForTesting() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(exp.Full.Dir) - - block := &testBlock{difficulty: big.NewInt(10)} - nonce, md := exp.Search(block, nil, 0) - block.nonce = nonce - block.mixDigest = common.BytesToHash(md) - - // Verify the block concurrently to check for data races. 
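// [Editorial note -- not part of the original file: t.Error is documented as
// safe for concurrent use, and the WaitGroup below ensures the test does not
// return while verifier goroutines are still running.]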
- var wg sync.WaitGroup - wg.Add(100) - for i := 0; i < 100; i++ { - go func() { - if !exp.Verify(block) { - t.Error("Block could not be verified") - } - wg.Done() - }() - } - wg.Wait() -} - -func TestEthashConcurrentSearch(t *testing.T) { - exp, err := NewForTesting() - if err != nil { - t.Fatal(err) - } - exp.Turbo(true) - defer os.RemoveAll(exp.Full.Dir) - - type searchRes struct { - n uint64 - md []byte - } - - var ( - block = &testBlock{difficulty: big.NewInt(35000)} - nsearch = 10 - wg = new(sync.WaitGroup) - found = make(chan searchRes) - stop = make(chan struct{}) - ) - rand.Read(block.hashNoNonce[:]) - wg.Add(nsearch) - // launch n searches concurrently. - for i := 0; i < nsearch; i++ { - go func() { - nonce, md := exp.Search(block, stop, 0) - select { - case found <- searchRes{n: nonce, md: md}: - case <-stop: - } - wg.Done() - }() - } - - // wait for one of them to find the nonce - res := <-found - // stop the others - close(stop) - wg.Wait() - - block.nonce = res.n - block.mixDigest = common.BytesToHash(res.md) - if !exp.Verify(block) { - t.Error("Block could not be verified") - } -} - -func TestEthashSearchAcrossEpoch(t *testing.T) { - exp, err := NewForTesting() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(exp.Full.Dir) - - for i := epochLength - 40; i < epochLength+40; i++ { - block := &testBlock{number: i, difficulty: big.NewInt(90)} - rand.Read(block.hashNoNonce[:]) - nonce, md := exp.Search(block, nil, 0) - block.nonce = nonce - block.mixDigest = common.BytesToHash(md) - if !exp.Verify(block) { - t.Fatalf("Block could not be verified") - } - } -} - -func TestGetSeedHash(t *testing.T) { - seed0, err := GetSeedHash(0) - if err != nil { - t.Errorf("Failed to get seedHash for block 0: %v", err) - } - if bytes.Compare(seed0, make([]byte, 32)) != 0 { - log.Printf("seedHash for block 0 should be 0s, was: %v\n", seed0) - } - seed1, err := GetSeedHash(30000) - if err != nil { - t.Error(err) - } - - // From python: - // > from pyethash import get_seedhash - // > get_seedhash(30000) - expectedSeed1, err := hex.DecodeString("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") - if err != nil { - t.Error(err) - } - - if bytes.Compare(seed1, expectedSeed1) != 0 { - log.Printf("seedHash for block 1 should be: %v,\nactual value: %v\n", expectedSeed1, seed1) - } - -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/ethashc.go b/Godeps/_workspace/src/github.com/expanse-org/ethash/ethashc.go deleted file mode 100644 index 1d2ba16132f08..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/ethashc.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package ethash - -/* - -mno-stack-arg-probe disables stack probing which avoids the function - __chkstk_ms being linked. 
this avoids a clash of this symbol as we also - separately link the secp256k1 lib which ends up defining this symbol - - 1. https://gcc.gnu.org/onlinedocs/gccint/Stack-Checking.html - 2. https://groups.google.com/forum/#!msg/golang-dev/v1bziURSQ4k/88fXuJ24e-gJ - 3. https://groups.google.com/forum/#!topic/golang-nuts/VNP6Mwz_B6o - -*/ - -/* -#cgo CFLAGS: -std=gnu99 -Wall -#cgo windows CFLAGS: -mno-stack-arg-probe -#cgo LDFLAGS: -lm - -#include "src/libethash/internal.c" -#include "src/libethash/sha3.c" -#include "src/libethash/io.c" - -#ifdef _WIN32 -# include "src/libethash/io_win32.c" -# include "src/libethash/mmap_win32.c" -#else -# include "src/libethash/io_posix.c" -#endif - -// 'gateway function' for calling back into go. -extern int ethashGoCallback(unsigned); -int ethashGoCallback_cgo(unsigned percent) { return ethashGoCallback(percent); } - -*/ -import "C" diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/LICENSE b/Godeps/_workspace/src/github.com/expanse-org/ethash/js/LICENSE deleted file mode 100644 index 070be633d11a2..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Tim Hughes - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
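An editorial aside on the pattern in the file deleted above (hypothetical names; a sketch, not the library's actual layout): cgo requires that a file containing //export carry only declarations in its C preamble, which is exactly why ethash.go declares ethashGoCallback_cgo while ethashc.go defines it. A minimal two-file version of the same gateway looks like this:

	// progress.go: exports the Go handler; its preamble may only declare.
	package gateway

	/*
	int goProgress_cgo(unsigned); // defined in bridge.go's preamble
	*/
	import "C"

	import "fmt"

	//export goProgress
	func goProgress(percent C.unsigned) C.int {
		fmt.Printf("progress: %d%%\n", percent)
		return 0 // a non-zero return could signal the C side to abort
	}

	// bridge.go: a sibling file is free to *define* the C gateway, whose
	// address can then be handed to a callback-taking C API:
	//
	//	package gateway
	//
	//	/*
	//	extern int goProgress(unsigned);
	//	int goProgress_cgo(unsigned p) { return goProgress(p); }
	//	*/
	//	import "C"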
- diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/ethash.js b/Godeps/_workspace/src/github.com/expanse-org/ethash/js/ethash.js deleted file mode 100644 index bec1284f61a2f..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/ethash.js +++ /dev/null @@ -1,190 +0,0 @@ -// ethash.js -// Tim Hughes -// Revision 19 - -/*jslint node: true, shadow:true */ -"use strict"; - -var Keccak = require('./keccak'); -var util = require('./util'); - -// 32-bit unsigned modulo -function mod32(x, n) -{ - return (x>>>0) % (n>>>0); -} - -function fnv(x, y) -{ - // js integer multiply by 0x01000193 will lose precision - return ((x*0x01000000 | 0) + (x*0x193 | 0)) ^ y; -} - -function computeCache(params, seedWords) -{ - var cache = new Uint32Array(params.cacheSize >> 2); - var cacheNodeCount = params.cacheSize >> 6; - - // Initialize cache - var keccak = new Keccak(); - keccak.digestWords(cache, 0, 16, seedWords, 0, seedWords.length); - for (var n = 1; n < cacheNodeCount; ++n) - { - keccak.digestWords(cache, n<<4, 16, cache, (n-1)<<4, 16); - } - - var tmp = new Uint32Array(16); - - // Do randmemohash passes - for (var r = 0; r < params.cacheRounds; ++r) - { - for (var n = 0; n < cacheNodeCount; ++n) - { - var p0 = mod32(n + cacheNodeCount - 1, cacheNodeCount) << 4; - var p1 = mod32(cache[n<<4|0], cacheNodeCount) << 4; - - for (var w = 0; w < 16; w=(w+1)|0) - { - tmp[w] = cache[p0 | w] ^ cache[p1 | w]; - } - - keccak.digestWords(cache, n<<4, 16, tmp, 0, tmp.length); - } - } - return cache; -} - -function computeDagNode(o_node, params, cache, keccak, nodeIndex) -{ - var cacheNodeCount = params.cacheSize >> 6; - var dagParents = params.dagParents; - - var c = (nodeIndex % cacheNodeCount) << 4; - var mix = o_node; - for (var w = 0; w < 16; ++w) - { - mix[w] = cache[c|w]; - } - mix[0] ^= nodeIndex; - keccak.digestWords(mix, 0, 16, mix, 0, 16); - - for (var p = 0; p < dagParents; ++p) - { - // compute cache node (word) index - c = mod32(fnv(nodeIndex ^ p, mix[p&15]), cacheNodeCount) << 4; - - for (var w = 0; w < 16; ++w) - { - mix[w] = fnv(mix[w], cache[c|w]); - } - } - - keccak.digestWords(mix, 0, 16, mix, 0, 16); -} - -function computeHashInner(mix, params, cache, keccak, tempNode) -{ - var mixParents = params.mixParents|0; - var mixWordCount = params.mixSize >> 2; - var mixNodeCount = mixWordCount >> 4; - var dagPageCount = (params.dagSize / params.mixSize) >> 0; - - // grab initial first word - var s0 = mix[0]; - - // initialise mix from initial 64 bytes - for (var w = 16; w < mixWordCount; ++w) - { - mix[w] = mix[w & 15]; - } - - for (var a = 0; a < mixParents; ++a) - { - var p = mod32(fnv(s0 ^ a, mix[a & (mixWordCount-1)]), dagPageCount); - var d = (p * mixNodeCount)|0; - - for (var n = 0, w = 0; n < mixNodeCount; ++n, w += 16) - { - computeDagNode(tempNode, params, cache, keccak, (d + n)|0); - - for (var v = 0; v < 16; ++v) - { - mix[w|v] = fnv(mix[w|v], tempNode[v]); - } - } - } -} - -function convertSeed(seed) -{ - // todo, reconcile with spec, byte ordering? 
- // todo, big-endian conversion - var newSeed = util.toWords(seed); - if (newSeed === null) - throw Error("Invalid seed '" + seed + "'"); - return newSeed; -} - -exports.defaultParams = function() -{ - return { - cacheSize: 1048384, - cacheRounds: 3, - dagSize: 1073739904, - dagParents: 256, - mixSize: 128, - mixParents: 64, - }; -}; - -exports.Ethash = function(params, seed) -{ - // precompute cache and related values - seed = convertSeed(seed); - var cache = computeCache(params, seed); - - // preallocate buffers/etc - var initBuf = new ArrayBuffer(96); - var initBytes = new Uint8Array(initBuf); - var initWords = new Uint32Array(initBuf); - var mixWords = new Uint32Array(params.mixSize / 4); - var tempNode = new Uint32Array(16); - var keccak = new Keccak(); - - var retWords = new Uint32Array(8); - var retBytes = new Uint8Array(retWords.buffer); // supposedly read-only - - this.hash = function(header, nonce) - { - // compute initial hash - initBytes.set(header, 0); - initBytes.set(nonce, 32); - keccak.digestWords(initWords, 0, 16, initWords, 0, 8 + nonce.length/4); - - // compute mix - for (var i = 0; i != 16; ++i) - { - mixWords[i] = initWords[i]; - } - computeHashInner(mixWords, params, cache, keccak, tempNode); - - // compress mix and append to initWords - for (var i = 0; i != mixWords.length; i += 4) - { - initWords[16 + i/4] = fnv(fnv(fnv(mixWords[i], mixWords[i+1]), mixWords[i+2]), mixWords[i+3]); - } - - // final Keccak hashes - keccak.digestWords(retWords, 0, 8, initWords, 0, 24); // Keccak-256(s + cmix) - return retBytes; - }; - - this.cacheDigest = function() - { - return keccak.digest(32, new Uint8Array(cache.buffer)); - }; -}; - - - - diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/keccak.js b/Godeps/_workspace/src/github.com/expanse-org/ethash/js/keccak.js deleted file mode 100644 index 84ddde6451631..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/keccak.js +++ /dev/null @@ -1,404 +0,0 @@ -// keccak.js -// Tim Hughes -// derived from Markku-Juhani O. 
Saarinen's C code (http://keccak.noekeon.org/readable_code.html) - -/*jslint node: true, shadow:true */ -"use strict"; - -var Keccak_f1600_RC = new Uint32Array([ - 0x00000001, 0x00000000, - 0x00008082, 0x00000000, - 0x0000808a, 0x80000000, - 0x80008000, 0x80000000, - 0x0000808b, 0x00000000, - 0x80000001, 0x00000000, - 0x80008081, 0x80000000, - 0x00008009, 0x80000000, - 0x0000008a, 0x00000000, - 0x00000088, 0x00000000, - 0x80008009, 0x00000000, - 0x8000000a, 0x00000000, - 0x8000808b, 0x00000000, - 0x0000008b, 0x80000000, - 0x00008089, 0x80000000, - 0x00008003, 0x80000000, - 0x00008002, 0x80000000, - 0x00000080, 0x80000000, - 0x0000800a, 0x00000000, - 0x8000000a, 0x80000000, - 0x80008081, 0x80000000, - 0x00008080, 0x80000000, - 0x80000001, 0x00000000, - 0x80008008, 0x80000000 -]); - -function keccak_f1600(outState, outOffset, outSize, inState) -{ - // todo, handle big endian loads - var a00l = inState[0]|0; - var a00h = inState[1]|0; - var a01l = inState[2]|0; - var a01h = inState[3]|0; - var a02l = inState[4]|0; - var a02h = inState[5]|0; - var a03l = inState[6]|0; - var a03h = inState[7]|0; - var a04l = inState[8]|0; - var a04h = inState[9]|0; - var a05l = inState[10]|0; - var a05h = inState[11]|0; - var a06l = inState[12]|0; - var a06h = inState[13]|0; - var a07l = inState[14]|0; - var a07h = inState[15]|0; - var a08l = inState[16]|0; - var a08h = inState[17]|0; - var a09l = inState[18]|0; - var a09h = inState[19]|0; - var a10l = inState[20]|0; - var a10h = inState[21]|0; - var a11l = inState[22]|0; - var a11h = inState[23]|0; - var a12l = inState[24]|0; - var a12h = inState[25]|0; - var a13l = inState[26]|0; - var a13h = inState[27]|0; - var a14l = inState[28]|0; - var a14h = inState[29]|0; - var a15l = inState[30]|0; - var a15h = inState[31]|0; - var a16l = inState[32]|0; - var a16h = inState[33]|0; - var a17l = inState[34]|0; - var a17h = inState[35]|0; - var a18l = inState[36]|0; - var a18h = inState[37]|0; - var a19l = inState[38]|0; - var a19h = inState[39]|0; - var a20l = inState[40]|0; - var a20h = inState[41]|0; - var a21l = inState[42]|0; - var a21h = inState[43]|0; - var a22l = inState[44]|0; - var a22h = inState[45]|0; - var a23l = inState[46]|0; - var a23h = inState[47]|0; - var a24l = inState[48]|0; - var a24h = inState[49]|0; - var b00l, b00h, b01l, b01h, b02l, b02h, b03l, b03h, b04l, b04h; - var b05l, b05h, b06l, b06h, b07l, b07h, b08l, b08h, b09l, b09h; - var b10l, b10h, b11l, b11h, b12l, b12h, b13l, b13h, b14l, b14h; - var b15l, b15h, b16l, b16h, b17l, b17h, b18l, b18h, b19l, b19h; - var b20l, b20h, b21l, b21h, b22l, b22h, b23l, b23h, b24l, b24h; - var tl, nl; - var th, nh; - - for (var r = 0; r < 48; r = (r+2)|0) - { - // Theta - b00l = a00l ^ a05l ^ a10l ^ a15l ^ a20l; - b00h = a00h ^ a05h ^ a10h ^ a15h ^ a20h; - b01l = a01l ^ a06l ^ a11l ^ a16l ^ a21l; - b01h = a01h ^ a06h ^ a11h ^ a16h ^ a21h; - b02l = a02l ^ a07l ^ a12l ^ a17l ^ a22l; - b02h = a02h ^ a07h ^ a12h ^ a17h ^ a22h; - b03l = a03l ^ a08l ^ a13l ^ a18l ^ a23l; - b03h = a03h ^ a08h ^ a13h ^ a18h ^ a23h; - b04l = a04l ^ a09l ^ a14l ^ a19l ^ a24l; - b04h = a04h ^ a09h ^ a14h ^ a19h ^ a24h; - tl = b04l ^ (b01l << 1 | b01h >>> 31); - th = b04h ^ (b01h << 1 | b01l >>> 31); - a00l ^= tl; - a00h ^= th; - a05l ^= tl; - a05h ^= th; - a10l ^= tl; - a10h ^= th; - a15l ^= tl; - a15h ^= th; - a20l ^= tl; - a20h ^= th; - tl = b00l ^ (b02l << 1 | b02h >>> 31); - th = b00h ^ (b02h << 1 | b02l >>> 31); - a01l ^= tl; - a01h ^= th; - a06l ^= tl; - a06h ^= th; - a11l ^= tl; - a11h ^= th; - a16l ^= tl; - a16h ^= th; - a21l ^= 
tl; - a21h ^= th; - tl = b01l ^ (b03l << 1 | b03h >>> 31); - th = b01h ^ (b03h << 1 | b03l >>> 31); - a02l ^= tl; - a02h ^= th; - a07l ^= tl; - a07h ^= th; - a12l ^= tl; - a12h ^= th; - a17l ^= tl; - a17h ^= th; - a22l ^= tl; - a22h ^= th; - tl = b02l ^ (b04l << 1 | b04h >>> 31); - th = b02h ^ (b04h << 1 | b04l >>> 31); - a03l ^= tl; - a03h ^= th; - a08l ^= tl; - a08h ^= th; - a13l ^= tl; - a13h ^= th; - a18l ^= tl; - a18h ^= th; - a23l ^= tl; - a23h ^= th; - tl = b03l ^ (b00l << 1 | b00h >>> 31); - th = b03h ^ (b00h << 1 | b00l >>> 31); - a04l ^= tl; - a04h ^= th; - a09l ^= tl; - a09h ^= th; - a14l ^= tl; - a14h ^= th; - a19l ^= tl; - a19h ^= th; - a24l ^= tl; - a24h ^= th; - - // Rho Pi - b00l = a00l; - b00h = a00h; - b10l = a01l << 1 | a01h >>> 31; - b10h = a01h << 1 | a01l >>> 31; - b07l = a10l << 3 | a10h >>> 29; - b07h = a10h << 3 | a10l >>> 29; - b11l = a07l << 6 | a07h >>> 26; - b11h = a07h << 6 | a07l >>> 26; - b17l = a11l << 10 | a11h >>> 22; - b17h = a11h << 10 | a11l >>> 22; - b18l = a17l << 15 | a17h >>> 17; - b18h = a17h << 15 | a17l >>> 17; - b03l = a18l << 21 | a18h >>> 11; - b03h = a18h << 21 | a18l >>> 11; - b05l = a03l << 28 | a03h >>> 4; - b05h = a03h << 28 | a03l >>> 4; - b16l = a05h << 4 | a05l >>> 28; - b16h = a05l << 4 | a05h >>> 28; - b08l = a16h << 13 | a16l >>> 19; - b08h = a16l << 13 | a16h >>> 19; - b21l = a08h << 23 | a08l >>> 9; - b21h = a08l << 23 | a08h >>> 9; - b24l = a21l << 2 | a21h >>> 30; - b24h = a21h << 2 | a21l >>> 30; - b04l = a24l << 14 | a24h >>> 18; - b04h = a24h << 14 | a24l >>> 18; - b15l = a04l << 27 | a04h >>> 5; - b15h = a04h << 27 | a04l >>> 5; - b23l = a15h << 9 | a15l >>> 23; - b23h = a15l << 9 | a15h >>> 23; - b19l = a23h << 24 | a23l >>> 8; - b19h = a23l << 24 | a23h >>> 8; - b13l = a19l << 8 | a19h >>> 24; - b13h = a19h << 8 | a19l >>> 24; - b12l = a13l << 25 | a13h >>> 7; - b12h = a13h << 25 | a13l >>> 7; - b02l = a12h << 11 | a12l >>> 21; - b02h = a12l << 11 | a12h >>> 21; - b20l = a02h << 30 | a02l >>> 2; - b20h = a02l << 30 | a02h >>> 2; - b14l = a20l << 18 | a20h >>> 14; - b14h = a20h << 18 | a20l >>> 14; - b22l = a14h << 7 | a14l >>> 25; - b22h = a14l << 7 | a14h >>> 25; - b09l = a22h << 29 | a22l >>> 3; - b09h = a22l << 29 | a22h >>> 3; - b06l = a09l << 20 | a09h >>> 12; - b06h = a09h << 20 | a09l >>> 12; - b01l = a06h << 12 | a06l >>> 20; - b01h = a06l << 12 | a06h >>> 20; - - // Chi - a00l = b00l ^ ~b01l & b02l; - a00h = b00h ^ ~b01h & b02h; - a01l = b01l ^ ~b02l & b03l; - a01h = b01h ^ ~b02h & b03h; - a02l = b02l ^ ~b03l & b04l; - a02h = b02h ^ ~b03h & b04h; - a03l = b03l ^ ~b04l & b00l; - a03h = b03h ^ ~b04h & b00h; - a04l = b04l ^ ~b00l & b01l; - a04h = b04h ^ ~b00h & b01h; - a05l = b05l ^ ~b06l & b07l; - a05h = b05h ^ ~b06h & b07h; - a06l = b06l ^ ~b07l & b08l; - a06h = b06h ^ ~b07h & b08h; - a07l = b07l ^ ~b08l & b09l; - a07h = b07h ^ ~b08h & b09h; - a08l = b08l ^ ~b09l & b05l; - a08h = b08h ^ ~b09h & b05h; - a09l = b09l ^ ~b05l & b06l; - a09h = b09h ^ ~b05h & b06h; - a10l = b10l ^ ~b11l & b12l; - a10h = b10h ^ ~b11h & b12h; - a11l = b11l ^ ~b12l & b13l; - a11h = b11h ^ ~b12h & b13h; - a12l = b12l ^ ~b13l & b14l; - a12h = b12h ^ ~b13h & b14h; - a13l = b13l ^ ~b14l & b10l; - a13h = b13h ^ ~b14h & b10h; - a14l = b14l ^ ~b10l & b11l; - a14h = b14h ^ ~b10h & b11h; - a15l = b15l ^ ~b16l & b17l; - a15h = b15h ^ ~b16h & b17h; - a16l = b16l ^ ~b17l & b18l; - a16h = b16h ^ ~b17h & b18h; - a17l = b17l ^ ~b18l & b19l; - a17h = b17h ^ ~b18h & b19h; - a18l = b18l ^ ~b19l & b15l; - a18h = b18h ^ ~b19h & b15h; - a19l = b19l ^ ~b15l & 
b16l; - a19h = b19h ^ ~b15h & b16h; - a20l = b20l ^ ~b21l & b22l; - a20h = b20h ^ ~b21h & b22h; - a21l = b21l ^ ~b22l & b23l; - a21h = b21h ^ ~b22h & b23h; - a22l = b22l ^ ~b23l & b24l; - a22h = b22h ^ ~b23h & b24h; - a23l = b23l ^ ~b24l & b20l; - a23h = b23h ^ ~b24h & b20h; - a24l = b24l ^ ~b20l & b21l; - a24h = b24h ^ ~b20h & b21h; - - // Iota - a00l ^= Keccak_f1600_RC[r|0]; - a00h ^= Keccak_f1600_RC[r|1]; - } - - // todo, handle big-endian stores - outState[outOffset|0] = a00l; - outState[outOffset|1] = a00h; - outState[outOffset|2] = a01l; - outState[outOffset|3] = a01h; - outState[outOffset|4] = a02l; - outState[outOffset|5] = a02h; - outState[outOffset|6] = a03l; - outState[outOffset|7] = a03h; - if (outSize == 8) - return; - outState[outOffset|8] = a04l; - outState[outOffset|9] = a04h; - outState[outOffset|10] = a05l; - outState[outOffset|11] = a05h; - outState[outOffset|12] = a06l; - outState[outOffset|13] = a06h; - outState[outOffset|14] = a07l; - outState[outOffset|15] = a07h; - if (outSize == 16) - return; - outState[outOffset|16] = a08l; - outState[outOffset|17] = a08h; - outState[outOffset|18] = a09l; - outState[outOffset|19] = a09h; - outState[outOffset|20] = a10l; - outState[outOffset|21] = a10h; - outState[outOffset|22] = a11l; - outState[outOffset|23] = a11h; - outState[outOffset|24] = a12l; - outState[outOffset|25] = a12h; - outState[outOffset|26] = a13l; - outState[outOffset|27] = a13h; - outState[outOffset|28] = a14l; - outState[outOffset|29] = a14h; - outState[outOffset|30] = a15l; - outState[outOffset|31] = a15h; - outState[outOffset|32] = a16l; - outState[outOffset|33] = a16h; - outState[outOffset|34] = a17l; - outState[outOffset|35] = a17h; - outState[outOffset|36] = a18l; - outState[outOffset|37] = a18h; - outState[outOffset|38] = a19l; - outState[outOffset|39] = a19h; - outState[outOffset|40] = a20l; - outState[outOffset|41] = a20h; - outState[outOffset|42] = a21l; - outState[outOffset|43] = a21h; - outState[outOffset|44] = a22l; - outState[outOffset|45] = a22h; - outState[outOffset|46] = a23l; - outState[outOffset|47] = a23h; - outState[outOffset|48] = a24l; - outState[outOffset|49] = a24h; -} - -var Keccak = function() -{ - var stateBuf = new ArrayBuffer(200); - var stateBytes = new Uint8Array(stateBuf); - var stateWords = new Uint32Array(stateBuf); - - this.digest = function(oSize, iBytes) - { - for (var i = 0; i < 50; ++i) - { - stateWords[i] = 0; - } - - var r = 200 - oSize*2; - var iLength = iBytes.length; - var iOffset = 0; - for ( ; ;) - { - var len = iLength < r ? iLength : r; - for (i = 0; i < len; ++i, ++iOffset) - { - stateBytes[i] ^= iBytes[iOffset]; - } - - if (iLength < r) - break; - iLength -= len; - - keccak_f1600(stateWords, 0, 50, stateWords); - } - - stateBytes[iLength] ^= 1; - stateBytes[r-1] ^= 0x80; - keccak_f1600(stateWords, 0, 50, stateWords); - return stateBytes.subarray(0, oSize); - }; - - this.digestWords = function(oWords, oOffset, oLength, iWords, iOffset, iLength) - { - for (var i = 0; i < 50; ++i) - { - stateWords[i] = 0; - } - - var r = 50 - oLength*2; - for (; ; ) - { - var len = iLength < r ? 
iLength : r; - for (i = 0; i < len; ++i, ++iOffset) - { - stateWords[i] ^= iWords[iOffset]; - } - - if (iLength < r) - break; - iLength -= len; - - keccak_f1600(stateWords, 0, 50, stateWords); - } - - stateBytes[iLength<<2] ^= 1; - stateBytes[(r<<2) - 1] ^= 0x80; - keccak_f1600(oWords, oOffset, oLength, stateWords); - }; -}; - -module.exports = Keccak; - - diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/makekeccak.js b/Godeps/_workspace/src/github.com/expanse-org/ethash/js/makekeccak.js deleted file mode 100644 index c4db2b80a0614..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/makekeccak.js +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env node -// makekeccak.js -// Tim Hughes - -/*jslint node: true, shadow:true */ -"use strict"; - -var Keccak_f1600_Rho = [ - 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, - 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 -]; - -var Keccak_f1600_Pi= [ - 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, - 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 -]; - -var Keccak_f1600_RC = [ - 0x00000001, 0x00000000, - 0x00008082, 0x00000000, - 0x0000808a, 0x80000000, - 0x80008000, 0x80000000, - 0x0000808b, 0x00000000, - 0x80000001, 0x00000000, - 0x80008081, 0x80000000, - 0x00008009, 0x80000000, - 0x0000008a, 0x00000000, - 0x00000088, 0x00000000, - 0x80008009, 0x00000000, - 0x8000000a, 0x00000000, - 0x8000808b, 0x00000000, - 0x0000008b, 0x80000000, - 0x00008089, 0x80000000, - 0x00008003, 0x80000000, - 0x00008002, 0x80000000, - 0x00000080, 0x80000000, - 0x0000800a, 0x00000000, - 0x8000000a, 0x80000000, - 0x80008081, 0x80000000, - 0x00008080, 0x80000000, - 0x80000001, 0x00000000, - 0x80008008, 0x80000000, -]; - -function makeRotLow(lo, hi, n) -{ - if (n === 0 || n === 32) throw Error("unsupported"); - if ((n & 0x20) !== 0) - { - n &= ~0x20; - var t = hi; - hi = lo; - lo = t; - } - var hir = hi + " >>> " + (32 - n); - var los = lo + " << " + n; - return los + " | " + hir; -} - -function makeRotHigh(lo, hi, n) -{ - if (n === 0 || n === 32) throw Error("unsupported"); - if ((n & 0x20) !== 0) - { - n &= ~0x20; - var t = hi; - hi = lo; - lo = t; - } - var his = hi + " << " + n; - var lor = lo + " >>> " + (32 - n); - return his + " | " + lor; -} - -function makeKeccak_f1600() -{ - var format = function(n) - { - return n < 10 ? 
"0"+n : ""+n; - }; - - var a = function(n, w) - { - return "a" + format(n) + (w !== 0?'h':'l'); - }; - - var b = function(n, w) - { - return "b" + format(n) + (w !== 0?'h':'l'); - }; - - var str = ""; - str += "function keccak_f1600(outState, outOffset, outSize, inState)\n"; - str += "{\n"; - - for (var i = 0; i < 25; ++i) - { - for (var w = 0; w <= 1; ++w) - { - str += "\tvar " + a(i,w) + " = inState["+(i<<1|w)+"]|0;\n"; - } - } - - for (var j = 0; j < 5; ++j) - { - str += "\tvar "; - for (var i = 0; i < 5; ++i) - { - if (i !== 0) - str += ", "; - str += b(j*5+i,0) + ", " + b(j*5+i,1); - } - str += ";\n"; - } - - str += "\tvar tl, th;\n"; - str += "\n"; - str += "\tfor (var r = 0; r < 48; r = (r+2)|0)\n"; - str += "\t{\n"; - - - // Theta - str += "\t\t// Theta\n"; - for (var i = 0; i < 5; ++i) - { - for (var w = 0; w <= 1; ++w) - { - str += "\t\t" + b(i,w) + " = " + a(i,w) + " ^ " + a(i+5,w) + " ^ " + a(i+10,w) + " ^ " + a(i+15,w) + " ^ " + a(i+20,w) + ";\n"; - } - } - - for (var i = 0; i < 5; ++i) - { - var i4 = (i + 4) % 5; - var i1 = (i + 1) % 5; - str += "\t\ttl = " + b(i4,0) + " ^ (" + b(i1,0) + " << 1 | " + b(i1,1) + " >>> 31);\n"; - str += "\t\tth = " + b(i4,1) + " ^ (" + b(i1,1) + " << 1 | " + b(i1,0) + " >>> 31);\n"; - - for (var j = 0; j < 25; j = (j+5)|0) - { - str += "\t\t" + a((j+i),0) + " ^= tl;\n"; - str += "\t\t" + a((j+i),1) + " ^= th;\n"; - } - } - - - // Rho Pi - str += "\n\t\t// Rho Pi\n"; - for (var w = 0; w <= 1; ++w) - { - str += "\t\t" + b(0,w) + " = " + a(0,w) + ";\n"; - } - var opi = 1; - for (var i = 0; i < 24; ++i) - { - var pi = Keccak_f1600_Pi[i]; - str += "\t\t" + b(pi,0) + " = " + makeRotLow(a(opi,0), a(opi,1), Keccak_f1600_Rho[i]) + ";\n"; - str += "\t\t" + b(pi,1) + " = " + makeRotHigh(a(opi,0), a(opi,1), Keccak_f1600_Rho[i]) + ";\n"; - opi = pi; - } - - // Chi - str += "\n\t\t// Chi\n"; - for (var j = 0; j < 25; j += 5) - { - for (var i = 0; i < 5; ++i) - { - for (var w = 0; w <= 1; ++w) - { - str += "\t\t" + a(j+i,w) + " = " + b(j+i,w) + " ^ ~" + b(j+(i+1)%5,w) + " & " + b(j+(i+2)%5,w) + ";\n"; - } - } - } - - // Iota - str += "\n\t\t// Iota\n"; - for (var w = 0; w <= 1; ++w) - { - str += "\t\t" + a(0,w) + " ^= Keccak_f1600_RC[r|" + w + "];\n"; - } - - - str += "\t}\n"; - - for (var i = 0; i < 25; ++i) - { - if (i == 4 || i == 8) - { - str += "\tif (outSize == " + i*2 + ")\n\t\treturn;\n"; - } - for (var w = 0; w <= 1; ++w) - { - str += "\toutState[outOffset|"+(i<<1|w)+"] = " + a(i,w) + ";\n"; - } - } - str += "}\n"; - - return str; -} - -console.log(makeKeccak_f1600()); diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/test.js b/Godeps/_workspace/src/github.com/expanse-org/ethash/js/test.js deleted file mode 100644 index 7ebb733ff5784..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/test.js +++ /dev/null @@ -1,53 +0,0 @@ -// test.js -// Tim Hughes - -/*jslint node: true, shadow:true */ -"use strict"; - -var ethash = require('./ethash'); -var util = require('./util'); -var Keccak = require('./keccak'); - -// sanity check hash functions -var src = util.stringToBytes(""); -if (util.bytesToHexString(new Keccak().digest(32, src)) != "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") throw Error("Keccak-256 failed"); -if (util.bytesToHexString(new Keccak().digest(64, src)) != "0eab42de4c3ceb9235fc91acffe746b29c29a8c366b7c60e4e67c466f36a4304c00fa9caf9d87976ba469bcbe06713b435f091ef2769fb160cdab33d3670680e") throw Error("Keccak-512 failed"); - -src = new Uint32Array(src.buffer); -var dst = new 
Uint32Array(8); -new Keccak().digestWords(dst, 0, dst.length, src, 0, src.length); -if (util.wordsToHexString(dst) != "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") throw Error("Keccak-256 Fast failed"); - -var dst = new Uint32Array(16); -new Keccak().digestWords(dst, 0, dst.length, src, 0, src.length); -if (util.wordsToHexString(dst) != "0eab42de4c3ceb9235fc91acffe746b29c29a8c366b7c60e4e67c466f36a4304c00fa9caf9d87976ba469bcbe06713b435f091ef2769fb160cdab33d3670680e") throw Error("Keccak-512 Fast failed"); - - -// init params -var ethashParams = ethash.defaultParams(); -//ethashParams.cacheRounds = 0; - -// create hasher -var seed = util.hexStringToBytes("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466") -var startTime = new Date().getTime(); -var hasher = new ethash.Ethash(ethashParams, seed); -console.log('Ethash startup took: '+(new Date().getTime() - startTime) + "ms"); -console.log('Ethash cache hash: ' + util.bytesToHexString(hasher.cacheDigest())); - -var testHexString = "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"; -if (testHexString != util.bytesToHexString(util.hexStringToBytes(testHexString))) - throw Error("bytesToHexString or hexStringToBytes broken"); - - -var header = util.hexStringToBytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); -var nonce = util.hexStringToBytes("0000000000000000"); -var hash; - -startTime = new Date().getTime(); -var trials = 10; -for (var i = 0; i < trials; ++i) -{ - hash = hasher.hash(header, nonce); -} -console.log("Light client hashes averaged: " + (new Date().getTime() - startTime)/trials + "ms"); -console.log("Hash = " + util.bytesToHexString(hash)); diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/util.js b/Godeps/_workspace/src/github.com/expanse-org/ethash/js/util.js deleted file mode 100644 index 79743cd915e95..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/js/util.js +++ /dev/null @@ -1,100 +0,0 @@ -// util.js -// Tim Hughes - -/*jslint node: true, shadow:true */ -"use strict"; - -function nibbleToChar(nibble) -{ - return String.fromCharCode((nibble < 10 ? 
48 : 87) + nibble);
-}
-
-function charToNibble(chr)
-{
-	if (chr >= 48 && chr <= 57)
-	{
-		return chr - 48;
-	}
-	if (chr >= 65 && chr <= 70)
-	{
-		return chr - 65 + 10;
-	}
-	if (chr >= 97 && chr <= 102)
-	{
-		return chr - 97 + 10;
-	}
-	return 0;
-}
-
-function stringToBytes(str)
-{
-	var bytes = new Uint8Array(str.length);
-	for (var i = 0; i != str.length; ++i)
-	{
-		bytes[i] = str.charCodeAt(i);
-	}
-	return bytes;
-}
-
-function hexStringToBytes(str)
-{
-	var bytes = new Uint8Array(str.length>>>1);
-	for (var i = 0; i != bytes.length; ++i)
-	{
-		bytes[i] = charToNibble(str.charCodeAt(i<<1 | 0)) << 4;
-		bytes[i] |= charToNibble(str.charCodeAt(i<<1 | 1));
-	}
-	return bytes;
-}
-
-function bytesToHexString(bytes)
-{
-	var str = "";
-	for (var i = 0; i != bytes.length; ++i)
-	{
-		str += nibbleToChar(bytes[i] >>> 4);
-		str += nibbleToChar(bytes[i] & 0xf);
-	}
-	return str;
-}
-
-function wordsToHexString(words)
-{
-	return bytesToHexString(new Uint8Array(words.buffer));
-}
-
-function uint32ToHexString(num)
-{
-	var buf = new Uint8Array(4);
-	buf[0] = (num >> 24) & 0xff;
-	buf[1] = (num >> 16) & 0xff;
-	buf[2] = (num >> 8) & 0xff;
-	buf[3] = (num >> 0) & 0xff;
-	return bytesToHexString(buf);
-}
-
-function toWords(input)
-{
-	if (input instanceof Uint32Array)
-	{
-		return input;
-	}
-	else if (input instanceof Uint8Array)
-	{
-		var tmp = new Uint8Array((input.length + 3) & ~3);
-		tmp.set(input);
-		return new Uint32Array(tmp.buffer);
-	}
-	else if (typeof input === typeof "")
-	{
-		return toWords(stringToBytes(input));
-	}
-	return null;
-}
-
-exports.stringToBytes = stringToBytes;
-exports.hexStringToBytes = hexStringToBytes;
-exports.bytesToHexString = bytesToHexString;
-exports.wordsToHexString = wordsToHexString;
-exports.uint32ToHexString = uint32ToHexString;
-exports.toWords = toWords;
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/setup.py b/Godeps/_workspace/src/github.com/expanse-org/ethash/setup.py
deleted file mode 100644
index 7b9aa708fbf62..0000000000000
--- a/Godeps/_workspace/src/github.com/expanse-org/ethash/setup.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-import os
-from distutils.core import setup, Extension
-sources = [
-    'src/python/core.c',
-    'src/libethash/io.c',
-    'src/libethash/internal.c',
-    'src/libethash/sha3.c']
-if os.name == 'nt':
-    sources += [
-        'src/libethash/util_win32.c',
-        'src/libethash/io_win32.c',
-        'src/libethash/mmap_win32.c',
-    ]
-else:
-    sources += [
-        'src/libethash/io_posix.c'
-    ]
-depends = [
-    'src/libethash/ethash.h',
-    'src/libethash/compiler.h',
-    'src/libethash/data_sizes.h',
-    'src/libethash/endian.h',
-    'src/libethash/ethash.h',
-    'src/libethash/io.h',
-    'src/libethash/fnv.h',
-    'src/libethash/internal.h',
-    'src/libethash/sha3.h',
-    'src/libethash/util.h',
-]
-pyethash = Extension('pyethash',
-                     sources=sources,
-                     depends=depends,
-                     extra_compile_args=["-Isrc/", "-std=gnu99", "-Wall"])
-
-setup(
-    name='pyethash',
-    author="Matthew Wampler-Doty",
-    author_email="matthew.wampler.doty@gmail.com",
-    license='GPL',
-    version='0.1.23',
-    url='https://github.com/expanse-org/ethash',
-    download_url='https://github.com/expanse-org/ethash/tarball/v23',
-    description=('Python wrappers for ethash, the expanse proof of work '
-                 'hashing function'),
-    ext_modules=[pyethash],
-)
diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/benchmark/CMakeLists.txt b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/benchmark/CMakeLists.txt
deleted file mode 100644
index
3df4ab5967b0b..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/benchmark/CMakeLists.txt +++ /dev/null @@ -1,58 +0,0 @@ -include_directories(..) - -set(CMAKE_BUILD_TYPE Release) - -if (MSVC) - add_definitions("/openmp") -endif() - -# enable C++11, should probably be a bit more specific about compiler -if (NOT MSVC) - SET(CMAKE_CXX_FLAGS "-std=c++11") -endif() - -if (NOT MPI_FOUND) - find_package(MPI) -endif() - -if (NOT CRYPTOPP_FOUND) - find_package(CryptoPP 5.6.2) -endif() - -if (CRYPTOPP_FOUND) - add_definitions(-DWITH_CRYPTOPP) - find_package (Threads REQUIRED) -endif() - -if (NOT OpenCL_FOUND) - find_package(OpenCL) -endif() -if (OpenCL_FOUND) - add_definitions(-DWITH_OPENCL) - include_directories(${OpenCL_INCLUDE_DIRS}) - list(APPEND FILES ethash_cl_miner.cpp ethash_cl_miner.h) -endif() - -if (MPI_FOUND) - include_directories(${MPI_INCLUDE_PATH}) - add_executable (Benchmark_MPI_FULL benchmark.cpp) - target_link_libraries (Benchmark_MPI_FULL ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) - SET_TARGET_PROPERTIES(Benchmark_MPI_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DFULL -DMPI") - - add_executable (Benchmark_MPI_LIGHT benchmark.cpp) - target_link_libraries (Benchmark_MPI_LIGHT ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) - SET_TARGET_PROPERTIES(Benchmark_MPI_LIGHT PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DMPI") -endif() - -add_executable (Benchmark_FULL benchmark.cpp) -target_link_libraries (Benchmark_FULL ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT}) -SET_TARGET_PROPERTIES(Benchmark_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DFULL") - -add_executable (Benchmark_LIGHT benchmark.cpp) -target_link_libraries (Benchmark_LIGHT ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT}) - -if (OpenCL_FOUND) - add_executable (Benchmark_CL benchmark.cpp) - target_link_libraries (Benchmark_CL ${ETHHASH_LIBS} ethash-cl ${CMAKE_THREAD_LIBS_INIT}) - SET_TARGET_PROPERTIES(Benchmark_CL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DOPENCL") -endif() \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/benchmark/benchmark.cpp b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/benchmark/benchmark.cpp deleted file mode 100644 index dae41248242cb..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/benchmark/benchmark.cpp +++ /dev/null @@ -1,278 +0,0 @@ -/* - This file is part of cpp-expanse. - - cpp-expanse is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - cpp-expanse is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with cpp-expanse. If not, see <http://www.gnu.org/licenses/>. 
-*/ -/** @file benchmark.cpp - * @author Tim Hughes - * @date 2015 - */ - -#include -#include -#include -#include -#include -#ifdef OPENCL -#include -#endif -#include -#include - -#ifdef WITH_CRYPTOPP -#include -#include - -#else -#include "libethash/sha3.h" -#endif // WITH_CRYPTOPP - -#undef min -#undef max - -using std::chrono::high_resolution_clock; - -#if defined(OPENCL) -const unsigned trials = 1024*1024*32; -#elif defined(FULL) -const unsigned trials = 1024*1024/8; -#else -const unsigned trials = 1024*1024/1024; -#endif -uint8_t g_hashes[1024*32]; - -static char nibbleToChar(unsigned nibble) -{ - return (char) ((nibble >= 10 ? 'a'-10 : '0') + nibble); -} - -static uint8_t charToNibble(char chr) -{ - if (chr >= '0' && chr <= '9') - { - return (uint8_t) (chr - '0'); - } - if (chr >= 'a' && chr <= 'z') - { - return (uint8_t) (chr - 'a' + 10); - } - if (chr >= 'A' && chr <= 'Z') - { - return (uint8_t) (chr - 'A' + 10); - } - return 0; -} - -static std::vector<uint8_t> hexStringToBytes(char const* str) -{ - std::vector<uint8_t> bytes(strlen(str) >> 1); - for (unsigned i = 0; i != bytes.size(); ++i) - { - bytes[i] = charToNibble(str[i*2 | 0]) << 4; - bytes[i] |= charToNibble(str[i*2 | 1]); - } - return bytes; -} - -static std::string bytesToHexString(uint8_t const* bytes, unsigned size) -{ - std::string str; - for (unsigned i = 0; i != size; ++i) - { - str += nibbleToChar(bytes[i] >> 4); - str += nibbleToChar(bytes[i] & 0xf); - } - return str; -} - -static std::string bytesToHexString(ethash_h256_t const *hash, unsigned size) -{ - return bytesToHexString((uint8_t*)hash, size); -} - -extern "C" int main(void) -{ - // params for ethash - ethash_params params; - ethash_params_init(&params, 0); - //params.full_size = 262147 * 4096; // 1GBish; - //params.full_size = 32771 * 4096; // 128MBish; - //params.full_size = 8209 * 4096; // 8MBish; - //params.cache_size = 8209*4096; - //params.cache_size = 2053*4096; - ethash_h256_t seed; - ethash_h256_t previous_hash; - - memcpy(&seed, hexStringToBytes("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466").data(), 32); - memcpy(&previous_hash, hexStringToBytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").data(), 32); - - // allocate page aligned buffer for dataset -#ifdef FULL - void* full_mem_buf = malloc(params.full_size + 4095); - void* full_mem = (void*)((uintptr_t(full_mem_buf) + 4095) & ~4095); -#endif - void* cache_mem_buf = malloc(params.cache_size + 63); - void* cache_mem = (void*)((uintptr_t(cache_mem_buf) + 63) & ~63); - - ethash_cache cache; - cache.mem = cache_mem; - - // compute cache or full data - { - auto startTime = high_resolution_clock::now(); - ethash_mkcache(&cache, &params, &seed); - auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count(); - - ethash_h256_t cache_hash; - SHA3_256(&cache_hash, (uint8_t const*)cache_mem, params.cache_size); - debugf("ethash_mkcache: %ums, sha3: %s\n", (unsigned)((time*1000)/CLOCKS_PER_SEC), bytesToHexString(&cache_hash, sizeof(cache_hash)).data()); - - // print a couple of test hashes - { - auto startTime = high_resolution_clock::now(); - ethash_return_value hash; - ethash_light(&hash, &cache, &params, &previous_hash, 0); - auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count(); - debugf("ethash_light test: %ums, %s\n", (unsigned)time, bytesToHexString(&hash.result, 32).data()); - } - - #ifdef FULL - startTime = high_resolution_clock::now(); - ethash_compute_full_data(full_mem, &params, &cache); - time = 
std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count(); - debugf("ethash_compute_full_data: %ums\n", (unsigned)time); - #endif // FULL - } - -#ifdef OPENCL - ethash_cl_miner miner; - { - auto startTime = high_resolution_clock::now(); - if (!miner.init(params, &seed)) - exit(-1); - auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count(); - debugf("ethash_cl_miner init: %ums\n", (unsigned)time); - } -#endif - - -#ifdef FULL - { - auto startTime = high_resolution_clock::now(); - ethash_return_value hash; - ethash_full(&hash, full_mem, &params, &previous_hash, 0); - auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count(); - debugf("ethash_full test: %uns\n", (unsigned)time); - } -#endif - -#ifdef OPENCL - // validate 1024 hashes against CPU - miner.hash(g_hashes, (uint8_t*)&previous_hash, 0, 1024); - for (unsigned i = 0; i != 1024; ++i) - { - ethash_return_value hash; - ethash_light(&hash, &cache, &params, &previous_hash, i); - if (memcmp(&hash.result, g_hashes + 32*i, 32) != 0) - { - debugf("nonce %u failed: %s %s\n", i, bytesToHexString(g_hashes + 32*i, 32).c_str(), bytesToHexString(&hash.result, 32).c_str()); - static unsigned c = 0; - if (++c == 16) - { - exit(-1); - } - } - } - - // ensure nothing else is going on - miner.finish(); -#endif - - auto startTime = high_resolution_clock::now(); - unsigned hash_count = trials; - - #ifdef OPENCL - { - struct search_hook : ethash_cl_miner::search_hook - { - unsigned hash_count; - std::vector<uint64_t> nonce_vec; - - virtual bool found(uint64_t const* nonces, uint32_t count) - { - nonce_vec.insert(nonce_vec.end(), nonces, nonces + count); - return false; - } - - virtual bool searched(uint64_t start_nonce, uint32_t count) - { - // do nothing - hash_count += count; - return hash_count >= trials; - } - }; - search_hook hook; - hook.hash_count = 0; - - miner.search((uint8_t*)&previous_hash, 0x000000ffffffffff, hook); - - for (unsigned i = 0; i != hook.nonce_vec.size(); ++i) - { - uint64_t nonce = hook.nonce_vec[i]; - ethash_return_value hash; - ethash_light(&hash, &cache, &params, &previous_hash, nonce); - debugf("found: %.8x%.8x -> %s\n", unsigned(nonce>>32), unsigned(nonce), bytesToHexString(&hash.result, 32).c_str()); - } - - hash_count = hook.hash_count; - } - #else - { - //#pragma omp parallel for - for (int nonce = 0; nonce < trials; ++nonce) - { - ethash_return_value hash; - #ifdef FULL - ethash_full(&hash, full_mem, &params, &previous_hash, nonce); - #else - ethash_light(&hash, &cache, &params, &previous_hash, nonce); - #endif // FULL - } - } - #endif - auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count(); - debugf("Search took: %ums\n", (unsigned)time/1000); - - unsigned read_size = ETHASH_ACCESSES * ETHASH_MIX_BYTES; -#if defined(OPENCL) || defined(FULL) - debugf( - "hashrate: %8.2f Mh/s, bw: %8.2f GB/s\n", - (double)hash_count * (1000*1000)/time / (1000*1000), - (double)hash_count*read_size * (1000*1000)/time / (1024*1024*1024) - ); -#else - debugf( - "hashrate: %8.2f Kh/s, bw: %8.2f MB/s\n", - (double)hash_count * (1000*1000)/time / (1000), - (double)hash_count*read_size * (1000*1000)/time / (1024*1024) - ); -#endif - - free(cache_mem_buf); -#ifdef FULL - free(full_mem_buf); -#endif - - return 0; -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/CMakeLists.txt b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/CMakeLists.txt deleted file mode 100644 index a65621c3e9e66..0000000000000 --- 
a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/CMakeLists.txt +++ /dev/null @@ -1,44 +0,0 @@ -set(LIBRARY ethash) - -if (CPPETHEREUM) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") -endif () - -set(CMAKE_BUILD_TYPE Release) - -if (NOT MSVC) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") -endif() - -set(FILES util.h - io.c - internal.c - ethash.h - endian.h - compiler.h - fnv.h - data_sizes.h) - -if (MSVC) - list(APPEND FILES util_win32.c io_win32.c mmap_win32.c) -else() - list(APPEND FILES io_posix.c) -endif() - -if (NOT CRYPTOPP_FOUND) - find_package(CryptoPP 5.6.2) -endif() - -if (CRYPTOPP_FOUND) - add_definitions(-DWITH_CRYPTOPP) - include_directories( ${CRYPTOPP_INCLUDE_DIRS} ) - list(APPEND FILES sha3_cryptopp.cpp sha3_cryptopp.h) -else() - list(APPEND FILES sha3.c sha3.h) -endif() - -add_library(${LIBRARY} ${FILES}) - -if (CRYPTOPP_FOUND) - TARGET_LINK_LIBRARIES(${LIBRARY} ${CRYPTOPP_LIBRARIES}) -endif() diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/compiler.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/compiler.h deleted file mode 100644 index 7750ab9b7c777..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/compiler.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - This file is part of cpp-expanse. - - cpp-expanse is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - cpp-expanse is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with cpp-expanse. If not, see <http://www.gnu.org/licenses/>. -*/ -/** @file compiler.h - * @date 2014 - */ -#pragma once - -// Visual Studio doesn't support the inline keyword in C mode -#if defined(_MSC_VER) && !defined(__cplusplus) -#define inline __inline -#endif - -// pretend restrict is a standard keyword -#if defined(_MSC_VER) -#define restrict __restrict -#else -#define restrict __restrict__ -#endif - diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/data_sizes.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/data_sizes.h deleted file mode 100644 index 4eadea6127cf9..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/data_sizes.h +++ /dev/null @@ -1,812 +0,0 @@ -/* - This file is part of cpp-expanse. - - cpp-expanse is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - cpp-expanse is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with cpp-expanse. If not, see <http://www.gnu.org/licenses/>. 
-*/ - -/** @file data_sizes.h -* @author Matthew Wampler-Doty -* @date 2015 -*/ - -#pragma once - -#include -#include "compiler.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -// 2048 Epochs (~20 years) worth of tabulated DAG sizes - -// Generated with the following Mathematica Code: - -// GetCacheSizes[n_] := Module[{ -// CacheSizeBytesInit = 2^24, -// CacheGrowth = 2^17, -// HashBytes = 64, -// j = 0}, -// Reap[ -// While[j < n, -// Module[{i = -// Floor[(CacheSizeBytesInit + CacheGrowth * j) / HashBytes]}, -// While[! PrimeQ[i], i--]; -// Sow[i*HashBytes]; j++]]]][[2]][[1]] - - -static const uint64_t dag_sizes[2048] = { - 1073739904U, 1082130304U, 1090514816U, 1098906752U, 1107293056U, - 1115684224U, 1124070016U, 1132461952U, 1140849536U, 1149232768U, - 1157627776U, 1166013824U, 1174404736U, 1182786944U, 1191180416U, - 1199568512U, 1207958912U, 1216345216U, 1224732032U, 1233124736U, - 1241513344U, 1249902464U, 1258290304U, 1266673792U, 1275067264U, - 1283453312U, 1291844992U, 1300234112U, 1308619904U, 1317010048U, - 1325397376U, 1333787776U, 1342176128U, 1350561664U, 1358954368U, - 1367339392U, 1375731584U, 1384118144U, 1392507008U, 1400897408U, - 1409284736U, 1417673344U, 1426062464U, 1434451072U, 1442839168U, - 1451229056U, 1459615616U, 1468006016U, 1476394112U, 1484782976U, - 1493171584U, 1501559168U, 1509948032U, 1518337664U, 1526726528U, - 1535114624U, 1543503488U, 1551892096U, 1560278656U, 1568669056U, - 1577056384U, 1585446272U, 1593831296U, 1602219392U, 1610610304U, - 1619000192U, 1627386752U, 1635773824U, 1644164224U, 1652555648U, - 1660943488U, 1669332608U, 1677721216U, 1686109312U, 1694497664U, - 1702886272U, 1711274624U, 1719661184U, 1728047744U, 1736434816U, - 1744829056U, 1753218944U, 1761606272U, 1769995904U, 1778382464U, - 1786772864U, 1795157888U, 1803550592U, 1811937664U, 1820327552U, - 1828711552U, 1837102976U, 1845488768U, 1853879936U, 1862269312U, - 1870656896U, 1879048064U, 1887431552U, 1895825024U, 1904212096U, - 1912601216U, 1920988544U, 1929379456U, 1937765504U, 1946156672U, - 1954543232U, 1962932096U, 1971321728U, 1979707264U, 1988093056U, - 1996487552U, 2004874624U, 2013262208U, 2021653888U, 2030039936U, - 2038430848U, 2046819968U, 2055208576U, 2063596672U, 2071981952U, - 2080373632U, 2088762752U, 2097149056U, 2105539712U, 2113928576U, - 2122315136U, 2130700672U, 2139092608U, 2147483264U, 2155872128U, - 2164257664U, 2172642176U, 2181035392U, 2189426048U, 2197814912U, - 2206203008U, 2214587264U, 2222979712U, 2231367808U, 2239758208U, - 2248145024U, 2256527744U, 2264922752U, 2273312128U, 2281701248U, - 2290086272U, 2298476672U, 2306867072U, 2315251072U, 2323639168U, - 2332032128U, 2340420224U, 2348808064U, 2357196416U, 2365580416U, - 2373966976U, 2382363008U, 2390748544U, 2399139968U, 2407530368U, - 2415918976U, 2424307328U, 2432695424U, 2441084288U, 2449472384U, - 2457861248U, 2466247808U, 2474637184U, 2483026816U, 2491414144U, - 2499803776U, 2508191872U, 2516582272U, 2524970368U, 2533359232U, - 2541743488U, 2550134144U, 2558525056U, 2566913408U, 2575301504U, - 2583686528U, 2592073856U, 2600467328U, 2608856192U, 2617240448U, - 2625631616U, 2634022016U, 2642407552U, 2650796416U, 2659188352U, - 2667574912U, 2675965312U, 2684352896U, 2692738688U, 2701130624U, - 2709518464U, 2717907328U, 2726293376U, 2734685056U, 2743073152U, - 2751462016U, 2759851648U, 2768232832U, 2776625536U, 2785017728U, - 2793401984U, 2801794432U, 2810182016U, 2818571648U, 2826959488U, - 2835349376U, 2843734144U, 2852121472U, 2860514432U, 2868900992U, - 2877286784U, 2885676928U, 
2894069632U, 2902451584U, 2910843008U, - 2919234688U, 2927622784U, 2936011648U, 2944400768U, 2952789376U, - 2961177728U, 2969565568U, 2977951616U, 2986338944U, 2994731392U, - 3003120256U, 3011508352U, 3019895936U, 3028287104U, 3036675968U, - 3045063808U, 3053452928U, 3061837696U, 3070228352U, 3078615424U, - 3087003776U, 3095394944U, 3103782272U, 3112173184U, 3120562048U, - 3128944768U, 3137339264U, 3145725056U, 3154109312U, 3162505088U, - 3170893184U, 3179280256U, 3187669376U, 3196056704U, 3204445568U, - 3212836736U, 3221224064U, 3229612928U, 3238002304U, 3246391168U, - 3254778496U, 3263165824U, 3271556224U, 3279944576U, 3288332416U, - 3296719232U, 3305110912U, 3313500032U, 3321887104U, 3330273152U, - 3338658944U, 3347053184U, 3355440512U, 3363827072U, 3372220288U, - 3380608384U, 3388997504U, 3397384576U, 3405774208U, 3414163072U, - 3422551936U, 3430937984U, 3439328384U, 3447714176U, 3456104576U, - 3464493952U, 3472883584U, 3481268864U, 3489655168U, 3498048896U, - 3506434432U, 3514826368U, 3523213952U, 3531603584U, 3539987072U, - 3548380288U, 3556763264U, 3565157248U, 3573545344U, 3581934464U, - 3590324096U, 3598712704U, 3607098752U, 3615488384U, 3623877248U, - 3632265856U, 3640646528U, 3649043584U, 3657430144U, 3665821568U, - 3674207872U, 3682597504U, 3690984832U, 3699367808U, 3707764352U, - 3716152448U, 3724541056U, 3732925568U, 3741318016U, 3749706368U, - 3758091136U, 3766481536U, 3774872704U, 3783260032U, 3791650432U, - 3800036224U, 3808427648U, 3816815488U, 3825204608U, 3833592704U, - 3841981568U, 3850370432U, 3858755968U, 3867147904U, 3875536256U, - 3883920512U, 3892313728U, 3900702592U, 3909087872U, 3917478784U, - 3925868416U, 3934256512U, 3942645376U, 3951032192U, 3959422336U, - 3967809152U, 3976200064U, 3984588416U, 3992974976U, 4001363584U, - 4009751168U, 4018141312U, 4026530432U, 4034911616U, 4043308928U, - 4051695488U, 4060084352U, 4068472448U, 4076862848U, 4085249408U, - 4093640576U, 4102028416U, 4110413696U, 4118805632U, 4127194496U, - 4135583104U, 4143971968U, 4152360832U, 4160746112U, 4169135744U, - 4177525888U, 4185912704U, 4194303616U, 4202691968U, 4211076736U, - 4219463552U, 4227855488U, 4236246656U, 4244633728U, 4253022848U, - 4261412224U, 4269799808U, 4278184832U, 4286578048U, 4294962304U, - 4303349632U, 4311743104U, 4320130432U, 4328521088U, 4336909184U, - 4345295488U, 4353687424U, 4362073472U, 4370458496U, 4378852736U, - 4387238528U, 4395630208U, 4404019072U, 4412407424U, 4420790656U, - 4429182848U, 4437571456U, 4445962112U, 4454344064U, 4462738048U, - 4471119232U, 4479516544U, 4487904128U, 4496289664U, 4504682368U, - 4513068416U, 4521459584U, 4529846144U, 4538232704U, 4546619776U, - 4555010176U, 4563402112U, 4571790208U, 4580174464U, 4588567936U, - 4596957056U, 4605344896U, 4613734016U, 4622119808U, 4630511488U, - 4638898816U, 4647287936U, 4655675264U, 4664065664U, 4672451968U, - 4680842624U, 4689231488U, 4697620352U, 4706007424U, 4714397056U, - 4722786176U, 4731173248U, 4739562368U, 4747951744U, 4756340608U, - 4764727936U, 4773114496U, 4781504384U, 4789894784U, 4798283648U, - 4806667648U, 4815059584U, 4823449472U, 4831835776U, 4840226176U, - 4848612224U, 4857003392U, 4865391488U, 4873780096U, 4882169728U, - 4890557312U, 4898946944U, 4907333248U, 4915722368U, 4924110976U, - 4932499328U, 4940889728U, 4949276032U, 4957666432U, 4966054784U, - 4974438016U, 4982831488U, 4991221376U, 4999607168U, 5007998848U, - 5016386432U, 5024763776U, 5033164672U, 5041544576U, 5049941888U, - 5058329728U, 5066717056U, 5075107456U, 5083494272U, 5091883904U, - 5100273536U, 5108662144U, 
5117048192U, 5125436032U, 5133827456U, - 5142215296U, 5150605184U, 5158993024U, 5167382144U, 5175769472U, - 5184157568U, 5192543872U, 5200936064U, 5209324928U, 5217711232U, - 5226102656U, 5234490496U, 5242877312U, 5251263872U, 5259654016U, - 5268040832U, 5276434304U, 5284819328U, 5293209728U, 5301598592U, - 5309986688U, 5318374784U, 5326764416U, 5335151488U, 5343542144U, - 5351929472U, 5360319872U, 5368706944U, 5377096576U, 5385484928U, - 5393871232U, 5402263424U, 5410650496U, 5419040384U, 5427426944U, - 5435816576U, 5444205952U, 5452594816U, 5460981376U, 5469367936U, - 5477760896U, 5486148736U, 5494536832U, 5502925952U, 5511315328U, - 5519703424U, 5528089984U, 5536481152U, 5544869504U, 5553256064U, - 5561645696U, 5570032768U, 5578423936U, 5586811264U, 5595193216U, - 5603585408U, 5611972736U, 5620366208U, 5628750464U, 5637143936U, - 5645528192U, 5653921408U, 5662310272U, 5670694784U, 5679082624U, - 5687474048U, 5695864448U, 5704251008U, 5712641408U, 5721030272U, - 5729416832U, 5737806208U, 5746194304U, 5754583936U, 5762969984U, - 5771358592U, 5779748224U, 5788137856U, 5796527488U, 5804911232U, - 5813300608U, 5821692544U, 5830082176U, 5838468992U, 5846855552U, - 5855247488U, 5863636096U, 5872024448U, 5880411008U, 5888799872U, - 5897186432U, 5905576832U, 5913966976U, 5922352768U, 5930744704U, - 5939132288U, 5947522432U, 5955911296U, 5964299392U, 5972688256U, - 5981074304U, 5989465472U, 5997851008U, 6006241408U, 6014627968U, - 6023015552U, 6031408256U, 6039796096U, 6048185216U, 6056574848U, - 6064963456U, 6073351808U, 6081736064U, 6090128768U, 6098517632U, - 6106906496U, 6115289216U, 6123680896U, 6132070016U, 6140459648U, - 6148849024U, 6157237376U, 6165624704U, 6174009728U, 6182403712U, - 6190792064U, 6199176064U, 6207569792U, 6215952256U, 6224345216U, - 6232732544U, 6241124224U, 6249510272U, 6257899136U, 6266287744U, - 6274676864U, 6283065728U, 6291454336U, 6299843456U, 6308232064U, - 6316620928U, 6325006208U, 6333395584U, 6341784704U, 6350174848U, - 6358562176U, 6366951296U, 6375337856U, 6383729536U, 6392119168U, - 6400504192U, 6408895616U, 6417283456U, 6425673344U, 6434059136U, - 6442444672U, 6450837376U, 6459223424U, 6467613056U, 6476004224U, - 6484393088U, 6492781952U, 6501170048U, 6509555072U, 6517947008U, - 6526336384U, 6534725504U, 6543112832U, 6551500672U, 6559888768U, - 6568278656U, 6576662912U, 6585055616U, 6593443456U, 6601834112U, - 6610219648U, 6618610304U, 6626999168U, 6635385472U, 6643777408U, - 6652164224U, 6660552832U, 6668941952U, 6677330048U, 6685719424U, - 6694107776U, 6702493568U, 6710882176U, 6719274112U, 6727662976U, - 6736052096U, 6744437632U, 6752825984U, 6761213824U, 6769604224U, - 6777993856U, 6786383488U, 6794770816U, 6803158144U, 6811549312U, - 6819937664U, 6828326528U, 6836706176U, 6845101696U, 6853491328U, - 6861880448U, 6870269312U, 6878655104U, 6887046272U, 6895433344U, - 6903822208U, 6912212864U, 6920596864U, 6928988288U, 6937377152U, - 6945764992U, 6954149248U, 6962544256U, 6970928768U, 6979317376U, - 6987709312U, 6996093824U, 7004487296U, 7012875392U, 7021258624U, - 7029652352U, 7038038912U, 7046427776U, 7054818944U, 7063207808U, - 7071595136U, 7079980928U, 7088372608U, 7096759424U, 7105149824U, - 7113536896U, 7121928064U, 7130315392U, 7138699648U, 7147092352U, - 7155479168U, 7163865728U, 7172249984U, 7180648064U, 7189036672U, - 7197424768U, 7205810816U, 7214196608U, 7222589824U, 7230975104U, - 7239367552U, 7247755904U, 7256145536U, 7264533376U, 7272921472U, - 7281308032U, 7289694848U, 7298088832U, 7306471808U, 7314864512U, - 7323253888U, 7331643008U, 
7340029568U, 7348419712U, 7356808832U, - 7365196672U, 7373585792U, 7381973888U, 7390362752U, 7398750592U, - 7407138944U, 7415528576U, 7423915648U, 7432302208U, 7440690304U, - 7449080192U, 7457472128U, 7465860992U, 7474249088U, 7482635648U, - 7491023744U, 7499412608U, 7507803008U, 7516192384U, 7524579968U, - 7532967296U, 7541358464U, 7549745792U, 7558134656U, 7566524032U, - 7574912896U, 7583300992U, 7591690112U, 7600075136U, 7608466816U, - 7616854912U, 7625244544U, 7633629824U, 7642020992U, 7650410368U, - 7658794112U, 7667187328U, 7675574912U, 7683961984U, 7692349568U, - 7700739712U, 7709130368U, 7717519232U, 7725905536U, 7734295424U, - 7742683264U, 7751069056U, 7759457408U, 7767849088U, 7776238208U, - 7784626816U, 7793014912U, 7801405312U, 7809792128U, 7818179968U, - 7826571136U, 7834957184U, 7843347328U, 7851732352U, 7860124544U, - 7868512384U, 7876902016U, 7885287808U, 7893679744U, 7902067072U, - 7910455936U, 7918844288U, 7927230848U, 7935622784U, 7944009344U, - 7952400256U, 7960786048U, 7969176704U, 7977565312U, 7985953408U, - 7994339968U, 8002730368U, 8011119488U, 8019508096U, 8027896192U, - 8036285056U, 8044674688U, 8053062272U, 8061448832U, 8069838464U, - 8078227328U, 8086616704U, 8095006592U, 8103393664U, 8111783552U, - 8120171392U, 8128560256U, 8136949376U, 8145336704U, 8153726848U, - 8162114944U, 8170503296U, 8178891904U, 8187280768U, 8195669632U, - 8204058496U, 8212444544U, 8220834176U, 8229222272U, 8237612672U, - 8246000768U, 8254389376U, 8262775168U, 8271167104U, 8279553664U, - 8287944064U, 8296333184U, 8304715136U, 8313108352U, 8321497984U, - 8329885568U, 8338274432U, 8346663296U, 8355052928U, 8363441536U, - 8371828352U, 8380217984U, 8388606592U, 8396996224U, 8405384576U, - 8413772672U, 8422161536U, 8430549376U, 8438939008U, 8447326592U, - 8455715456U, 8464104832U, 8472492928U, 8480882048U, 8489270656U, - 8497659776U, 8506045312U, 8514434944U, 8522823808U, 8531208832U, - 8539602304U, 8547990656U, 8556378752U, 8564768384U, 8573154176U, - 8581542784U, 8589933952U, 8598322816U, 8606705024U, 8615099264U, - 8623487872U, 8631876992U, 8640264064U, 8648653952U, 8657040256U, - 8665430656U, 8673820544U, 8682209152U, 8690592128U, 8698977152U, - 8707374464U, 8715763328U, 8724151424U, 8732540032U, 8740928384U, - 8749315712U, 8757704576U, 8766089344U, 8774480768U, 8782871936U, - 8791260032U, 8799645824U, 8808034432U, 8816426368U, 8824812928U, - 8833199488U, 8841591424U, 8849976448U, 8858366336U, 8866757248U, - 8875147136U, 8883532928U, 8891923328U, 8900306816U, 8908700288U, - 8917088384U, 8925478784U, 8933867392U, 8942250368U, 8950644608U, - 8959032704U, 8967420544U, 8975809664U, 8984197504U, 8992584064U, - 9000976256U, 9009362048U, 9017752448U, 9026141312U, 9034530688U, - 9042917504U, 9051307904U, 9059694208U, 9068084864U, 9076471424U, - 9084861824U, 9093250688U, 9101638528U, 9110027648U, 9118416512U, - 9126803584U, 9135188096U, 9143581312U, 9151969664U, 9160356224U, - 9168747136U, 9177134464U, 9185525632U, 9193910144U, 9202302848U, - 9210690688U, 9219079552U, 9227465344U, 9235854464U, 9244244864U, - 9252633472U, 9261021824U, 9269411456U, 9277799296U, 9286188928U, - 9294574208U, 9302965888U, 9311351936U, 9319740032U, 9328131968U, - 9336516736U, 9344907392U, 9353296768U, 9361685888U, 9370074752U, - 9378463616U, 9386849408U, 9395239808U, 9403629184U, 9412016512U, - 9420405376U, 9428795008U, 9437181568U, 9445570688U, 9453960832U, - 9462346624U, 9470738048U, 9479121536U, 9487515008U, 9495903616U, - 9504289664U, 9512678528U, 9521067904U, 9529456256U, 9537843584U, - 9546233728U, 9554621312U, 
9563011456U, 9571398784U, 9579788672U, - 9588178304U, 9596567168U, 9604954496U, 9613343104U, 9621732992U, - 9630121856U, 9638508416U, 9646898816U, 9655283584U, 9663675776U, - 9672061312U, 9680449664U, 9688840064U, 9697230464U, 9705617536U, - 9714003584U, 9722393984U, 9730772608U, 9739172224U, 9747561088U, - 9755945344U, 9764338816U, 9772726144U, 9781116544U, 9789503872U, - 9797892992U, 9806282624U, 9814670464U, 9823056512U, 9831439232U, - 9839833984U, 9848224384U, 9856613504U, 9865000576U, 9873391232U, - 9881772416U, 9890162816U, 9898556288U, 9906940544U, 9915333248U, - 9923721088U, 9932108672U, 9940496512U, 9948888448U, 9957276544U, - 9965666176U, 9974048384U, 9982441088U, 9990830464U, 9999219584U, - 10007602816U, 10015996544U, 10024385152U, 10032774016U, 10041163648U, - 10049548928U, 10057940096U, 10066329472U, 10074717824U, 10083105152U, - 10091495296U, 10099878784U, 10108272256U, 10116660608U, 10125049216U, - 10133437312U, 10141825664U, 10150213504U, 10158601088U, 10166991232U, - 10175378816U, 10183766144U, 10192157312U, 10200545408U, 10208935552U, - 10217322112U, 10225712768U, 10234099328U, 10242489472U, 10250876032U, - 10259264896U, 10267656064U, 10276042624U, 10284429184U, 10292820352U, - 10301209472U, 10309598848U, 10317987712U, 10326375296U, 10334763392U, - 10343153536U, 10351541632U, 10359930752U, 10368318592U, 10376707456U, - 10385096576U, 10393484672U, 10401867136U, 10410262144U, 10418647424U, - 10427039104U, 10435425664U, 10443810176U, 10452203648U, 10460589952U, - 10468982144U, 10477369472U, 10485759104U, 10494147712U, 10502533504U, - 10510923392U, 10519313536U, 10527702656U, 10536091264U, 10544478592U, - 10552867712U, 10561255808U, 10569642368U, 10578032768U, 10586423168U, - 10594805632U, 10603200128U, 10611588992U, 10619976064U, 10628361344U, - 10636754048U, 10645143424U, 10653531776U, 10661920384U, 10670307968U, - 10678696832U, 10687086464U, 10695475072U, 10703863168U, 10712246144U, - 10720639616U, 10729026688U, 10737414784U, 10745806208U, 10754190976U, - 10762581376U, 10770971264U, 10779356288U, 10787747456U, 10796135552U, - 10804525184U, 10812915584U, 10821301888U, 10829692288U, 10838078336U, - 10846469248U, 10854858368U, 10863247232U, 10871631488U, 10880023424U, - 10888412032U, 10896799616U, 10905188992U, 10913574016U, 10921964672U, - 10930352768U, 10938742912U, 10947132544U, 10955518592U, 10963909504U, - 10972298368U, 10980687488U, 10989074816U, 10997462912U, 11005851776U, - 11014241152U, 11022627712U, 11031017344U, 11039403904U, 11047793024U, - 11056184704U, 11064570752U, 11072960896U, 11081343872U, 11089737856U, - 11098128256U, 11106514816U, 11114904448U, 11123293568U, 11131680128U, - 11140065152U, 11148458368U, 11156845696U, 11165236864U, 11173624192U, - 11182013824U, 11190402688U, 11198790784U, 11207179136U, 11215568768U, - 11223957376U, 11232345728U, 11240734592U, 11249122688U, 11257511296U, - 11265899648U, 11274285952U, 11282675584U, 11291065472U, 11299452544U, - 11307842432U, 11316231296U, 11324616832U, 11333009024U, 11341395584U, - 11349782656U, 11358172288U, 11366560384U, 11374950016U, 11383339648U, - 11391721856U, 11400117376U, 11408504192U, 11416893568U, 11425283456U, - 11433671552U, 11442061184U, 11450444672U, 11458837888U, 11467226752U, - 11475611776U, 11484003968U, 11492392064U, 11500780672U, 11509169024U, - 11517550976U, 11525944448U, 11534335616U, 11542724224U, 11551111808U, - 11559500672U, 11567890304U, 11576277376U, 11584667008U, 11593056128U, - 11601443456U, 11609830016U, 11618221952U, 11626607488U, 11634995072U, - 11643387776U, 11651775104U, 
11660161664U, 11668552576U, 11676940928U, - 11685330304U, 11693718656U, 11702106496U, 11710496128U, 11718882688U, - 11727273088U, 11735660416U, 11744050048U, 11752437376U, 11760824704U, - 11769216128U, 11777604736U, 11785991296U, 11794381952U, 11802770048U, - 11811157888U, 11819548544U, 11827932544U, 11836324736U, 11844713344U, - 11853100928U, 11861486464U, 11869879936U, 11878268032U, 11886656896U, - 11895044992U, 11903433088U, 11911822976U, 11920210816U, 11928600448U, - 11936987264U, 11945375872U, 11953761152U, 11962151296U, 11970543488U, - 11978928512U, 11987320448U, 11995708288U, 12004095104U, 12012486272U, - 12020875136U, 12029255552U, 12037652096U, 12046039168U, 12054429568U, - 12062813824U, 12071206528U, 12079594624U, 12087983744U, 12096371072U, - 12104759936U, 12113147264U, 12121534592U, 12129924992U, 12138314624U, - 12146703232U, 12155091584U, 12163481216U, 12171864704U, 12180255872U, - 12188643968U, 12197034112U, 12205424512U, 12213811328U, 12222199424U, - 12230590336U, 12238977664U, 12247365248U, 12255755392U, 12264143488U, - 12272531584U, 12280920448U, 12289309568U, 12297694592U, 12306086528U, - 12314475392U, 12322865024U, 12331253632U, 12339640448U, 12348029312U, - 12356418944U, 12364805248U, 12373196672U, 12381580928U, 12389969024U, - 12398357632U, 12406750592U, 12415138432U, 12423527552U, 12431916416U, - 12440304512U, 12448692352U, 12457081216U, 12465467776U, 12473859968U, - 12482245504U, 12490636672U, 12499025536U, 12507411584U, 12515801728U, - 12524190592U, 12532577152U, 12540966272U, 12549354368U, 12557743232U, - 12566129536U, 12574523264U, 12582911872U, 12591299456U, 12599688064U, - 12608074624U, 12616463488U, 12624845696U, 12633239936U, 12641631616U, - 12650019968U, 12658407296U, 12666795136U, 12675183232U, 12683574656U, - 12691960192U, 12700350592U, 12708740224U, 12717128576U, 12725515904U, - 12733906816U, 12742295168U, 12750680192U, 12759071872U, 12767460736U, - 12775848832U, 12784236928U, 12792626816U, 12801014656U, 12809404288U, - 12817789312U, 12826181504U, 12834568832U, 12842954624U, 12851345792U, - 12859732352U, 12868122496U, 12876512128U, 12884901248U, 12893289088U, - 12901672832U, 12910067584U, 12918455168U, 12926842496U, 12935232896U, - 12943620736U, 12952009856U, 12960396928U, 12968786816U, 12977176192U, - 12985563776U, 12993951104U, 13002341504U, 13010730368U, 13019115392U, - 13027506304U, 13035895168U, 13044272512U, 13052673152U, 13061062528U, - 13069446272U, 13077838976U, 13086227072U, 13094613632U, 13103000192U, - 13111393664U, 13119782528U, 13128157568U, 13136559232U, 13144945024U, - 13153329536U, 13161724288U, 13170111872U, 13178502784U, 13186884736U, - 13195279744U, 13203667072U, 13212057472U, 13220445824U, 13228832128U, - 13237221248U, 13245610624U, 13254000512U, 13262388352U, 13270777472U, - 13279166336U, 13287553408U, 13295943296U, 13304331904U, 13312719488U, - 13321108096U, 13329494656U, 13337885824U, 13346274944U, 13354663808U, - 13363051136U, 13371439232U, 13379825024U, 13388210816U, 13396605056U, - 13404995456U, 13413380224U, 13421771392U, 13430159744U, 13438546048U, - 13446937216U, 13455326848U, 13463708288U, 13472103808U, 13480492672U, - 13488875648U, 13497269888U, 13505657728U, 13514045312U, 13522435712U, - 13530824576U, 13539210112U, 13547599232U, 13555989376U, 13564379008U, - 13572766336U, 13581154432U, 13589544832U, 13597932928U, 13606320512U, - 13614710656U, 13623097472U, 13631477632U, 13639874944U, 13648264064U, - 13656652928U, 13665041792U, 13673430656U, 13681818496U, 13690207616U, - 13698595712U, 13706982272U, 13715373184U, 
13723762048U, 13732150144U, - 13740536704U, 13748926592U, 13757316224U, 13765700992U, 13774090112U, - 13782477952U, 13790869376U, 13799259008U, 13807647872U, 13816036736U, - 13824425344U, 13832814208U, 13841202304U, 13849591424U, 13857978752U, - 13866368896U, 13874754688U, 13883145344U, 13891533184U, 13899919232U, - 13908311168U, 13916692096U, 13925085056U, 13933473152U, 13941866368U, - 13950253696U, 13958643584U, 13967032192U, 13975417216U, 13983807616U, - 13992197504U, 14000582272U, 14008973696U, 14017363072U, 14025752192U, - 14034137984U, 14042528384U, 14050918016U, 14059301504U, 14067691648U, - 14076083584U, 14084470144U, 14092852352U, 14101249664U, 14109635968U, - 14118024832U, 14126407552U, 14134804352U, 14143188608U, 14151577984U, - 14159968384U, 14168357248U, 14176741504U, 14185127296U, 14193521024U, - 14201911424U, 14210301824U, 14218685056U, 14227067264U, 14235467392U, - 14243855488U, 14252243072U, 14260630144U, 14269021568U, 14277409408U, - 14285799296U, 14294187904U, 14302571392U, 14310961792U, 14319353728U, - 14327738752U, 14336130944U, 14344518784U, 14352906368U, 14361296512U, - 14369685376U, 14378071424U, 14386462592U, 14394848128U, 14403230848U, - 14411627392U, 14420013952U, 14428402304U, 14436793472U, 14445181568U, - 14453569664U, 14461959808U, 14470347904U, 14478737024U, 14487122816U, - 14495511424U, 14503901824U, 14512291712U, 14520677504U, 14529064832U, - 14537456768U, 14545845632U, 14554234496U, 14562618496U, 14571011456U, - 14579398784U, 14587789184U, 14596172672U, 14604564608U, 14612953984U, - 14621341312U, 14629724288U, 14638120832U, 14646503296U, 14654897536U, - 14663284864U, 14671675264U, 14680061056U, 14688447616U, 14696835968U, - 14705228416U, 14713616768U, 14722003328U, 14730392192U, 14738784128U, - 14747172736U, 14755561088U, 14763947648U, 14772336512U, 14780725376U, - 14789110144U, 14797499776U, 14805892736U, 14814276992U, 14822670208U, - 14831056256U, 14839444352U, 14847836032U, 14856222848U, 14864612992U, - 14872997504U, 14881388672U, 14889775744U, 14898165376U, 14906553472U, - 14914944896U, 14923329664U, 14931721856U, 14940109696U, 14948497024U, - 14956887424U, 14965276544U, 14973663616U, 14982053248U, 14990439808U, - 14998830976U, 15007216768U, 15015605888U, 15023995264U, 15032385152U, - 15040768384U, 15049154944U, 15057549184U, 15065939072U, 15074328448U, - 15082715008U, 15091104128U, 15099493504U, 15107879296U, 15116269184U, - 15124659584U, 15133042304U, 15141431936U, 15149824384U, 15158214272U, - 15166602368U, 15174991232U, 15183378304U, 15191760512U, 15200154496U, - 15208542592U, 15216931712U, 15225323392U, 15233708416U, 15242098048U, - 15250489216U, 15258875264U, 15267265408U, 15275654528U, 15284043136U, - 15292431488U, 15300819584U, 15309208192U, 15317596544U, 15325986176U, - 15334374784U, 15342763648U, 15351151744U, 15359540608U, 15367929728U, - 15376318336U, 15384706432U, 15393092992U, 15401481856U, 15409869952U, - 15418258816U, 15426649984U, 15435037568U, 15443425664U, 15451815296U, - 15460203392U, 15468589184U, 15476979328U, 15485369216U, 15493755776U, - 15502146944U, 15510534272U, 15518924416U, 15527311232U, 15535699072U, - 15544089472U, 15552478336U, 15560866688U, 15569254528U, 15577642624U, - 15586031488U, 15594419072U, 15602809472U, 15611199104U, 15619586432U, - 15627975296U, 15636364928U, 15644753792U, 15653141888U, 15661529216U, - 15669918848U, 15678305152U, 15686696576U, 15695083136U, 15703474048U, - 15711861632U, 15720251264U, 15728636288U, 15737027456U, 15745417088U, - 15753804928U, 15762194048U, 15770582656U, 15778971008U, 
15787358336U, - 15795747712U, 15804132224U, 15812523392U, 15820909696U, 15829300096U, - 15837691264U, 15846071936U, 15854466944U, 15862855808U, 15871244672U, - 15879634816U, 15888020608U, 15896409728U, 15904799104U, 15913185152U, - 15921577088U, 15929966464U, 15938354816U, 15946743424U, 15955129472U, - 15963519872U, 15971907968U, 15980296064U, 15988684928U, 15997073024U, - 16005460864U, 16013851264U, 16022241152U, 16030629248U, 16039012736U, - 16047406976U, 16055794816U, 16064181376U, 16072571264U, 16080957824U, - 16089346688U, 16097737856U, 16106125184U, 16114514816U, 16122904192U, - 16131292544U, 16139678848U, 16148066944U, 16156453504U, 16164839552U, - 16173236096U, 16181623424U, 16190012032U, 16198401152U, 16206790528U, - 16215177344U, 16223567744U, 16231956352U, 16240344704U, 16248731008U, - 16257117824U, 16265504384U, 16273898624U, 16282281856U, 16290668672U, - 16299064192U, 16307449216U, 16315842176U, 16324230016U, 16332613504U, - 16341006464U, 16349394304U, 16357783168U, 16366172288U, 16374561664U, - 16382951296U, 16391337856U, 16399726208U, 16408116352U, 16416505472U, - 16424892032U, 16433282176U, 16441668224U, 16450058624U, 16458448768U, - 16466836864U, 16475224448U, 16483613056U, 16492001408U, 16500391808U, - 16508779648U, 16517166976U, 16525555328U, 16533944192U, 16542330752U, - 16550719616U, 16559110528U, 16567497088U, 16575888512U, 16584274816U, - 16592665472U, 16601051008U, 16609442944U, 16617832064U, 16626218624U, - 16634607488U, 16642996096U, 16651385728U, 16659773824U, 16668163712U, - 16676552576U, 16684938112U, 16693328768U, 16701718144U, 16710095488U, - 16718492288U, 16726883968U, 16735272832U, 16743661184U, 16752049792U, - 16760436608U, 16768827008U, 16777214336U, 16785599104U, 16793992832U, - 16802381696U, 16810768768U, 16819151744U, 16827542656U, 16835934848U, - 16844323712U, 16852711552U, 16861101952U, 16869489536U, 16877876864U, - 16886265728U, 16894653056U, 16903044736U, 16911431296U, 16919821696U, - 16928207488U, 16936592768U, 16944987776U, 16953375616U, 16961763968U, - 16970152832U, 16978540928U, 16986929536U, 16995319168U, 17003704448U, - 17012096896U, 17020481152U, 17028870784U, 17037262208U, 17045649536U, - 17054039936U, 17062426496U, 17070814336U, 17079205504U, 17087592064U, - 17095978112U, 17104369024U, 17112759424U, 17121147776U, 17129536384U, - 17137926016U, 17146314368U, 17154700928U, 17163089792U, 17171480192U, - 17179864192U, 17188256896U, 17196644992U, 17205033856U, 17213423488U, - 17221811072U, 17230198912U, 17238588032U, 17246976896U, 17255360384U, - 17263754624U, 17272143232U, 17280530048U, 17288918912U, 17297309312U, - 17305696384U, 17314085504U, 17322475136U, 17330863744U, 17339252096U, - 17347640192U, 17356026496U, 17364413824U, 17372796544U, 17381190016U, - 17389583488U, 17397972608U, 17406360704U, 17414748544U, 17423135872U, - 17431527296U, 17439915904U, 17448303232U, 17456691584U, 17465081728U, - 17473468288U, 17481857408U, 17490247552U, 17498635904U, 17507022464U, - 17515409024U, 17523801728U, 17532189824U, 17540577664U, 17548966016U, - 17557353344U, 17565741184U, 17574131584U, 17582519168U, 17590907008U, - 17599296128U, 17607687808U, 17616076672U, 17624455808U, 17632852352U, - 17641238656U, 17649630848U, 17658018944U, 17666403968U, 17674794112U, - 17683178368U, 17691573376U, 17699962496U, 17708350592U, 17716739968U, - 17725126528U, 17733517184U, 17741898112U, 17750293888U, 17758673024U, - 17767070336U, 17775458432U, 17783848832U, 17792236928U, 17800625536U, - 17809012352U, 17817402752U, 17825785984U, 17834178944U, 17842563968U, - 
17850955648U, 17859344512U, 17867732864U, 17876119424U, 17884511872U, - 17892900224U, 17901287296U, 17909677696U, 17918058112U, 17926451072U, - 17934843776U, 17943230848U, 17951609216U, 17960008576U, 17968397696U, - 17976784256U, 17985175424U, 17993564032U, 18001952128U, 18010339712U, - 18018728576U, 18027116672U, 18035503232U, 18043894144U, 18052283264U, - 18060672128U, 18069056384U, 18077449856U, 18085837184U, 18094225792U, - 18102613376U, 18111004544U, 18119388544U, 18127781248U, 18136170368U, - 18144558976U, 18152947328U, 18161336192U, 18169724288U, 18178108544U, - 18186498944U, 18194886784U, 18203275648U, 18211666048U, 18220048768U, - 18228444544U, 18236833408U, 18245220736U -}; - - -// Generated with the following Mathematica Code: - -// GetCacheSizes[n_] := Module[{ -// DataSetSizeBytesInit = 2^30, -// MixBytes = 128, -// DataSetGrowth = 2^23, -// HashBytes = 64, -// CacheMultiplier = 1024, -// j = 0}, -// Reap[ -// While[j < n, -// Module[{i = Floor[(DataSetSizeBytesInit + DataSetGrowth * j) / (CacheMultiplier * HashBytes)]}, -// While[! PrimeQ[i], i--]; -// Sow[i*HashBytes]; j++]]]][[2]][[1]] - -const uint64_t cache_sizes[2048] = { - 16776896U, 16907456U, 17039296U, 17170112U, 17301056U, 17432512U, 17563072U, - 17693888U, 17824192U, 17955904U, 18087488U, 18218176U, 18349504U, 18481088U, - 18611392U, 18742336U, 18874304U, 19004224U, 19135936U, 19267264U, 19398208U, - 19529408U, 19660096U, 19791424U, 19922752U, 20053952U, 20184896U, 20315968U, - 20446912U, 20576576U, 20709184U, 20840384U, 20971072U, 21102272U, 21233216U, - 21364544U, 21494848U, 21626816U, 21757376U, 21887552U, 22019392U, 22151104U, - 22281536U, 22412224U, 22543936U, 22675264U, 22806464U, 22935872U, 23068096U, - 23198272U, 23330752U, 23459008U, 23592512U, 23723968U, 23854912U, 23986112U, - 24116672U, 24247616U, 24378688U, 24509504U, 24640832U, 24772544U, 24903488U, - 25034432U, 25165376U, 25296704U, 25427392U, 25558592U, 25690048U, 25820096U, - 25951936U, 26081728U, 26214208U, 26345024U, 26476096U, 26606656U, 26737472U, - 26869184U, 26998208U, 27131584U, 27262528U, 27393728U, 27523904U, 27655744U, - 27786688U, 27917888U, 28049344U, 28179904U, 28311488U, 28441792U, 28573504U, - 28700864U, 28835648U, 28966208U, 29096768U, 29228608U, 29359808U, 29490752U, - 29621824U, 29752256U, 29882816U, 30014912U, 30144448U, 30273728U, 30406976U, - 30538432U, 30670784U, 30799936U, 30932672U, 31063744U, 31195072U, 31325248U, - 31456192U, 31588288U, 31719232U, 31850432U, 31981504U, 32110784U, 32243392U, - 32372672U, 32505664U, 32636608U, 32767808U, 32897344U, 33029824U, 33160768U, - 33289664U, 33423296U, 33554368U, 33683648U, 33816512U, 33947456U, 34076992U, - 34208704U, 34340032U, 34471744U, 34600256U, 34734016U, 34864576U, 34993984U, - 35127104U, 35258176U, 35386688U, 35518528U, 35650624U, 35782336U, 35910976U, - 36044608U, 36175808U, 36305728U, 36436672U, 36568384U, 36699968U, 36830656U, - 36961984U, 37093312U, 37223488U, 37355072U, 37486528U, 37617472U, 37747904U, - 37879232U, 38009792U, 38141888U, 38272448U, 38403392U, 38535104U, 38660672U, - 38795584U, 38925632U, 39059264U, 39190336U, 39320768U, 39452096U, 39581632U, - 39713984U, 39844928U, 39974848U, 40107968U, 40238144U, 40367168U, 40500032U, - 40631744U, 40762816U, 40894144U, 41023552U, 41155904U, 41286208U, 41418304U, - 41547712U, 41680448U, 41811904U, 41942848U, 42073792U, 42204992U, 42334912U, - 42467008U, 42597824U, 42729152U, 42860096U, 42991552U, 43122368U, 43253696U, - 43382848U, 43515712U, 43646912U, 43777088U, 43907648U, 44039104U, 44170432U, - 44302144U, 44433344U, 
44564288U, 44694976U, 44825152U, 44956864U, 45088448U, - 45219008U, 45350464U, 45481024U, 45612608U, 45744064U, 45874496U, 46006208U, - 46136768U, 46267712U, 46399424U, 46529344U, 46660672U, 46791488U, 46923328U, - 47053504U, 47185856U, 47316928U, 47447872U, 47579072U, 47710144U, 47839936U, - 47971648U, 48103232U, 48234176U, 48365248U, 48496192U, 48627136U, 48757312U, - 48889664U, 49020736U, 49149248U, 49283008U, 49413824U, 49545152U, 49675712U, - 49807168U, 49938368U, 50069056U, 50200256U, 50331584U, 50462656U, 50593472U, - 50724032U, 50853952U, 50986048U, 51117632U, 51248576U, 51379904U, 51510848U, - 51641792U, 51773248U, 51903296U, 52035136U, 52164032U, 52297664U, 52427968U, - 52557376U, 52690112U, 52821952U, 52952896U, 53081536U, 53213504U, 53344576U, - 53475776U, 53608384U, 53738816U, 53870528U, 54000832U, 54131776U, 54263744U, - 54394688U, 54525248U, 54655936U, 54787904U, 54918592U, 55049152U, 55181248U, - 55312064U, 55442752U, 55574336U, 55705024U, 55836224U, 55967168U, 56097856U, - 56228672U, 56358592U, 56490176U, 56621888U, 56753728U, 56884928U, 57015488U, - 57146816U, 57278272U, 57409216U, 57540416U, 57671104U, 57802432U, 57933632U, - 58064576U, 58195264U, 58326976U, 58457408U, 58588864U, 58720192U, 58849984U, - 58981696U, 59113024U, 59243456U, 59375552U, 59506624U, 59637568U, 59768512U, - 59897792U, 60030016U, 60161984U, 60293056U, 60423872U, 60554432U, 60683968U, - 60817216U, 60948032U, 61079488U, 61209664U, 61341376U, 61471936U, 61602752U, - 61733696U, 61865792U, 61996736U, 62127808U, 62259136U, 62389568U, 62520512U, - 62651584U, 62781632U, 62910784U, 63045056U, 63176128U, 63307072U, 63438656U, - 63569216U, 63700928U, 63831616U, 63960896U, 64093888U, 64225088U, 64355392U, - 64486976U, 64617664U, 64748608U, 64879424U, 65009216U, 65142464U, 65273792U, - 65402816U, 65535424U, 65666752U, 65797696U, 65927744U, 66060224U, 66191296U, - 66321344U, 66453056U, 66584384U, 66715328U, 66846656U, 66977728U, 67108672U, - 67239104U, 67370432U, 67501888U, 67631296U, 67763776U, 67895104U, 68026304U, - 68157248U, 68287936U, 68419264U, 68548288U, 68681408U, 68811968U, 68942912U, - 69074624U, 69205568U, 69337024U, 69467584U, 69599168U, 69729472U, 69861184U, - 69989824U, 70122944U, 70253888U, 70385344U, 70515904U, 70647232U, 70778816U, - 70907968U, 71040832U, 71171648U, 71303104U, 71432512U, 71564992U, 71695168U, - 71826368U, 71958464U, 72089536U, 72219712U, 72350144U, 72482624U, 72613568U, - 72744512U, 72875584U, 73006144U, 73138112U, 73268672U, 73400128U, 73530944U, - 73662272U, 73793344U, 73924544U, 74055104U, 74185792U, 74316992U, 74448832U, - 74579392U, 74710976U, 74841664U, 74972864U, 75102784U, 75233344U, 75364544U, - 75497024U, 75627584U, 75759296U, 75890624U, 76021696U, 76152256U, 76283072U, - 76414144U, 76545856U, 76676672U, 76806976U, 76937792U, 77070016U, 77200832U, - 77331392U, 77462464U, 77593664U, 77725376U, 77856448U, 77987776U, 78118336U, - 78249664U, 78380992U, 78511424U, 78642496U, 78773056U, 78905152U, 79033664U, - 79166656U, 79297472U, 79429568U, 79560512U, 79690816U, 79822784U, 79953472U, - 80084672U, 80214208U, 80346944U, 80477632U, 80608576U, 80740288U, 80870848U, - 81002048U, 81133504U, 81264448U, 81395648U, 81525952U, 81657536U, 81786304U, - 81919808U, 82050112U, 82181312U, 82311616U, 82443968U, 82573376U, 82705984U, - 82835776U, 82967744U, 83096768U, 83230528U, 83359552U, 83491264U, 83622464U, - 83753536U, 83886016U, 84015296U, 84147776U, 84277184U, 84409792U, 84540608U, - 84672064U, 84803008U, 84934336U, 85065152U, 85193792U, 85326784U, 85458496U, - 85589312U, 85721024U, 
85851968U, 85982656U, 86112448U, 86244416U, 86370112U, - 86506688U, 86637632U, 86769344U, 86900672U, 87031744U, 87162304U, 87293632U, - 87424576U, 87555392U, 87687104U, 87816896U, 87947968U, 88079168U, 88211264U, - 88341824U, 88473152U, 88603712U, 88735424U, 88862912U, 88996672U, 89128384U, - 89259712U, 89390272U, 89521984U, 89652544U, 89783872U, 89914816U, 90045376U, - 90177088U, 90307904U, 90438848U, 90569152U, 90700096U, 90832832U, 90963776U, - 91093696U, 91223744U, 91356992U, 91486784U, 91618496U, 91749824U, 91880384U, - 92012224U, 92143552U, 92273344U, 92405696U, 92536768U, 92666432U, 92798912U, - 92926016U, 93060544U, 93192128U, 93322816U, 93453632U, 93583936U, 93715136U, - 93845056U, 93977792U, 94109504U, 94240448U, 94371776U, 94501184U, 94632896U, - 94764224U, 94895552U, 95023424U, 95158208U, 95287744U, 95420224U, 95550016U, - 95681216U, 95811904U, 95943872U, 96075328U, 96203584U, 96337856U, 96468544U, - 96599744U, 96731072U, 96860992U, 96992576U, 97124288U, 97254848U, 97385536U, - 97517248U, 97647808U, 97779392U, 97910464U, 98041408U, 98172608U, 98303168U, - 98434496U, 98565568U, 98696768U, 98827328U, 98958784U, 99089728U, 99220928U, - 99352384U, 99482816U, 99614272U, 99745472U, 99876416U, 100007104U, - 100138048U, 100267072U, 100401088U, 100529984U, 100662592U, 100791872U, - 100925248U, 101056064U, 101187392U, 101317952U, 101449408U, 101580608U, - 101711296U, 101841728U, 101973824U, 102104896U, 102235712U, 102366016U, - 102498112U, 102628672U, 102760384U, 102890432U, 103021888U, 103153472U, - 103284032U, 103415744U, 103545152U, 103677248U, 103808576U, 103939648U, - 104070976U, 104201792U, 104332736U, 104462528U, 104594752U, 104725952U, - 104854592U, 104988608U, 105118912U, 105247808U, 105381184U, 105511232U, - 105643072U, 105774784U, 105903296U, 106037056U, 106167872U, 106298944U, - 106429504U, 106561472U, 106691392U, 106822592U, 106954304U, 107085376U, - 107216576U, 107346368U, 107478464U, 107609792U, 107739712U, 107872192U, - 108003136U, 108131392U, 108265408U, 108396224U, 108527168U, 108657344U, - 108789568U, 108920384U, 109049792U, 109182272U, 109312576U, 109444928U, - 109572928U, 109706944U, 109837888U, 109969088U, 110099648U, 110230976U, - 110362432U, 110492992U, 110624704U, 110755264U, 110886208U, 111017408U, - 111148864U, 111279296U, 111410752U, 111541952U, 111673024U, 111803456U, - 111933632U, 112066496U, 112196416U, 112328512U, 112457792U, 112590784U, - 112715968U, 112852672U, 112983616U, 113114944U, 113244224U, 113376448U, - 113505472U, 113639104U, 113770304U, 113901376U, 114031552U, 114163264U, - 114294592U, 114425536U, 114556864U, 114687424U, 114818624U, 114948544U, - 115080512U, 115212224U, 115343296U, 115473472U, 115605184U, 115736128U, - 115867072U, 115997248U, 116128576U, 116260288U, 116391488U, 116522944U, - 116652992U, 116784704U, 116915648U, 117046208U, 117178304U, 117308608U, - 117440192U, 117569728U, 117701824U, 117833024U, 117964096U, 118094656U, - 118225984U, 118357312U, 118489024U, 118617536U, 118749632U, 118882112U, - 119012416U, 119144384U, 119275328U, 119406016U, 119537344U, 119668672U, - 119798464U, 119928896U, 120061376U, 120192832U, 120321728U, 120454336U, - 120584512U, 120716608U, 120848192U, 120979136U, 121109056U, 121241408U, - 121372352U, 121502912U, 121634752U, 121764416U, 121895744U, 122027072U, - 122157632U, 122289088U, 122421184U, 122550592U, 122682944U, 122813888U, - 122945344U, 123075776U, 123207488U, 123338048U, 123468736U, 123600704U, - 123731264U, 123861952U, 123993664U, 124124608U, 124256192U, 124386368U, - 124518208U, 124649024U, 
124778048U, 124911296U, 125041088U, 125173696U, - 125303744U, 125432896U, 125566912U, 125696576U, 125829056U, 125958592U, - 126090304U, 126221248U, 126352832U, 126483776U, 126615232U, 126746432U, - 126876608U, 127008704U, 127139392U, 127270336U, 127401152U, 127532224U, - 127663552U, 127794752U, 127925696U, 128055232U, 128188096U, 128319424U, - 128449856U, 128581312U, 128712256U, 128843584U, 128973632U, 129103808U, - 129236288U, 129365696U, 129498944U, 129629888U, 129760832U, 129892288U, - 130023104U, 130154048U, 130283968U, 130416448U, 130547008U, 130678336U, - 130807616U, 130939456U, 131071552U, 131202112U, 131331776U, 131464384U, - 131594048U, 131727296U, 131858368U, 131987392U, 132120256U, 132250816U, - 132382528U, 132513728U, 132644672U, 132774976U, 132905792U, 133038016U, - 133168832U, 133299392U, 133429312U, 133562048U, 133692992U, 133823296U, - 133954624U, 134086336U, 134217152U, 134348608U, 134479808U, 134607296U, - 134741056U, 134872384U, 135002944U, 135134144U, 135265472U, 135396544U, - 135527872U, 135659072U, 135787712U, 135921472U, 136052416U, 136182848U, - 136313792U, 136444864U, 136576448U, 136707904U, 136837952U, 136970048U, - 137099584U, 137232064U, 137363392U, 137494208U, 137625536U, 137755712U, - 137887424U, 138018368U, 138149824U, 138280256U, 138411584U, 138539584U, - 138672832U, 138804928U, 138936128U, 139066688U, 139196864U, 139328704U, - 139460032U, 139590208U, 139721024U, 139852864U, 139984576U, 140115776U, - 140245696U, 140376512U, 140508352U, 140640064U, 140769856U, 140902336U, - 141032768U, 141162688U, 141294016U, 141426496U, 141556544U, 141687488U, - 141819584U, 141949888U, 142080448U, 142212544U, 142342336U, 142474432U, - 142606144U, 142736192U, 142868288U, 142997824U, 143129408U, 143258944U, - 143392448U, 143523136U, 143653696U, 143785024U, 143916992U, 144045632U, - 144177856U, 144309184U, 144440768U, 144570688U, 144701888U, 144832448U, - 144965056U, 145096384U, 145227584U, 145358656U, 145489856U, 145620928U, - 145751488U, 145883072U, 146011456U, 146144704U, 146275264U, 146407232U, - 146538176U, 146668736U, 146800448U, 146931392U, 147062336U, 147193664U, - 147324224U, 147455936U, 147586624U, 147717056U, 147848768U, 147979456U, - 148110784U, 148242368U, 148373312U, 148503232U, 148635584U, 148766144U, - 148897088U, 149028416U, 149159488U, 149290688U, 149420224U, 149551552U, - 149683136U, 149814976U, 149943616U, 150076352U, 150208064U, 150338624U, - 150470464U, 150600256U, 150732224U, 150862784U, 150993088U, 151125952U, - 151254976U, 151388096U, 151519168U, 151649728U, 151778752U, 151911104U, - 152042944U, 152174144U, 152304704U, 152435648U, 152567488U, 152698816U, - 152828992U, 152960576U, 153091648U, 153222976U, 153353792U, 153484096U, - 153616192U, 153747008U, 153878336U, 154008256U, 154139968U, 154270912U, - 154402624U, 154533824U, 154663616U, 154795712U, 154926272U, 155057984U, - 155188928U, 155319872U, 155450816U, 155580608U, 155712064U, 155843392U, - 155971136U, 156106688U, 156237376U, 156367424U, 156499264U, 156630976U, - 156761536U, 156892352U, 157024064U, 157155008U, 157284416U, 157415872U, - 157545536U, 157677248U, 157810496U, 157938112U, 158071744U, 158203328U, - 158334656U, 158464832U, 158596288U, 158727616U, 158858048U, 158988992U, - 159121216U, 159252416U, 159381568U, 159513152U, 159645632U, 159776192U, - 159906496U, 160038464U, 160169536U, 160300352U, 160430656U, 160563008U, - 160693952U, 160822208U, 160956352U, 161086784U, 161217344U, 161349184U, - 161480512U, 161611456U, 161742272U, 161873216U, 162002752U, 162135872U, - 162266432U, 162397888U, 
162529216U, 162660032U, 162790976U, 162922048U, - 163052096U, 163184576U, 163314752U, 163446592U, 163577408U, 163707968U, - 163839296U, 163969984U, 164100928U, 164233024U, 164364224U, 164494912U, - 164625856U, 164756672U, 164887616U, 165019072U, 165150016U, 165280064U, - 165412672U, 165543104U, 165674944U, 165805888U, 165936832U, 166067648U, - 166198336U, 166330048U, 166461248U, 166591552U, 166722496U, 166854208U, - 166985408U, 167116736U, 167246656U, 167378368U, 167508416U, 167641024U, - 167771584U, 167903168U, 168034112U, 168164032U, 168295744U, 168427456U, - 168557632U, 168688448U, 168819136U, 168951616U, 169082176U, 169213504U, - 169344832U, 169475648U, 169605952U, 169738048U, 169866304U, 169999552U, - 170131264U, 170262464U, 170393536U, 170524352U, 170655424U, 170782016U, - 170917696U, 171048896U, 171179072U, 171310784U, 171439936U, 171573184U, - 171702976U, 171835072U, 171966272U, 172097216U, 172228288U, 172359232U, - 172489664U, 172621376U, 172747712U, 172883264U, 173014208U, 173144512U, - 173275072U, 173407424U, 173539136U, 173669696U, 173800768U, 173931712U, - 174063424U, 174193472U, 174325696U, 174455744U, 174586816U, 174718912U, - 174849728U, 174977728U, 175109696U, 175242688U, 175374272U, 175504832U, - 175636288U, 175765696U, 175898432U, 176028992U, 176159936U, 176291264U, - 176422592U, 176552512U, 176684864U, 176815424U, 176946496U, 177076544U, - 177209152U, 177340096U, 177470528U, 177600704U, 177731648U, 177864256U, - 177994816U, 178126528U, 178257472U, 178387648U, 178518464U, 178650176U, - 178781888U, 178912064U, 179044288U, 179174848U, 179305024U, 179436736U, - 179568448U, 179698496U, 179830208U, 179960512U, 180092608U, 180223808U, - 180354752U, 180485696U, 180617152U, 180748096U, 180877504U, 181009984U, - 181139264U, 181272512U, 181402688U, 181532608U, 181663168U, 181795136U, - 181926592U, 182057536U, 182190016U, 182320192U, 182451904U, 182582336U, - 182713792U, 182843072U, 182976064U, 183107264U, 183237056U, 183368384U, - 183494848U, 183631424U, 183762752U, 183893824U, 184024768U, 184154816U, - 184286656U, 184417984U, 184548928U, 184680128U, 184810816U, 184941248U, - 185072704U, 185203904U, 185335616U, 185465408U, 185596352U, 185727296U, - 185859904U, 185989696U, 186121664U, 186252992U, 186383552U, 186514112U, - 186645952U, 186777152U, 186907328U, 187037504U, 187170112U, 187301824U, - 187429184U, 187562048U, 187693504U, 187825472U, 187957184U, 188087104U, - 188218304U, 188349376U, 188481344U, 188609728U, 188743616U, 188874304U, - 189005248U, 189136448U, 189265088U, 189396544U, 189528128U, 189660992U, - 189791936U, 189923264U, 190054208U, 190182848U, 190315072U, 190447424U, - 190577984U, 190709312U, 190840768U, 190971328U, 191102656U, 191233472U, - 191364032U, 191495872U, 191626816U, 191758016U, 191888192U, 192020288U, - 192148928U, 192282176U, 192413504U, 192542528U, 192674752U, 192805952U, - 192937792U, 193068608U, 193198912U, 193330496U, 193462208U, 193592384U, - 193723456U, 193854272U, 193985984U, 194116672U, 194247232U, 194379712U, - 194508352U, 194641856U, 194772544U, 194900672U, 195035072U, 195166016U, - 195296704U, 195428032U, 195558592U, 195690304U, 195818176U, 195952576U, - 196083392U, 196214336U, 196345792U, 196476736U, 196607552U, 196739008U, - 196869952U, 197000768U, 197130688U, 197262784U, 197394368U, 197523904U, - 197656384U, 197787584U, 197916608U, 198049472U, 198180544U, 198310208U, - 198442432U, 198573632U, 198705088U, 198834368U, 198967232U, 199097792U, - 199228352U, 199360192U, 199491392U, 199621696U, 199751744U, 199883968U, - 200014016U, 200146624U, 
200276672U, 200408128U, 200540096U, 200671168U, - 200801984U, 200933312U, 201062464U, 201194944U, 201326144U, 201457472U, - 201588544U, 201719744U, 201850816U, 201981632U, 202111552U, 202244032U, - 202374464U, 202505152U, 202636352U, 202767808U, 202898368U, 203030336U, - 203159872U, 203292608U, 203423296U, 203553472U, 203685824U, 203816896U, - 203947712U, 204078272U, 204208192U, 204341056U, 204472256U, 204603328U, - 204733888U, 204864448U, 204996544U, 205125568U, 205258304U, 205388864U, - 205517632U, 205650112U, 205782208U, 205913536U, 206044736U, 206176192U, - 206307008U, 206434496U, 206569024U, 206700224U, 206831168U, 206961856U, - 207093056U, 207223616U, 207355328U, 207486784U, 207616832U, 207749056U, - 207879104U, 208010048U, 208141888U, 208273216U, 208404032U, 208534336U, - 208666048U, 208796864U, 208927424U, 209059264U, 209189824U, 209321792U, - 209451584U, 209582656U, 209715136U, 209845568U, 209976896U, 210106432U, - 210239296U, 210370112U, 210501568U, 210630976U, 210763712U, 210894272U, - 211024832U, 211156672U, 211287616U, 211418176U, 211549376U, 211679296U, - 211812032U, 211942592U, 212074432U, 212204864U, 212334016U, 212467648U, - 212597824U, 212727616U, 212860352U, 212991424U, 213120832U, 213253952U, - 213385024U, 213515584U, 213645632U, 213777728U, 213909184U, 214040128U, - 214170688U, 214302656U, 214433728U, 214564544U, 214695232U, 214826048U, - 214956992U, 215089088U, 215219776U, 215350592U, 215482304U, 215613248U, - 215743552U, 215874752U, 216005312U, 216137024U, 216267328U, 216399296U, - 216530752U, 216661696U, 216790592U, 216923968U, 217054528U, 217183168U, - 217316672U, 217448128U, 217579072U, 217709504U, 217838912U, 217972672U, - 218102848U, 218233024U, 218364736U, 218496832U, 218627776U, 218759104U, - 218888896U, 219021248U, 219151936U, 219281728U, 219413056U, 219545024U, - 219675968U, 219807296U, 219938624U, 220069312U, 220200128U, 220331456U, - 220461632U, 220592704U, 220725184U, 220855744U, 220987072U, 221117888U, - 221249216U, 221378368U, 221510336U, 221642048U, 221772736U, 221904832U, - 222031808U, 222166976U, 222297536U, 222428992U, 222559936U, 222690368U, - 222820672U, 222953152U, 223083968U, 223213376U, 223345984U, 223476928U, - 223608512U, 223738688U, 223869376U, 224001472U, 224132672U, 224262848U, - 224394944U, 224524864U, 224657344U, 224788288U, 224919488U, 225050432U, - 225181504U, 225312704U, 225443776U, 225574592U, 225704768U, 225834176U, - 225966784U, 226097216U, 226229824U, 226360384U, 226491712U, 226623424U, - 226754368U, 226885312U, 227015104U, 227147456U, 227278528U, 227409472U, - 227539904U, 227669696U, 227802944U, 227932352U, 228065216U, 228196288U, - 228326464U, 228457792U, 228588736U, 228720064U, 228850112U, 228981056U, - 229113152U, 229243328U, 229375936U, 229505344U, 229636928U, 229769152U, - 229894976U, 230030272U, 230162368U, 230292416U, 230424512U, 230553152U, - 230684864U, 230816704U, 230948416U, 231079616U, 231210944U, 231342016U, - 231472448U, 231603776U, 231733952U, 231866176U, 231996736U, 232127296U, - 232259392U, 232388672U, 232521664U, 232652608U, 232782272U, 232914496U, - 233043904U, 233175616U, 233306816U, 233438528U, 233569984U, 233699776U, - 233830592U, 233962688U, 234092224U, 234221888U, 234353984U, 234485312U, - 234618304U, 234749888U, 234880832U, 235011776U, 235142464U, 235274048U, - 235403456U, 235535936U, 235667392U, 235797568U, 235928768U, 236057152U, - 236190272U, 236322752U, 236453312U, 236583616U, 236715712U, 236846528U, - 236976448U, 237108544U, 237239104U, 237371072U, 237501632U, 237630784U, - 237764416U, 237895232U, 
238026688U, 238157632U, 238286912U, 238419392U, - 238548032U, 238681024U, 238812608U, 238941632U, 239075008U, 239206336U, - 239335232U, 239466944U, 239599168U, 239730496U, 239861312U, 239992384U, - 240122816U, 240254656U, 240385856U, 240516928U, 240647872U, 240779072U, - 240909632U, 241040704U, 241171904U, 241302848U, 241433408U, 241565248U, - 241696192U, 241825984U, 241958848U, 242088256U, 242220224U, 242352064U, - 242481856U, 242611648U, 242744896U, 242876224U, 243005632U, 243138496U, - 243268672U, 243400384U, 243531712U, 243662656U, 243793856U, 243924544U, - 244054592U, 244187072U, 244316608U, 244448704U, 244580032U, 244710976U, - 244841536U, 244972864U, 245104448U, 245233984U, 245365312U, 245497792U, - 245628736U, 245759936U, 245889856U, 246021056U, 246152512U, 246284224U, - 246415168U, 246545344U, 246675904U, 246808384U, 246939584U, 247070144U, - 247199552U, 247331648U, 247463872U, 247593536U, 247726016U, 247857088U, - 247987648U, 248116928U, 248249536U, 248380736U, 248512064U, 248643008U, - 248773312U, 248901056U, 249036608U, 249167552U, 249298624U, 249429184U, - 249560512U, 249692096U, 249822784U, 249954112U, 250085312U, 250215488U, - 250345792U, 250478528U, 250608704U, 250739264U, 250870976U, 251002816U, - 251133632U, 251263552U, 251395136U, 251523904U, 251657792U, 251789248U, - 251919424U, 252051392U, 252182464U, 252313408U, 252444224U, 252575552U, - 252706624U, 252836032U, 252968512U, 253099712U, 253227584U, 253361728U, - 253493056U, 253623488U, 253754432U, 253885504U, 254017216U, 254148032U, - 254279488U, 254410432U, 254541376U, 254672576U, 254803264U, 254933824U, - 255065792U, 255196736U, 255326528U, 255458752U, 255589952U, 255721408U, - 255851072U, 255983296U, 256114624U, 256244416U, 256374208U, 256507712U, - 256636096U, 256768832U, 256900544U, 257031616U, 257162176U, 257294272U, - 257424448U, 257555776U, 257686976U, 257818432U, 257949632U, 258079552U, - 258211136U, 258342464U, 258473408U, 258603712U, 258734656U, 258867008U, - 258996544U, 259127744U, 259260224U, 259391296U, 259522112U, 259651904U, - 259784384U, 259915328U, 260045888U, 260175424U, 260308544U, 260438336U, - 260570944U, 260700992U, 260832448U, 260963776U, 261092672U, 261226304U, - 261356864U, 261487936U, 261619648U, 261750592U, 261879872U, 262011968U, - 262143424U, 262274752U, 262404416U, 262537024U, 262667968U, 262799296U, - 262928704U, 263061184U, 263191744U, 263322944U, 263454656U, 263585216U, - 263716672U, 263847872U, 263978944U, 264108608U, 264241088U, 264371648U, - 264501184U, 264632768U, 264764096U, 264895936U, 265024576U, 265158464U, - 265287488U, 265418432U, 265550528U, 265681216U, 265813312U, 265943488U, - 266075968U, 266206144U, 266337728U, 266468032U, 266600384U, 266731072U, - 266862272U, 266993344U, 267124288U, 267255616U, 267386432U, 267516992U, - 267648704U, 267777728U, 267910592U, 268040512U, 268172096U, 268302784U, - 268435264U, 268566208U, 268696256U, 268828096U, 268959296U, 269090368U, - 269221312U, 269352256U, 269482688U, 269614784U, 269745856U, 269876416U, - 270007616U, 270139328U, 270270272U, 270401216U, 270531904U, 270663616U, - 270791744U, 270924736U, 271056832U, 271186112U, 271317184U, 271449536U, - 271580992U, 271711936U, 271843136U, 271973056U, 272105408U, 272236352U, - 272367296U, 272498368U, 272629568U, 272759488U, 272891456U, 273022784U, - 273153856U, 273284672U, 273415616U, 273547072U, 273677632U, 273808448U, - 273937088U, 274071488U, 274200896U, 274332992U, 274463296U, 274595392U, - 274726208U, 274857536U, 274988992U, 275118656U, 275250496U, 275382208U, - 275513024U, 275643968U, 
275775296U, 275906368U, 276037184U, 276167872U, - 276297664U, 276429376U, 276560576U, 276692672U, 276822976U, 276955072U, - 277085632U, 277216832U, 277347008U, 277478848U, 277609664U, 277740992U, - 277868608U, 278002624U, 278134336U, 278265536U, 278395328U, 278526784U, - 278657728U, 278789824U, 278921152U, 279052096U, 279182912U, 279313088U, - 279443776U, 279576256U, 279706048U, 279838528U, 279969728U, 280099648U, - 280230976U, 280361408U, 280493632U, 280622528U, 280755392U, 280887104U, - 281018176U, 281147968U, 281278912U, 281411392U, 281542592U, 281673152U, - 281803712U, 281935552U, 282066496U, 282197312U, 282329024U, 282458816U, - 282590272U, 282720832U, 282853184U, 282983744U, 283115072U, 283246144U, - 283377344U, 283508416U, 283639744U, 283770304U, 283901504U, 284032576U, - 284163136U, 284294848U, 284426176U, 284556992U, 284687296U, 284819264U, - 284950208U, 285081536U -}; - -#ifdef __cplusplus -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/endian.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/endian.h deleted file mode 100644 index 5b8abf03d3097..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/endian.h +++ /dev/null @@ -1,78 +0,0 @@ -#pragma once - -#include -#include "compiler.h" - -#if defined(__MINGW32__) || defined(_WIN32) - # define LITTLE_ENDIAN 1234 - # define BYTE_ORDER LITTLE_ENDIAN -#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__) - # include -#elif defined(__OpenBSD__) || defined(__SVR4) - # include -#elif defined(__APPLE__) -# include -#elif defined( BSD ) && (BSD >= 199103) - # include -#elif defined( __QNXNTO__ ) && defined( __LITTLEENDIAN__ ) - # define LITTLE_ENDIAN 1234 - # define BYTE_ORDER LITTLE_ENDIAN -#elif defined( __QNXNTO__ ) && defined( __BIGENDIAN__ ) - # define BIG_ENDIAN 1234 - # define BYTE_ORDER BIG_ENDIAN -#else -# include -#endif - -#if defined(_WIN32) -#include -#define ethash_swap_u32(input_) _byteswap_ulong(input_) -#define ethash_swap_u64(input_) _byteswap_uint64(input_) -#elif defined(__APPLE__) -#include -#define ethash_swap_u32(input_) OSSwapInt32(input_) -#define ethash_swap_u64(input_) OSSwapInt64(input_) -#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__) -#define ethash_swap_u32(input_) bswap32(input_) -#define ethash_swap_u64(input_) bswap64(input_) -#elif defined(__OpenBSD__) -#include -#define ethash_swap_u32(input_) swap32(input_) -#define ethash_swap_u64(input_) swap64(input_) -#else // posix -#include -#define ethash_swap_u32(input_) bswap_32(input_) -#define ethash_swap_u64(input_) bswap_64(input_) -#endif - - -#if LITTLE_ENDIAN == BYTE_ORDER - -#define fix_endian32(dst_ ,src_) dst_ = src_ -#define fix_endian32_same(val_) -#define fix_endian64(dst_, src_) dst_ = src_ -#define fix_endian64_same(val_) -#define fix_endian_arr32(arr_, size_) -#define fix_endian_arr64(arr_, size_) - -#elif BIG_ENDIAN == BYTE_ORDER - -#define fix_endian32(dst_, src_) dst_ = ethash_swap_u32(src_) -#define fix_endian32_same(val_) val_ = ethash_swap_u32(val_) -#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_) -#define fix_endian64_same(val_) val_ = ethash_swap_u64(val_) -#define fix_endian_arr32(arr_, size_) \ - do { \ - for (unsigned i_ = 0; i_ < (size_); ++i_) { \ - arr_[i_] = ethash_swap_u32(arr_[i_]); \ - } \ - } while (0) -#define fix_endian_arr64(arr_, size_) \ - do { \ - for (unsigned i_ = 0; i_ < (size_); ++i_) { \ - arr_[i_] = ethash_swap_u64(arr_[i_]); \ - } \ - } while (0) -#else -# 
error "endian not supported" -#endif // BYTE_ORDER diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/ethash.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/ethash.h deleted file mode 100644 index 0c6a1f9e90a93..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/ethash.h +++ /dev/null @@ -1,147 +0,0 @@ -/* - This file is part of ethash. - - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with ethash. If not, see . -*/ - -/** @file ethash.h -* @date 2015 -*/ -#pragma once - -#include -#include -#include -#include -#include "compiler.h" - -#define ETHASH_REVISION 23 -#define ETHASH_DATASET_BYTES_INIT 1073741824U // 2**30 -#define ETHASH_DATASET_BYTES_GROWTH 8388608U // 2**23 -#define ETHASH_CACHE_BYTES_INIT 1073741824U // 2**24 -#define ETHASH_CACHE_BYTES_GROWTH 131072U // 2**17 -#define ETHASH_EPOCH_LENGTH 30000U -#define ETHASH_MIX_BYTES 128 -#define ETHASH_HASH_BYTES 64 -#define ETHASH_DATASET_PARENTS 256 -#define ETHASH_CACHE_ROUNDS 3 -#define ETHASH_ACCESSES 64 -#define ETHASH_DAG_MAGIC_NUM_SIZE 8 -#define ETHASH_DAG_MAGIC_NUM 0xFEE1DEADBADDCAFE - -#ifdef __cplusplus -extern "C" { -#endif - -/// Type of a seedhash/blockhash e.t.c. -typedef struct ethash_h256 { uint8_t b[32]; } ethash_h256_t; - -// convenience macro to statically initialize an h256_t -// usage: -// ethash_h256_t a = ethash_h256_static_init(1, 2, 3, ... ) -// have to provide all 32 values. If you don't provide all the rest -// will simply be unitialized (not guranteed to be 0) -#define ethash_h256_static_init(...) \ - { {__VA_ARGS__} } - -struct ethash_light; -typedef struct ethash_light* ethash_light_t; -struct ethash_full; -typedef struct ethash_full* ethash_full_t; -typedef int(*ethash_callback_t)(unsigned); - -typedef struct ethash_return_value { - ethash_h256_t result; - ethash_h256_t mix_hash; - bool success; -} ethash_return_value_t; - -/** - * Allocate and initialize a new ethash_light handler - * - * @param block_number The block number for which to create the handler - * @return Newly allocated ethash_light handler or NULL in case of - * ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes() - */ -ethash_light_t ethash_light_new(uint64_t block_number); -/** - * Frees a previously allocated ethash_light handler - * @param light The light handler to free - */ -void ethash_light_delete(ethash_light_t light); -/** - * Calculate the light client data - * - * @param light The light client handler - * @param header_hash The header hash to pack into the mix - * @param nonce The nonce to pack into the mix - * @return an object of ethash_return_value_t holding the return values - */ -ethash_return_value_t ethash_light_compute( - ethash_light_t light, - ethash_h256_t const header_hash, - uint64_t nonce -); - -/** - * Allocate and initialize a new ethash_full handler - * - * @param light The light handler containing the cache. 
- * @param callback A callback function with signature of @ref ethash_callback_t - * It accepts an unsigned with which a progress of DAG calculation - * can be displayed. If all goes well the callback should return 0. - * If a non-zero value is returned then DAG generation will stop. - * Be advised. A progress value of 100 means that DAG creation is - * almost complete and that this function will soon return succesfully. - * It does not mean that the function has already had a succesfull return. - * @return Newly allocated ethash_full handler or NULL in case of - * ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data() - */ -ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback); - -/** - * Frees a previously allocated ethash_full handler - * @param full The light handler to free - */ -void ethash_full_delete(ethash_full_t full); -/** - * Calculate the full client data - * - * @param full The full client handler - * @param header_hash The header hash to pack into the mix - * @param nonce The nonce to pack into the mix - * @return An object of ethash_return_value to hold the return value - */ -ethash_return_value_t ethash_full_compute( - ethash_full_t full, - ethash_h256_t const header_hash, - uint64_t nonce -); -/** - * Get a pointer to the full DAG data - */ -void const* ethash_full_dag(ethash_full_t full); -/** - * Get the size of the DAG data - */ -uint64_t ethash_full_dag_size(ethash_full_t full); - -/** - * Calculate the seedhash for a given block number - */ -ethash_h256_t ethash_get_seedhash(uint64_t block_number); - -#ifdef __cplusplus -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/fnv.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/fnv.h deleted file mode 100644 index e11090f6ac450..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/fnv.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - This file is part of cpp-expanse. - - cpp-expanse is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - cpp-expanse is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with cpp-expanse. If not, see . -*/ -/** @file fnv.h -* @author Matthew Wampler-Doty -* @date 2015 -*/ - -#pragma once -#include -#include "compiler.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define FNV_PRIME 0x01000193 - -/* The FNV-1 spec multiplies the prime with the input one byte (octet) in turn. - We instead multiply it with the full 32-bit input. - This gives a different result compared to a canonical FNV-1 implementation. -*/ -static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y) -{ - return x * FNV_PRIME ^ y; -} - -#ifdef __cplusplus -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/internal.c b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/internal.c deleted file mode 100644 index b609509bae3e3..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/internal.c +++ /dev/null @@ -1,507 +0,0 @@ -/* - This file is part of ethash. 
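The comment in fnv.h is worth making concrete: because the whole 32-bit word is multiplied at once and there is no offset basis, this "FNV" diverges from a canonical FNV-1 over the same four bytes. A throwaway comparison, assuming nothing beyond what the header defines (the fnv1 helper and sample values are ours):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define FNV_PRIME 0x01000193u

/* ethash's word-wise variant, exactly as in fnv.h above */
static uint32_t fnv_hash(uint32_t x, uint32_t y) { return x * FNV_PRIME ^ y; }

/* canonical FNV-1 over a byte stream, for contrast */
static uint32_t fnv1(const uint8_t *data, size_t len)
{
	uint32_t h = 0x811c9dc5u;   /* 32-bit FNV offset basis */
	for (size_t i = 0; i < len; ++i) {
		h *= FNV_PRIME;     /* multiply first: FNV-1, not FNV-1a */
		h ^= data[i];
	}
	return h;
}

int main(void)
{
	uint32_t x = 0xdeadbeefu, y = 0xcafebabeu;
	uint8_t bytes[4] = { 0xbe, 0xba, 0xfe, 0xca }; /* y, little-endian */
	printf("ethash fnv_hash: %08" PRIx32 "\n", fnv_hash(x, y));
	printf("canonical FNV-1: %08" PRIx32 "\n", fnv1(bytes, 4));
	return 0;
}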
- - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with cpp-expanse. If not, see . -*/ -/** @file internal.c -* @author Tim Hughes -* @author Matthew Wampler-Doty -* @date 2015 -*/ - -#include -#include -#include -#include -#include -#include "mmap.h" -#include "ethash.h" -#include "fnv.h" -#include "endian.h" -#include "internal.h" -#include "data_sizes.h" -#include "io.h" - -#ifdef WITH_CRYPTOPP - -#include "sha3_cryptopp.h" - -#else -#include "sha3.h" -#endif // WITH_CRYPTOPP - -uint64_t ethash_get_datasize(uint64_t const block_number) -{ - assert(block_number / ETHASH_EPOCH_LENGTH < 2048); - return dag_sizes[block_number / ETHASH_EPOCH_LENGTH]; -} - -uint64_t ethash_get_cachesize(uint64_t const block_number) -{ - assert(block_number / ETHASH_EPOCH_LENGTH < 2048); - return cache_sizes[block_number / ETHASH_EPOCH_LENGTH]; -} - -// Follows Sergio's "STRICT MEMORY HARD HASHING FUNCTIONS" (2014) -// https://bitslog.files.wordpress.com/2013/12/memohash-v0-3.pdf -// SeqMemoHash(s, R, N) -bool static ethash_compute_cache_nodes( - node* const nodes, - uint64_t cache_size, - ethash_h256_t const* seed -) -{ - if (cache_size % sizeof(node) != 0) { - return false; - } - uint32_t const num_nodes = (uint32_t) (cache_size / sizeof(node)); - - SHA3_512(nodes[0].bytes, (uint8_t*)seed, 32); - - for (uint32_t i = 1; i != num_nodes; ++i) { - SHA3_512(nodes[i].bytes, nodes[i - 1].bytes, 64); - } - - for (uint32_t j = 0; j != ETHASH_CACHE_ROUNDS; j++) { - for (uint32_t i = 0; i != num_nodes; i++) { - uint32_t const idx = nodes[i].words[0] % num_nodes; - node data; - data = nodes[(num_nodes - 1 + i) % num_nodes]; - for (uint32_t w = 0; w != NODE_WORDS; ++w) { - data.words[w] ^= nodes[idx].words[w]; - } - SHA3_512(nodes[i].bytes, data.bytes, sizeof(data)); - } - } - - // now perform endian conversion - fix_endian_arr32(nodes->words, num_nodes * NODE_WORDS); - return true; -} - -void ethash_calculate_dag_item( - node* const ret, - uint32_t node_index, - ethash_light_t const light -) -{ - uint32_t num_parent_nodes = (uint32_t) (light->cache_size / sizeof(node)); - node const* cache_nodes = (node const *) light->cache; - node const* init = &cache_nodes[node_index % num_parent_nodes]; - memcpy(ret, init, sizeof(node)); - ret->words[0] ^= node_index; - SHA3_512(ret->bytes, ret->bytes, sizeof(node)); -#if defined(_M_X64) && ENABLE_SSE - __m128i const fnv_prime = _mm_set1_epi32(FNV_PRIME); - __m128i xmm0 = ret->xmm[0]; - __m128i xmm1 = ret->xmm[1]; - __m128i xmm2 = ret->xmm[2]; - __m128i xmm3 = ret->xmm[3]; -#endif - - for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) { - uint32_t parent_index = fnv_hash(node_index ^ i, ret->words[i % NODE_WORDS]) % num_parent_nodes; - node const *parent = &cache_nodes[parent_index]; - -#if defined(_M_X64) && ENABLE_SSE - { - xmm0 = _mm_mullo_epi32(xmm0, fnv_prime); - xmm1 = _mm_mullo_epi32(xmm1, fnv_prime); - xmm2 = _mm_mullo_epi32(xmm2, fnv_prime); - xmm3 = _mm_mullo_epi32(xmm3, fnv_prime); - xmm0 = _mm_xor_si128(xmm0, parent->xmm[0]); - xmm1 = _mm_xor_si128(xmm1, 
parent->xmm[1]); - xmm2 = _mm_xor_si128(xmm2, parent->xmm[2]); - xmm3 = _mm_xor_si128(xmm3, parent->xmm[3]); - - // have to write to ret as values are used to compute index - ret->xmm[0] = xmm0; - ret->xmm[1] = xmm1; - ret->xmm[2] = xmm2; - ret->xmm[3] = xmm3; - } - #else - { - for (unsigned w = 0; w != NODE_WORDS; ++w) { - ret->words[w] = fnv_hash(ret->words[w], parent->words[w]); - } - } -#endif - } - SHA3_512(ret->bytes, ret->bytes, sizeof(node)); -} - -bool ethash_compute_full_data( - void* mem, - uint64_t full_size, - ethash_light_t const light, - ethash_callback_t callback -) -{ - if (full_size % (sizeof(uint32_t) * MIX_WORDS) != 0 || - (full_size % sizeof(node)) != 0) { - return false; - } - uint32_t const max_n = (uint32_t)(full_size / sizeof(node)); - node* full_nodes = mem; - double const progress_change = 1.0f / max_n; - double progress = 0.0f; - // now compute full nodes - for (uint32_t n = 0; n != max_n; ++n) { - if (callback && - n % (max_n / 100) == 0 && - callback((unsigned int)(ceil(progress * 100.0f))) != 0) { - - return false; - } - progress += progress_change; - ethash_calculate_dag_item(&(full_nodes[n]), n, light); - } - return true; -} - -static bool ethash_hash( - ethash_return_value_t* ret, - node const* full_nodes, - ethash_light_t const light, - uint64_t full_size, - ethash_h256_t const header_hash, - uint64_t const nonce -) -{ - if (full_size % MIX_WORDS != 0) { - return false; - } - - // pack hash and nonce togexper into first 40 bytes of s_mix - assert(sizeof(node) * 8 == 512); - node s_mix[MIX_NODES + 1]; - memcpy(s_mix[0].bytes, &header_hash, 32); - fix_endian64(s_mix[0].double_words[4], nonce); - - // compute sha3-512 hash and replicate across mix - SHA3_512(s_mix->bytes, s_mix->bytes, 40); - fix_endian_arr32(s_mix[0].words, 16); - - node* const mix = s_mix + 1; - for (uint32_t w = 0; w != MIX_WORDS; ++w) { - mix->words[w] = s_mix[0].words[w % NODE_WORDS]; - } - - unsigned const page_size = sizeof(uint32_t) * MIX_WORDS; - unsigned const num_full_pages = (unsigned) (full_size / page_size); - - for (unsigned i = 0; i != ETHASH_ACCESSES; ++i) { - uint32_t const index = fnv_hash(s_mix->words[0] ^ i, mix->words[i % MIX_WORDS]) % num_full_pages; - - for (unsigned n = 0; n != MIX_NODES; ++n) { - node const* dag_node; - if (full_nodes) { - dag_node = &full_nodes[MIX_NODES * index + n]; - } else { - node tmp_node; - ethash_calculate_dag_item(&tmp_node, index * MIX_NODES + n, light); - dag_node = &tmp_node; - } - -#if defined(_M_X64) && ENABLE_SSE - { - __m128i fnv_prime = _mm_set1_epi32(FNV_PRIME); - __m128i xmm0 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[0]); - __m128i xmm1 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[1]); - __m128i xmm2 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[2]); - __m128i xmm3 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[3]); - mix[n].xmm[0] = _mm_xor_si128(xmm0, dag_node->xmm[0]); - mix[n].xmm[1] = _mm_xor_si128(xmm1, dag_node->xmm[1]); - mix[n].xmm[2] = _mm_xor_si128(xmm2, dag_node->xmm[2]); - mix[n].xmm[3] = _mm_xor_si128(xmm3, dag_node->xmm[3]); - } - #else - { - for (unsigned w = 0; w != NODE_WORDS; ++w) { - mix[n].words[w] = fnv_hash(mix[n].words[w], dag_node->words[w]); - } - } -#endif - } - - } - - // compress mix - for (uint32_t w = 0; w != MIX_WORDS; w += 4) { - uint32_t reduction = mix->words[w + 0]; - reduction = reduction * FNV_PRIME ^ mix->words[w + 1]; - reduction = reduction * FNV_PRIME ^ mix->words[w + 2]; - reduction = reduction * FNV_PRIME ^ mix->words[w + 3]; - mix->words[w / 4] = reduction; - } - - fix_endian_arr32(mix->words, MIX_WORDS / 
4); - memcpy(&ret->mix_hash, mix->bytes, 32); - // final Keccak hash - SHA3_256(&ret->result, s_mix->bytes, 64 + 32); // Keccak-256(s + compressed_mix) - return true; -} - -void ethash_quick_hash( - ethash_h256_t* return_hash, - ethash_h256_t const* header_hash, - uint64_t nonce, - ethash_h256_t const* mix_hash -) -{ - uint8_t buf[64 + 32]; - memcpy(buf, header_hash, 32); - fix_endian64_same(nonce); - memcpy(&(buf[32]), &nonce, 8); - SHA3_512(buf, buf, 40); - memcpy(&(buf[64]), mix_hash, 32); - SHA3_256(return_hash, buf, 64 + 32); -} - -ethash_h256_t ethash_get_seedhash(uint64_t block_number) -{ - ethash_h256_t ret; - ethash_h256_reset(&ret); - uint64_t const epochs = block_number / ETHASH_EPOCH_LENGTH; - for (uint32_t i = 0; i < epochs; ++i) - SHA3_256(&ret, (uint8_t*)&ret, 32); - return ret; -} - -bool ethash_quick_check_difficulty( - ethash_h256_t const* header_hash, - uint64_t const nonce, - ethash_h256_t const* mix_hash, - ethash_h256_t const* boundary -) -{ - - ethash_h256_t return_hash; - ethash_quick_hash(&return_hash, header_hash, nonce, mix_hash); - return ethash_check_difficulty(&return_hash, boundary); -} - -ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed) -{ - struct ethash_light *ret; - ret = calloc(sizeof(*ret), 1); - if (!ret) { - return NULL; - } - ret->cache = malloc((size_t)cache_size); - if (!ret->cache) { - goto fail_free_light; - } - node* nodes = (node*)ret->cache; - if (!ethash_compute_cache_nodes(nodes, cache_size, seed)) { - goto fail_free_cache_mem; - } - ret->cache_size = cache_size; - return ret; - -fail_free_cache_mem: - free(ret->cache); -fail_free_light: - free(ret); - return NULL; -} - -ethash_light_t ethash_light_new(uint64_t block_number) -{ - ethash_h256_t seedhash = ethash_get_seedhash(block_number); - ethash_light_t ret; - ret = ethash_light_new_internal(ethash_get_cachesize(block_number), &seedhash); - ret->block_number = block_number; - return ret; -} - -void ethash_light_delete(ethash_light_t light) -{ - if (light->cache) { - free(light->cache); - } - free(light); -} - -ethash_return_value_t ethash_light_compute_internal( - ethash_light_t light, - uint64_t full_size, - ethash_h256_t const header_hash, - uint64_t nonce -) -{ - ethash_return_value_t ret; - ret.success = true; - if (!ethash_hash(&ret, NULL, light, full_size, header_hash, nonce)) { - ret.success = false; - } - return ret; -} - -ethash_return_value_t ethash_light_compute( - ethash_light_t light, - ethash_h256_t const header_hash, - uint64_t nonce -) -{ - uint64_t full_size = ethash_get_datasize(light->block_number); - return ethash_light_compute_internal(light, full_size, header_hash, nonce); -} - -static bool ethash_mmap(struct ethash_full* ret, FILE* f) -{ - int fd; - char* mmapped_data; - errno = 0; - ret->file = f; - if ((fd = ethash_fileno(ret->file)) == -1) { - return false; - } - mmapped_data= mmap( - NULL, - (size_t)ret->file_size + ETHASH_DAG_MAGIC_NUM_SIZE, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fd, - 0 - ); - if (mmapped_data == MAP_FAILED) { - return false; - } - ret->data = (node*)(mmapped_data + ETHASH_DAG_MAGIC_NUM_SIZE); - return true; -} - -ethash_full_t ethash_full_new_internal( - char const* dirname, - ethash_h256_t const seed_hash, - uint64_t full_size, - ethash_light_t const light, - ethash_callback_t callback -) -{ - struct ethash_full* ret; - FILE *f = NULL; - ret = calloc(sizeof(*ret), 1); - if (!ret) { - return NULL; - } - ret->file_size = (size_t)full_size; - switch (ethash_io_prepare(dirname, seed_hash, &f, 
(size_t)full_size, false)) { - case ETHASH_IO_FAIL: - // ethash_io_prepare will do all ETHASH_CRITICAL() logging in fail case - goto fail_free_full; - case ETHASH_IO_MEMO_MATCH: - if (!ethash_mmap(ret, f)) { - ETHASH_CRITICAL("mmap failure()"); - goto fail_close_file; - } - return ret; - case ETHASH_IO_MEMO_SIZE_MISMATCH: - // if a DAG of same filename but unexpected size is found, silently force new file creation - if (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, true) != ETHASH_IO_MEMO_MISMATCH) { - ETHASH_CRITICAL("Could not recreate DAG file after finding existing DAG with unexpected size."); - goto fail_free_full; - } - // fallthrough to the mismatch case here, DO NOT go through match - case ETHASH_IO_MEMO_MISMATCH: - if (!ethash_mmap(ret, f)) { - ETHASH_CRITICAL("mmap failure()"); - goto fail_close_file; - } - break; - } - - if (!ethash_compute_full_data(ret->data, full_size, light, callback)) { - ETHASH_CRITICAL("Failure at computing DAG data."); - goto fail_free_full_data; - } - - // after the DAG has been filled then we finalize it by writting the magic number at the beginning - if (fseek(f, 0, SEEK_SET) != 0) { - ETHASH_CRITICAL("Could not seek to DAG file start to write magic number."); - goto fail_free_full_data; - } - uint64_t const magic_num = ETHASH_DAG_MAGIC_NUM; - if (fwrite(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) { - ETHASH_CRITICAL("Could not write magic number to DAG's beginning."); - goto fail_free_full_data; - } - if (fflush(f) != 0) {// make sure the magic number IS there - ETHASH_CRITICAL("Could not flush memory mapped data to DAG file. Insufficient space?"); - goto fail_free_full_data; - } - return ret; - -fail_free_full_data: - // could check that munmap(..) == 0 but even if it did not can't really do anything here - munmap(ret->data, (size_t)full_size); -fail_close_file: - fclose(ret->file); -fail_free_full: - free(ret); - return NULL; -} - -ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback) -{ - char strbuf[256]; - if (!ethash_get_default_dirname(strbuf, 256)) { - return NULL; - } - uint64_t full_size = ethash_get_datasize(light->block_number); - ethash_h256_t seedhash = ethash_get_seedhash(light->block_number); - return ethash_full_new_internal(strbuf, seedhash, full_size, light, callback); -} - -void ethash_full_delete(ethash_full_t full) -{ - // could check that munmap(..) 
== 0 but even if it did not can't really do anything here - munmap(full->data, (size_t)full->file_size); - if (full->file) { - fclose(full->file); - } - free(full); -} - -ethash_return_value_t ethash_full_compute( - ethash_full_t full, - ethash_h256_t const header_hash, - uint64_t nonce -) -{ - ethash_return_value_t ret; - ret.success = true; - if (!ethash_hash( - &ret, - (node const*)full->data, - NULL, - full->file_size, - header_hash, - nonce)) { - ret.success = false; - } - return ret; -} - -void const* ethash_full_dag(ethash_full_t full) -{ - return full->data; -} - -uint64_t ethash_full_dag_size(ethash_full_t full) -{ - return full->file_size; -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/internal.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/internal.h deleted file mode 100644 index 26c395ad6f0e9..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/internal.h +++ /dev/null @@ -1,179 +0,0 @@ -#pragma once -#include "compiler.h" -#include "endian.h" -#include "ethash.h" -#include - -#define ENABLE_SSE 0 - -#if defined(_M_X64) && ENABLE_SSE -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -// compile time settings -#define NODE_WORDS (64/4) -#define MIX_WORDS (ETHASH_MIX_BYTES/4) -#define MIX_NODES (MIX_WORDS / NODE_WORDS) -#include - -typedef union node { - uint8_t bytes[NODE_WORDS * 4]; - uint32_t words[NODE_WORDS]; - uint64_t double_words[NODE_WORDS / 2]; - -#if defined(_M_X64) && ENABLE_SSE - __m128i xmm[NODE_WORDS/4]; -#endif - -} node; - -static inline uint8_t ethash_h256_get(ethash_h256_t const* hash, unsigned int i) -{ - return hash->b[i]; -} - -static inline void ethash_h256_set(ethash_h256_t* hash, unsigned int i, uint8_t v) -{ - hash->b[i] = v; -} - -static inline void ethash_h256_reset(ethash_h256_t* hash) -{ - memset(hash, 0, 32); -} - -// Returns if hash is less than or equal to boundary (2^256/difficulty) -static inline bool ethash_check_difficulty( - ethash_h256_t const* hash, - ethash_h256_t const* boundary -) -{ - // Boundary is big endian - for (int i = 0; i < 32; i++) { - if (ethash_h256_get(hash, i) == ethash_h256_get(boundary, i)) { - continue; - } - return ethash_h256_get(hash, i) < ethash_h256_get(boundary, i); - } - return true; -} - -/** - * Difficulty quick check for POW preverification - * - * @param header_hash The hash of the header - * @param nonce The block's nonce - * @param mix_hash The mix digest hash - * @param boundary The boundary is defined as (2^256 / difficulty) - * @return true for succesful pre-verification and false otherwise - */ -bool ethash_quick_check_difficulty( - ethash_h256_t const* header_hash, - uint64_t const nonce, - ethash_h256_t const* mix_hash, - ethash_h256_t const* boundary -); - -struct ethash_light { - void* cache; - uint64_t cache_size; - uint64_t block_number; -}; - -/** - * Allocate and initialize a new ethash_light handler. Internal version - * - * @param cache_size The size of the cache in bytes - * @param seed Block seedhash to be used during the computation of the - * cache nodes - * @return Newly allocated ethash_light handler or NULL in case of - * ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes() - */ -ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed); - -/** - * Calculate the light client data. Internal version. - * - * @param light The light client handler - * @param full_size The size of the full data in bytes. 
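Taken together, ethash.h's public calls and internal.h's boundary test sketch out the whole light-verification path: recompute the mix and result for the claimed nonce, compare the mix digest, then check the result against the boundary. A hypothetical wrapper (verify_pow and its parameter plumbing are ours; a real caller pulls the header hash, nonce, mix digest and boundary out of the block header):

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include "ethash.h"
#include "internal.h" /* ethash_check_difficulty(); internal API */

static bool verify_pow(uint64_t block_number,
                       ethash_h256_t header_hash,
                       uint64_t nonce,
                       ethash_h256_t const *boundary,
                       ethash_h256_t const *expected_mix)
{
	ethash_light_t light = ethash_light_new(block_number);
	if (!light)
		return false;
	ethash_return_value_t r = ethash_light_compute(light, header_hash, nonce);
	bool ok = r.success
	       && memcmp(&r.mix_hash, expected_mix, 32) == 0
	       && ethash_check_difficulty(&r.result, boundary);
	ethash_light_delete(light);
	return ok;
}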
- * @param header_hash The header hash to pack into the mix - * @param nonce The nonce to pack into the mix - * @return The resulting hash. - */ -ethash_return_value_t ethash_light_compute_internal( - ethash_light_t light, - uint64_t full_size, - ethash_h256_t const header_hash, - uint64_t nonce -); - -struct ethash_full { - FILE* file; - uint64_t file_size; - node* data; -}; - -/** - * Allocate and initialize a new ethash_full handler. Internal version. - * - * @param dirname The directory in which to put the DAG file. - * @param seedhash The seed hash of the block. Used in the DAG file naming. - * @param full_size The size of the full data in bytes. - * @param cache A cache object to use that was allocated with @ref ethash_cache_new(). - * Iff this function succeeds the ethash_full_t will take memory - * memory ownership of the cache and free it at deletion. If - * not then the user still has to handle freeing of the cache himself. - * @param callback A callback function with signature of @ref ethash_callback_t - * It accepts an unsigned with which a progress of DAG calculation - * can be displayed. If all goes well the callback should return 0. - * If a non-zero value is returned then DAG generation will stop. - * @return Newly allocated ethash_full handler or NULL in case of - * ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data() - */ -ethash_full_t ethash_full_new_internal( - char const* dirname, - ethash_h256_t const seed_hash, - uint64_t full_size, - ethash_light_t const light, - ethash_callback_t callback -); - -void ethash_calculate_dag_item( - node* const ret, - uint32_t node_index, - ethash_light_t const cache -); - -void ethash_quick_hash( - ethash_h256_t* return_hash, - ethash_h256_t const* header_hash, - const uint64_t nonce, - ethash_h256_t const* mix_hash -); - -uint64_t ethash_get_datasize(uint64_t const block_number); -uint64_t ethash_get_cachesize(uint64_t const block_number); - -/** - * Compute the memory data for a full node's memory - * - * @param mem A pointer to an ethash full's memory - * @param full_size The size of the full data in bytes - * @param cache A cache object to use in the calculation - * @param callback The callback function. Check @ref ethash_full_new() for details. - * @return true if all went fine and false for invalid parameters - */ -bool ethash_compute_full_data( - void* mem, - uint64_t full_size, - ethash_light_t const light, - ethash_callback_t callback -); - -#ifdef __cplusplus -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io.c b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io.c deleted file mode 100644 index f4db477c200b0..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - This file is part of ethash. - - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with ethash. If not, see . 
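One detail that ties internal.c to the IO layer that follows: ethash_get_seedhash() shown earlier derives epoch N's seed by applying Keccak-256 N times to 32 zero bytes, and io.c uses the first 8 bytes of that seed to name the DAG file. A standalone restatement (seedhash_for_epoch is our name, not a library symbol):

#include <stdint.h>
#include <string.h>
#include "ethash.h"
#include "sha3.h" /* SHA3_256, declared later in this patch */

/* seed for epoch N = keccak-256 applied N times to 32 zero bytes */
static ethash_h256_t seedhash_for_epoch(uint64_t epoch)
{
	ethash_h256_t seed;
	memset(&seed, 0, 32);
	for (uint64_t i = 0; i < epoch; ++i)
		SHA3_256(&seed, (uint8_t *)&seed, 32);
	return seed;
}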
-*/ -/** @file io.c - * @author Lefteris Karapetsas - * @date 2015 - */ -#include "io.h" -#include -#include -#include - -enum ethash_io_rc ethash_io_prepare( - char const* dirname, - ethash_h256_t const seedhash, - FILE** output_file, - uint64_t file_size, - bool force_create -) -{ - char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE]; - enum ethash_io_rc ret = ETHASH_IO_FAIL; - // reset errno before io calls - errno = 0; - - // assert directory exists - if (!ethash_mkdir(dirname)) { - ETHASH_CRITICAL("Could not create the ethash directory"); - goto end; - } - - ethash_io_mutable_name(ETHASH_REVISION, &seedhash, mutable_name); - char* tmpfile = ethash_io_create_filename(dirname, mutable_name, strlen(mutable_name)); - if (!tmpfile) { - ETHASH_CRITICAL("Could not create the full DAG pathname"); - goto end; - } - - FILE *f; - if (!force_create) { - // try to open the file - f = ethash_fopen(tmpfile, "rb+"); - if (f) { - size_t found_size; - if (!ethash_file_size(f, &found_size)) { - fclose(f); - ETHASH_CRITICAL("Could not query size of DAG file: \"%s\"", tmpfile); - goto free_memo; - } - if (file_size != found_size - ETHASH_DAG_MAGIC_NUM_SIZE) { - fclose(f); - ret = ETHASH_IO_MEMO_SIZE_MISMATCH; - goto free_memo; - } - // compare the magic number, no need to care about endianess since it's local - uint64_t magic_num; - if (fread(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) { - // I/O error - fclose(f); - ETHASH_CRITICAL("Could not read from DAG file: \"%s\"", tmpfile); - ret = ETHASH_IO_MEMO_SIZE_MISMATCH; - goto free_memo; - } - if (magic_num != ETHASH_DAG_MAGIC_NUM) { - fclose(f); - ret = ETHASH_IO_MEMO_SIZE_MISMATCH; - goto free_memo; - } - ret = ETHASH_IO_MEMO_MATCH; - goto set_file; - } - } - - // file does not exist, will need to be created - f = ethash_fopen(tmpfile, "wb+"); - if (!f) { - ETHASH_CRITICAL("Could not create DAG file: \"%s\"", tmpfile); - goto free_memo; - } - // make sure it's of the proper size - if (fseek(f, (long int)(file_size + ETHASH_DAG_MAGIC_NUM_SIZE - 1), SEEK_SET) != 0) { - fclose(f); - ETHASH_CRITICAL("Could not seek to the end of DAG file: \"%s\". Insufficient space?", tmpfile); - goto free_memo; - } - if (fputc('\n', f) == EOF) { - fclose(f); - ETHASH_CRITICAL("Could not write in the end of DAG file: \"%s\". Insufficient space?", tmpfile); - goto free_memo; - } - if (fflush(f) != 0) { - fclose(f); - ETHASH_CRITICAL("Could not flush at end of DAG file: \"%s\". Insufficient space?", tmpfile); - goto free_memo; - } - ret = ETHASH_IO_MEMO_MISMATCH; - goto set_file; - - ret = ETHASH_IO_MEMO_MATCH; -set_file: - *output_file = f; -free_memo: - free(tmpfile); -end: - return ret; -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io.h deleted file mode 100644 index e9ce31ad2771b..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io.h +++ /dev/null @@ -1,202 +0,0 @@ -/* - This file is part of ethash. - - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
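ethash_io_prepare() above encodes the on-disk protocol: a DAG file is only trustworthy if it is exactly 8 magic bytes plus full_size bytes of node data, with the magic matching, because the magic is written last, only after generation succeeds. A freestanding sketch of the same test (dag_file_looks_complete and its arguments are ours; a production check would use 64-bit file offsets rather than ftell's long):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DAG_MAGIC 0xFEE1DEADBADDCAFEULL /* ETHASH_DAG_MAGIC_NUM */

static bool dag_file_looks_complete(const char *path, uint64_t full_size)
{
	FILE *f = fopen(path, "rb");
	if (!f)
		return false;
	/* the magic is read back with local endianness, matching how
	 * io.c wrote it on the same machine */
	uint64_t magic = 0;
	bool ok = fread(&magic, sizeof magic, 1, f) == 1 && magic == DAG_MAGIC;
	if (ok) {
		fseek(f, 0, SEEK_END);
		ok = ftell(f) == (long)(full_size + sizeof magic);
	}
	fclose(f);
	return ok;
}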
- - You should have received a copy of the GNU General Public License - along with ethash. If not, see . -*/ -/** @file io.h - * @author Lefteris Karapetsas - * @date 2015 - */ -#pragma once -#include -#include -#include -#include -#ifdef __cplusplus -#define __STDC_FORMAT_MACROS 1 -#endif -#include -#include "endian.h" -#include "ethash.h" - -#ifdef __cplusplus -extern "C" { -#endif -// Maximum size for mutable part of DAG file name -// 6 is for "full-R", the suffix of the filename -// 10 is for maximum number of digits of a uint32_t (for REVISION) -// 1 is for - and 16 is for the first 16 hex digits for first 8 bytes of -// the seedhash and last 1 is for the null terminating character -// Reference: https://github.com/expanse-org/wiki/wiki/Ethash-DAG -#define DAG_MUTABLE_NAME_MAX_SIZE (6 + 10 + 1 + 16 + 1) -/// Possible return values of @see ethash_io_prepare -enum ethash_io_rc { - ETHASH_IO_FAIL = 0, ///< There has been an IO failure - ETHASH_IO_MEMO_SIZE_MISMATCH, ///< DAG with revision/hash match, but file size was wrong. - ETHASH_IO_MEMO_MISMATCH, ///< The DAG file did not exist or there was revision/hash mismatch - ETHASH_IO_MEMO_MATCH, ///< DAG file existed and revision/hash matched. No need to do anything -}; - -// small hack for windows. I don't feel I should use va_args and forward just -// to have this one function properly cross-platform abstracted -#if defined(_WIN32) && !defined(__GNUC__) -#define snprintf(...) sprintf_s(__VA_ARGS__) -#endif - -/** - * Logs a critical error in important parts of ethash. Should mostly help - * figure out what kind of problem (I/O, memory e.t.c.) causes a NULL - * ethash_full_t - */ -#ifdef ETHASH_PRINT_CRITICAL_OUTPUT -#define ETHASH_CRITICAL(...) \ - do \ - { \ - printf("ETHASH CRITICAL ERROR: "__VA_ARGS__); \ - printf("\n"); \ - fflush(stdout); \ - } while (0) -#else -#define ETHASH_CRITICAL(...) -#endif - -/** - * Prepares io for ethash - * - * Create the DAG directory and the DAG file if they don't exist. - * - * @param[in] dirname A null terminated c-string of the path of the ethash - * data directory. If it does not exist it's created. - * @param[in] seedhash The seedhash of the current block number, used in the - * naming of the file as can be seen from the spec at: - * https://github.com/expanse-org/wiki/wiki/Ethash-DAG - * @param[out] output_file If there was no failure then this will point to an open - * file descriptor. User is responsible for closing it. - * In the case of memo match then the file is open on read - * mode, while on the case of mismatch a new file is created - * on write mode - * @param[in] file_size The size that the DAG file should have on disk - * @param[out] force_create If true then there is no check to see if the file - * already exists - * @return For possible return values @see enum ethash_io_rc - */ -enum ethash_io_rc ethash_io_prepare( - char const* dirname, - ethash_h256_t const seedhash, - FILE** output_file, - uint64_t file_size, - bool force_create -); - -/** - * An fopen wrapper for no-warnings crossplatform fopen. - * - * Msvc compiler considers fopen to be insecure and suggests to use their - * alternative. This is a wrapper for this alternative. Another way is to - * #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does - * not sound like a good idea. - * - * @param file_name The path to the file to open - * @param mode Opening mode. 
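The DAG_MUTABLE_NAME_MAX_SIZE arithmetic below (6 + 10 + 1 + 16 + 1) maps one-to-one onto the format string used by ethash_io_mutable_name() further down: "full-R", up to ten revision digits, a dash, sixteen hex digits of seed prefix, and the terminator. A tiny demo of the resulting name shape (the revision matches ETHASH_REVISION; the seed prefix is made up):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	char name[6 + 10 + 1 + 16 + 1]; /* DAG_MUTABLE_NAME_MAX_SIZE */
	uint32_t revision = 23;                       /* ETHASH_REVISION */
	uint64_t seed_prefix = 0xab010203aabbccddULL; /* illustrative */
	snprintf(name, sizeof name, "full-R%" PRIu32 "-%016" PRIx64,
	         revision, seed_prefix);
	puts(name); /* e.g. full-R23-ab010203aabbccdd */
	return 0;
}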
Check fopen() - * @return The FILE* or NULL in failure - */ -FILE* ethash_fopen(char const* file_name, char const* mode); - -/** - * An strncat wrapper for no-warnings crossplatform strncat. - * - * Msvc compiler considers strncat to be insecure and suggests to use their - * alternative. This is a wrapper for this alternative. Another way is to - * #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does - * not sound like a good idea. - * - * @param des Destination buffer - * @param dest_size Maximum size of the destination buffer. This is the - * extra argument for the MSVC secure strncat - * @param src Souce buffer - * @param count Number of bytes to copy from source - * @return If all is well returns the dest buffer. If there is an - * error returns NULL - */ -char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count); - -/** - * A cross-platform mkdir wrapper to create a directory or assert it's there - * - * @param dirname The full path of the directory to create - * @return true if the directory was created or if it already - * existed - */ -bool ethash_mkdir(char const* dirname); - -/** - * Get a file's size - * - * @param[in] f The open file stream whose size to get - * @param[out] size Pass a size_t by reference to contain the file size - * @return true in success and false if there was a failure - */ -bool ethash_file_size(FILE* f, size_t* ret_size); - -/** - * Get a file descriptor number from a FILE stream - * - * @param f The file stream whose fd to get - * @return Platform specific fd handler - */ -int ethash_fileno(FILE* f); - -/** - * Create the filename for the DAG. - * - * @param dirname The directory name in which the DAG file should reside - * If it does not end with a directory separator it is appended. - * @param filename The actual name of the file - * @param filename_length The length of the filename in bytes - * @return A char* containing the full name. User must deallocate. - */ -char* ethash_io_create_filename( - char const* dirname, - char const* filename, - size_t filename_length -); - -/** - * Gets the default directory name for the DAG depending on the system - * - * The spec defining this directory is here: https://github.com/expanse-org/wiki/wiki/Ethash-DAG - * - * @param[out] strbuf A string buffer of sufficient size to keep the - * null termninated string of the directory name - * @param[in] buffsize Size of @a strbuf in bytes - * @return true for success and false otherwise - */ -bool ethash_get_default_dirname(char* strbuf, size_t buffsize); - -static inline bool ethash_io_mutable_name( - uint32_t revision, - ethash_h256_t const* seed_hash, - char* output -) -{ - uint64_t hash = *((uint64_t*)seed_hash); -#if LITTLE_ENDIAN == BYTE_ORDER - hash = ethash_swap_u64(hash); -#endif - return snprintf(output, DAG_MUTABLE_NAME_MAX_SIZE, "full-R%u-%016" PRIx64, revision, hash) >= 0; -} - -#ifdef __cplusplus -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io_posix.c b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io_posix.c deleted file mode 100644 index c9a17d845efa6..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io_posix.c +++ /dev/null @@ -1,111 +0,0 @@ -/* - This file is part of ethash. 
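The contract documented above for ethash_strncat — return dest on success, NULL whenever the result plus terminator would not fit — is easy to misread, so here is a small self-test against the POSIX implementation that appears just below (ethash_strncat_sketch is a local copy for the demo, not a new API):

#include <stdio.h>
#include <string.h>

/* same rule as the io_posix.c version: append only if the result,
 * including the NUL terminator, fits within dest_size */
static char *ethash_strncat_sketch(char *dest, size_t dest_size,
                                   const char *src, size_t count)
{
	return strlen(dest) + count + 1 <= dest_size
	     ? strncat(dest, src, count)
	     : NULL;
}

int main(void)
{
	char buf[8] = "abc";
	/* fits: 3 + 2 + 1 = 6 <= 8, buf becomes "abcde" */
	printf("%s\n", ethash_strncat_sketch(buf, sizeof buf, "de", 2)
	               ? buf : "(overflow)");
	/* does not fit: 5 + 5 + 1 = 11 > 8, returns NULL, buf untouched */
	printf("%s\n", ethash_strncat_sketch(buf, sizeof buf, "fghij", 5)
	               ? buf : "(overflow)");
	return 0;
}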
- - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with ethash. If not, see . -*/ -/** @file io_posix.c - * @author Lefteris Karapetsas - * @date 2015 - */ - -#include "io.h" -#include -#include -#include -#include -#include -#include -#include -#include - -FILE* ethash_fopen(char const* file_name, char const* mode) -{ - return fopen(file_name, mode); -} - -char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count) -{ - return strlen(dest) + count + 1 <= dest_size ? strncat(dest, src, count) : NULL; -} - -bool ethash_mkdir(char const* dirname) -{ - int rc = mkdir(dirname, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); - return rc != -1 || errno == EEXIST; -} - -int ethash_fileno(FILE *f) -{ - return fileno(f); -} - -char* ethash_io_create_filename( - char const* dirname, - char const* filename, - size_t filename_length -) -{ - size_t dirlen = strlen(dirname); - size_t dest_size = dirlen + filename_length + 1; - if (dirname[dirlen] != '/') { - dest_size += 1; - } - char* name = malloc(dest_size); - if (!name) { - return NULL; - } - - name[0] = '\0'; - ethash_strncat(name, dest_size, dirname, dirlen); - if (dirname[dirlen] != '/') { - ethash_strncat(name, dest_size, "/", 1); - } - ethash_strncat(name, dest_size, filename, filename_length); - return name; -} - -bool ethash_file_size(FILE* f, size_t* ret_size) -{ - struct stat st; - int fd; - if ((fd = fileno(f)) == -1 || fstat(fd, &st) != 0) { - return false; - } - *ret_size = st.st_size; - return true; -} - -bool ethash_get_default_dirname(char* strbuf, size_t buffsize) -{ - static const char dir_suffix[] = ".ethash/"; - strbuf[0] = '\0'; - char* home_dir = getenv("HOME"); - if (!home_dir || strlen(home_dir) == 0) - { - struct passwd* pwd = getpwuid(getuid()); - if (pwd) - home_dir = pwd->pw_dir; - } - - size_t len = strlen(home_dir); - if (!ethash_strncat(strbuf, buffsize, home_dir, len)) { - return false; - } - if (home_dir[len] != '/') { - if (!ethash_strncat(strbuf, buffsize, "/", 1)) { - return false; - } - } - return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix)); -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io_win32.c b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io_win32.c deleted file mode 100644 index 34f1aaa774b60..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/io_win32.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - This file is part of ethash. - - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with ethash. 
If not, see . -*/ -/** @file io_win32.c - * @author Lefteris Karapetsas - * @date 2015 - */ - -#include "io.h" -#include -#include -#include -#include -#include -#include - -FILE* ethash_fopen(char const* file_name, char const* mode) -{ - FILE* f; - return fopen_s(&f, file_name, mode) == 0 ? f : NULL; -} - -char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count) -{ - return strncat_s(dest, dest_size, src, count) == 0 ? dest : NULL; -} - -bool ethash_mkdir(char const* dirname) -{ - int rc = _mkdir(dirname); - return rc != -1 || errno == EEXIST; -} - -int ethash_fileno(FILE* f) -{ - return _fileno(f); -} - -char* ethash_io_create_filename( - char const* dirname, - char const* filename, - size_t filename_length -) -{ - size_t dirlen = strlen(dirname); - size_t dest_size = dirlen + filename_length + 1; - if (dirname[dirlen] != '\\' || dirname[dirlen] != '/') { - dest_size += 1; - } - char* name = malloc(dest_size); - if (!name) { - return NULL; - } - - name[0] = '\0'; - ethash_strncat(name, dest_size, dirname, dirlen); - if (dirname[dirlen] != '\\' || dirname[dirlen] != '/') { - ethash_strncat(name, dest_size, "\\", 1); - } - ethash_strncat(name, dest_size, filename, filename_length); - return name; -} - -bool ethash_file_size(FILE* f, size_t* ret_size) -{ - struct _stat st; - int fd; - if ((fd = _fileno(f)) == -1 || _fstat(fd, &st) != 0) { - return false; - } - *ret_size = st.st_size; - return true; -} - -bool ethash_get_default_dirname(char* strbuf, size_t buffsize) -{ - static const char dir_suffix[] = "Ethash\\"; - strbuf[0] = '\0'; - if (!SUCCEEDED(SHGetFolderPathA(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, (CHAR*)strbuf))) { - return false; - } - if (!ethash_strncat(strbuf, buffsize, "\\", 1)) { - return false; - } - - return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix)); -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/mmap.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/mmap.h deleted file mode 100644 index 1e226e83fdceb..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/mmap.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - This file is part of ethash. - - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with ethash. If not, see . -*/ -/** @file mmap.h - * @author Lefteris Karapetsas - * @date 2015 - */ -#pragma once -#if defined(__MINGW32__) || defined(_WIN32) -#include - -#define PROT_READ 0x1 -#define PROT_WRITE 0x2 -/* This flag is only available in WinXP+ */ -#ifdef FILE_MAP_EXECUTE -#define PROT_EXEC 0x4 -#else -#define PROT_EXEC 0x0 -#define FILE_MAP_EXECUTE 0 -#endif - -#define MAP_SHARED 0x01 -#define MAP_PRIVATE 0x02 -#define MAP_ANONYMOUS 0x20 -#define MAP_ANON MAP_ANONYMOUS -#define MAP_FAILED ((void *) -1) - -void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset); -void munmap(void* addr, size_t length); -#else // posix, yay! 
^_^ -#include -#endif - - diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/mmap_win32.c b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/mmap_win32.c deleted file mode 100644 index 42968b98a4955..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/mmap_win32.c +++ /dev/null @@ -1,84 +0,0 @@ -/* mmap() replacement for Windows - * - * Author: Mike Frysinger - * Placed into the public domain - */ - -/* References: - * CreateFileMapping: http://msdn.microsoft.com/en-us/library/aa366537(VS.85).aspx - * CloseHandle: http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx - * MapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366761(VS.85).aspx - * UnmapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366882(VS.85).aspx - */ - -#include -#include -#include "mmap.h" - -#ifdef __USE_FILE_OFFSET64 -# define DWORD_HI(x) (x >> 32) -# define DWORD_LO(x) ((x) & 0xffffffff) -#else -# define DWORD_HI(x) (0) -# define DWORD_LO(x) (x) -#endif - -void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset) -{ - if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) - return MAP_FAILED; - if (fd == -1) { - if (!(flags & MAP_ANON) || offset) - return MAP_FAILED; - } else if (flags & MAP_ANON) - return MAP_FAILED; - - DWORD flProtect; - if (prot & PROT_WRITE) { - if (prot & PROT_EXEC) - flProtect = PAGE_EXECUTE_READWRITE; - else - flProtect = PAGE_READWRITE; - } else if (prot & PROT_EXEC) { - if (prot & PROT_READ) - flProtect = PAGE_EXECUTE_READ; - else if (prot & PROT_EXEC) - flProtect = PAGE_EXECUTE; - } else - flProtect = PAGE_READONLY; - - off_t end = length + offset; - HANDLE mmap_fd, h; - if (fd == -1) - mmap_fd = INVALID_HANDLE_VALUE; - else - mmap_fd = (HANDLE)_get_osfhandle(fd); - h = CreateFileMapping(mmap_fd, NULL, flProtect, DWORD_HI(end), DWORD_LO(end), NULL); - if (h == NULL) - return MAP_FAILED; - - DWORD dwDesiredAccess; - if (prot & PROT_WRITE) - dwDesiredAccess = FILE_MAP_WRITE; - else - dwDesiredAccess = FILE_MAP_READ; - if (prot & PROT_EXEC) - dwDesiredAccess |= FILE_MAP_EXECUTE; - if (flags & MAP_PRIVATE) - dwDesiredAccess |= FILE_MAP_COPY; - void *ret = MapViewOfFile(h, dwDesiredAccess, DWORD_HI(offset), DWORD_LO(offset), length); - if (ret == NULL) { - ret = MAP_FAILED; - } - // since we are handling the file ourselves with fd, close the Windows Handle here - CloseHandle(h); - return ret; -} - -void munmap(void* addr, size_t length) -{ - UnmapViewOfFile(addr); -} - -#undef DWORD_HI -#undef DWORD_LO diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3.c b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3.c deleted file mode 100644 index e72fe10184f5b..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3.c +++ /dev/null @@ -1,151 +0,0 @@ -/** libkeccak-tiny -* -* A single-file implementation of SHA-3 and SHAKE. -* -* Implementor: David Leon Gil -* License: CC0, attribution kindly requested. Blame taken too, -* but not liability. -*/ -#include "sha3.h" - -#include -#include -#include -#include - -/******** The Keccak-f[1600] permutation ********/ - -/*** Constants. 
***/ -static const uint8_t rho[24] = \ - { 1, 3, 6, 10, 15, 21, - 28, 36, 45, 55, 2, 14, - 27, 41, 56, 8, 25, 43, - 62, 18, 39, 61, 20, 44}; -static const uint8_t pi[24] = \ - {10, 7, 11, 17, 18, 3, - 5, 16, 8, 21, 24, 4, - 15, 23, 19, 13, 12, 2, - 20, 14, 22, 9, 6, 1}; -static const uint64_t RC[24] = \ - {1ULL, 0x8082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, - 0x808bULL, 0x80000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x8aULL, 0x88ULL, 0x80008009ULL, 0x8000000aULL, - 0x8000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, 0x800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, 0x80000001ULL, 0x8000000080008008ULL}; - -/*** Helper macros to unroll the permutation. ***/ -#define rol(x, s) (((x) << s) | ((x) >> (64 - s))) -#define REPEAT6(e) e e e e e e -#define REPEAT24(e) REPEAT6(e e e e) -#define REPEAT5(e) e e e e e -#define FOR5(v, s, e) \ - v = 0; \ - REPEAT5(e; v += s;) - -/*** Keccak-f[1600] ***/ -static inline void keccakf(void* state) { - uint64_t* a = (uint64_t*)state; - uint64_t b[5] = {0}; - uint64_t t = 0; - uint8_t x, y; - - for (int i = 0; i < 24; i++) { - // Theta - FOR5(x, 1, - b[x] = 0; - FOR5(y, 5, - b[x] ^= a[x + y]; )) - FOR5(x, 1, - FOR5(y, 5, - a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); )) - // Rho and pi - t = a[1]; - x = 0; - REPEAT24(b[0] = a[pi[x]]; - a[pi[x]] = rol(t, rho[x]); - t = b[0]; - x++; ) - // Chi - FOR5(y, - 5, - FOR5(x, 1, - b[x] = a[y + x];) - FOR5(x, 1, - a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); )) - // Iota - a[0] ^= RC[i]; - } -} - -/******** The FIPS202-defined functions. ********/ - -/*** Some helper macros. ***/ - -#define _(S) do { S } while (0) -#define FOR(i, ST, L, S) \ - _(for (size_t i = 0; i < L; i += ST) { S; }) -#define mkapply_ds(NAME, S) \ - static inline void NAME(uint8_t* dst, \ - const uint8_t* src, \ - size_t len) { \ - FOR(i, 1, len, S); \ - } -#define mkapply_sd(NAME, S) \ - static inline void NAME(const uint8_t* src, \ - uint8_t* dst, \ - size_t len) { \ - FOR(i, 1, len, S); \ - } - -mkapply_ds(xorin, dst[i] ^= src[i]) // xorin -mkapply_sd(setout, dst[i] = src[i]) // setout - -#define P keccakf -#define Plen 200 - -// Fold P*F over the full blocks of an input. -#define foldP(I, L, F) \ - while (L >= rate) { \ - F(a, I, rate); \ - P(a); \ - I += rate; \ - L -= rate; \ - } - -/** The sponge-based hash construction. **/ -static inline int hash(uint8_t* out, size_t outlen, - const uint8_t* in, size_t inlen, - size_t rate, uint8_t delim) { - if ((out == NULL) || ((in == NULL) && inlen != 0) || (rate >= Plen)) { - return -1; - } - uint8_t a[Plen] = {0}; - // Absorb input. - foldP(in, inlen, xorin); - // Xor in the DS and pad frame. - a[inlen] ^= delim; - a[rate - 1] ^= 0x80; - // Xor in the last block. - xorin(a, in, inlen); - // Apply P - P(a); - // Squeeze output. 
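// Editor's annotation (not part of the deleted file): the sponge above
// absorbs full rate-byte blocks with xorin + keccakf, pads the tail with the
// domain byte and the 0x80 end-of-pad, permutes once more, and the lines
// below squeeze the output. One caveat worth flagging: defsha3 passes delim
// 0x01, i.e. the original Keccak padding used by Ethereum/ethash, not the
// 0x06 domain byte of FIPS 202 SHA-3, despite the sha3_* names. A minimal
// caller of the resulting API, as a sketch:
//
//     uint8_t digest[32];
//     const uint8_t msg[3] = { 'a', 'b', 'c' };
//     if (sha3_256(digest, sizeof(digest), msg, sizeof(msg)) == 0) {
//         /* digest now holds the 32-byte Keccak-256 of "abc" */
//     }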
- foldP(out, outlen, setout); - setout(a, out, outlen); - memset(a, 0, 200); - return 0; -} - -#define defsha3(bits) \ - int sha3_##bits(uint8_t* out, size_t outlen, \ - const uint8_t* in, size_t inlen) { \ - if (outlen > (bits/8)) { \ - return -1; \ - } \ - return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x01); \ - } - -/*** FIPS202 SHA3 FOFs ***/ -defsha3(256) -defsha3(512) diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3.h deleted file mode 100644 index a38006292f435..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3.h +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#ifdef __cplusplus -extern "C" { -#endif - -#include "compiler.h" -#include -#include - -struct ethash_h256; - -#define decsha3(bits) \ - int sha3_##bits(uint8_t*, size_t, uint8_t const*, size_t); - -decsha3(256) -decsha3(512) - -static inline void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t const size) -{ - sha3_256((uint8_t*)ret, 32, data, size); -} - -static inline void SHA3_512(uint8_t* ret, uint8_t const* data, size_t const size) -{ - sha3_512(ret, 64, data, size); -} - -#ifdef __cplusplus -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3_cryptopp.cpp b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3_cryptopp.cpp deleted file mode 100644 index 2a7c02664cd6a..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3_cryptopp.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - This file is part of ethash. - - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with ethash. If not, see . 
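(Editor's annotation, not patch content: this tree carries two interchangeable
SHA3 backends -- the single-file libkeccak-tiny implementation above and the
Crypto++ wrapper in this file -- with the Crypto++ path enabled when the build
finds the library, see the WITH_CRYPTOPP define in the test CMakeLists further
down. Both export the same C-linkage entry points, e.g.

    void SHA3_256(struct ethash_h256 const* ret,
                  uint8_t const* data, size_t size);

so the rest of libethash links against either without change.)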
-*/ - -/** @file sha3.cpp -* @author Tim Hughes -* @date 2015 -*/ -#include -#include - -extern "C" { -struct ethash_h256; -typedef struct ethash_h256 ethash_h256_t; -void SHA3_256(ethash_h256_t const* ret, uint8_t const* data, size_t size) -{ - CryptoPP::SHA3_256().CalculateDigest((uint8_t*)ret, data, size); -} - -void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size) -{ - CryptoPP::SHA3_512().CalculateDigest(ret, data, size); -} -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3_cryptopp.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3_cryptopp.h deleted file mode 100644 index 9edc407d50eb9..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/sha3_cryptopp.h +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once - -#include "compiler.h" -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -struct ethash_h256; - -void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t size); -void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size); - -#ifdef __cplusplus -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/util.h b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/util.h deleted file mode 100644 index c5fc6e55b5fc1..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/util.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - This file is part of ethash. - - ethash is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - ethash is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with ethash. If not, see . -*/ -/** @file util.h - * @author Tim Hughes - * @date 2015 - */ -#pragma once -#include -#include "compiler.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef _MSC_VER -void debugf(char const* str, ...); -#else -#define debugf printf -#endif - -static inline uint32_t min_u32(uint32_t a, uint32_t b) -{ - return a < b ? a : b; -} - -static inline uint32_t clamp_u32(uint32_t x, uint32_t min_, uint32_t max_) -{ - return x < min_ ? min_ : (x > max_ ? max_ : x); -} - -#ifdef __cplusplus -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/util_win32.c b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/util_win32.c deleted file mode 100644 index 6f2e69e6d12a6..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/libethash/util_win32.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - This file is part of cpp-expanse. - - cpp-expanse is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - cpp-expanse is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with cpp-expanse. 
If not, see . -*/ -/** @file util.c - * @author Tim Hughes - * @date 2015 - */ -#include -#include -#include "util.h" - - -// foward declare without all of Windows.h -__declspec(dllimport) void __stdcall OutputDebugStringA(char const* lpOutputString); - -void debugf(char const* str, ...) -{ - va_list args; - va_start(args, str); - - char buf[1<<16]; - _vsnprintf_s(buf, sizeof(buf), sizeof(buf), str, args); - buf[sizeof(buf)-1] = '\0'; - OutputDebugStringA(buf); -} diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/python/core.c b/Godeps/_workspace/src/github.com/expanse-org/ethash/src/python/core.c deleted file mode 100644 index 9b082d57b0f37..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/src/python/core.c +++ /dev/null @@ -1,267 +0,0 @@ -#include -#include -#include -#include -#include -#include "../libethash/ethash.h" -#include "../libethash/internal.h" - -#if PY_MAJOR_VERSION >= 3 -#define PY_STRING_FORMAT "y#" -#define PY_CONST_STRING_FORMAT "y" -#else -#define PY_STRING_FORMAT "s#" -#define PY_CONST_STRING_FORMAT "s" -#endif - -#define MIX_WORDS (ETHASH_MIX_BYTES/4) - -static PyObject * -mkcache_bytes(PyObject *self, PyObject *args) { - unsigned long block_number; - unsigned long cache_size; - - if (!PyArg_ParseTuple(args, "k", &block_number)) - return 0; - - ethash_light_t L = ethash_light_new(block_number); - PyObject * val = Py_BuildValue(PY_STRING_FORMAT, L->cache, L->cache_size); - free(L->cache); - return val; -} - -/* -static PyObject * -calc_dataset_bytes(PyObject *self, PyObject *args) { - char *cache_bytes; - unsigned long full_size; - int cache_size; - - if (!PyArg_ParseTuple(args, "k" PY_STRING_FORMAT, &full_size, &cache_bytes, &cache_size)) - return 0; - - if (full_size % MIX_WORDS != 0) { - char error_message[1024]; - sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %lu)", MIX_WORDS, full_size); - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - - if (cache_size % ETHASH_HASH_BYTES != 0) { - char error_message[1024]; - sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", ETHASH_HASH_BYTES, cache_size); - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - - ethash_params params; - params.cache_size = (size_t) cache_size; - params.full_size = (size_t) full_size; - ethash_cache cache; - cache.mem = (void *) cache_bytes; - void *mem = malloc(params.full_size); - ethash_compute_full_data(mem, ¶ms, &cache); - PyObject * val = Py_BuildValue(PY_STRING_FORMAT, (char *) mem, full_size); - free(mem); - return val; -}*/ - -// hashimoto_light(full_size, cache, header, nonce) -static PyObject * -hashimoto_light(PyObject *self, PyObject *args) { - char *cache_bytes; - char *header; - unsigned long block_number; - unsigned long long nonce; - int cache_size, header_size; - if (!PyArg_ParseTuple(args, "k" PY_STRING_FORMAT PY_STRING_FORMAT "K", &block_number, &cache_bytes, &cache_size, &header, &header_size, &nonce)) - return 0; - if (header_size != 32) { - char error_message[1024]; - sprintf(error_message, "Seed must be 32 bytes long (was %i)", header_size); - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - struct ethash_light *s; - s = calloc(sizeof(*s), 1); - s->cache = cache_bytes; - s->cache_size = cache_size; - s->block_number = block_number; - struct ethash_h256 *h; - h = calloc(sizeof(*h), 1); - for (int i = 0; i < 32; i++) h->b[i] = header[i]; - struct ethash_return_value out = ethash_light_compute(s, *h, nonce); - 
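/* Editor's annotation, not part of the deleted binding: note that `s` and
 * `h` above are calloc'ed on every call and never freed before the return
 * below, a small per-call leak (the cache buffer itself is borrowed from
 * the Python string, so freeing it here would be wrong). From Python the
 * call and result look roughly like:
 *
 *     out = pyethash.hashimoto_light(block_number, cache_bytes, header, nonce)
 *     mix, result = out["mix digest"], out["result"]   # 32 bytes each
 *
 * (usage sketch inferred from the PyArg_ParseTuple format string and the
 * Py_BuildValue below)
 */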
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "," PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}", - "mix digest", &out.mix_hash, 32, - "result", &out.result, 32); -} -/* -// hashimoto_full(dataset, header, nonce) -static PyObject * -hashimoto_full(PyObject *self, PyObject *args) { - char *full_bytes; - char *header; - unsigned long long nonce; - int full_size, header_size; - - if (!PyArg_ParseTuple(args, PY_STRING_FORMAT PY_STRING_FORMAT "K", &full_bytes, &full_size, &header, &header_size, &nonce)) - return 0; - - if (full_size % MIX_WORDS != 0) { - char error_message[1024]; - sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %i)", MIX_WORDS, full_size); - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - - if (header_size != 32) { - char error_message[1024]; - sprintf(error_message, "Header must be 32 bytes long (was %i)", header_size); - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - - - ethash_return_value out; - ethash_params params; - params.full_size = (size_t) full_size; - ethash_full(&out, (void *) full_bytes, ¶ms, (ethash_h256_t *) header, nonce); - return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}", - "mix digest", &out.mix_hash, 32, - "result", &out.result, 32); -} - -// mine(dataset_bytes, header, difficulty_bytes) -static PyObject * -mine(PyObject *self, PyObject *args) { - char *full_bytes; - char *header; - char *difficulty; - srand(time(0)); - uint64_t nonce = ((uint64_t) rand()) << 32 | rand(); - int full_size, header_size, difficulty_size; - - if (!PyArg_ParseTuple(args, PY_STRING_FORMAT PY_STRING_FORMAT PY_STRING_FORMAT, &full_bytes, &full_size, &header, &header_size, &difficulty, &difficulty_size)) - return 0; - - if (full_size % MIX_WORDS != 0) { - char error_message[1024]; - sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %i)", MIX_WORDS, full_size); - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - - if (header_size != 32) { - char error_message[1024]; - sprintf(error_message, "Header must be 32 bytes long (was %i)", header_size); - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - - if (difficulty_size != 32) { - char error_message[1024]; - sprintf(error_message, "Difficulty must be an array of 32 bytes (only had %i)", difficulty_size); - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - - ethash_return_value out; - ethash_params params; - params.full_size = (size_t) full_size; - - // TODO: Multi threading? 
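Editor's annotation (this mine() binding sits inside the surrounding comment
block and was shipped disabled): the loop below is the naive search -- keep
incrementing the nonce until ethash_check_difficulty accepts the result. Per
the ethash spec the acceptance test is result <= floor(2^256 / difficulty);
the TODO below records that this check, as written against raw 32-byte
difficulty bytes, disagreed with that definition, which may be why the
binding stayed commented out. In pseudo-C the search is just:

    nonce = random_start;
    do {
        ethash_full(&out, full_bytes, &params, header, nonce++);
    } while (!ethash_check_difficulty(&out.result, difficulty));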
- do { - ethash_full(&out, (void *) full_bytes, ¶ms, (const ethash_h256_t *) header, nonce++); - // TODO: disagrees with the spec https://github.com/expanse-org/wiki/wiki/Ethash#mining - } while (!ethash_check_difficulty(&out.result, (const ethash_h256_t *) difficulty)); - - return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":K}", - "mix digest", &out.mix_hash, 32, - "result", &out.result, 32, - "nonce", nonce); -} -*/ - -//get_seedhash(block_number) -static PyObject * -get_seedhash(PyObject *self, PyObject *args) { - unsigned long block_number; - if (!PyArg_ParseTuple(args, "k", &block_number)) - return 0; - if (block_number >= ETHASH_EPOCH_LENGTH * 2048) { - char error_message[1024]; - sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number); - - PyErr_SetString(PyExc_ValueError, error_message); - return 0; - } - ethash_h256_t seedhash = ethash_get_seedhash(block_number); - return Py_BuildValue(PY_STRING_FORMAT, (char *) &seedhash, 32); -} - -static PyMethodDef PyethashMethods[] = - { - {"get_seedhash", get_seedhash, METH_VARARGS, - "get_seedhash(block_number)\n\n" - "Gets the seedhash for a block."}, - {"mkcache_bytes", mkcache_bytes, METH_VARARGS, - "mkcache_bytes(block_number)\n\n" - "Makes a byte array for the cache for given block number\n"}, - /*{"calc_dataset_bytes", calc_dataset_bytes, METH_VARARGS, - "calc_dataset_bytes(full_size, cache_bytes)\n\n" - "Makes a byte array for the dataset for a given size given cache bytes"},*/ - {"hashimoto_light", hashimoto_light, METH_VARARGS, - "hashimoto_light(block_number, cache_bytes, header, nonce)\n\n" - "Runs the hashimoto hashing function just using cache bytes. Takes an int (full_size), byte array (cache_bytes), another byte array (header), and an int (nonce). Returns an object containing the mix digest, and hash result."}, - /*{"hashimoto_full", hashimoto_full, METH_VARARGS, - "hashimoto_full(dataset_bytes, header, nonce)\n\n" - "Runs the hashimoto hashing function using the dataset bytes. Useful for testing. Returns an object containing the mix digest (byte array), and hash result (another byte array)."}, - {"mine", mine, METH_VARARGS, - "mine(dataset_bytes, header, difficulty_bytes)\n\n" - "Mine for an adequate header. 
Returns an object containing the mix digest (byte array), hash result (another byte array) and nonce (an int)."},*/ - {NULL, NULL, 0, NULL} - }; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef PyethashModule = { - PyModuleDef_HEAD_INIT, - "pyethash", - "...", - -1, - PyethashMethods -}; - -PyMODINIT_FUNC PyInit_pyethash(void) { - PyObject *module = PyModule_Create(&PyethashModule); - // Following Spec: https://github.com/expanse-org/wiki/wiki/Ethash#definitions - PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION); - PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT); - PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH); - PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT); - PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH); - PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH); - PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES); - PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES); - PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS); - PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS); - PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES); - return module; -} -#else -PyMODINIT_FUNC -initpyethash(void) { - PyObject *module = Py_InitModule("pyethash", PyethashMethods); - // Following Spec: https://github.com/expanse-org/wiki/wiki/Ethash#definitions - PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION); - PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT); - PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH); - PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT); - PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH); - PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH); - PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES); - PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES); - PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS); - PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS); - PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES); -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/CMakeLists.txt b/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/CMakeLists.txt deleted file mode 100644 index f94531c3df0e6..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/CMakeLists.txt +++ /dev/null @@ -1,66 +0,0 @@ -if (MSVC) - if (NOT BOOST_ROOT) - set (BOOST_ROOT "$ENV{BOOST_ROOT}") - endif() - set (CMAKE_PREFIX_PATH BOOST_ROOT) -endif() - -IF( NOT Boost_FOUND ) - # use multithreaded boost libraries, with -mt suffix - set(Boost_USE_MULTITHREADED ON) - - if (MSVC) - # TODO handle other msvc versions or it will fail find them - set(Boost_COMPILER -vc120) - # use static boost libraries *.lib - set(Boost_USE_STATIC_LIBS ON) - elseif (APPLE) - - # use static boost libraries *.a - set(Boost_USE_STATIC_LIBS ON) - - elseif (UNIX) - # use dynamic boost libraries .dll - set(Boost_USE_STATIC_LIBS OFF) - - endif() - find_package(Boost 1.48.0 COMPONENTS unit_test_framework system filesystem) -ENDIF() - -IF (Boost_FOUND) - message(STATUS 
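# Editor's annotation (not part of the deleted file): the branches above pin
# Boost linkage per platform -- static libs plus a hard-coded -vc120 toolset
# on MSVC, static archives on Apple, shared libraries elsewhere (the UNIX
# branch's ".dll" comment is an upstream slip for ".so"); the matching
# BOOST_TEST_DYN_LINK definition for the shared case is added a few lines
# below. When linkage is left to toolchain defaults, the whole block reduces
# to the single call already made above:
#
#   find_package(Boost 1.48.0 COMPONENTS unit_test_framework system filesystem)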
"boost header: ${Boost_INCLUDE_DIRS}") - message(STATUS "boost libs : ${Boost_LIBRARIES}") - - include_directories( ${Boost_INCLUDE_DIR} ) - include_directories(../../src) - - link_directories(${Boost_LIBRARY_DIRS}) - file(GLOB HEADERS "*.h") - if ((NOT MSVC) AND (NOT APPLE)) - ADD_DEFINITIONS(-DBOOST_TEST_DYN_LINK) - endif() - if (NOT CRYPTOPP_FOUND) - find_package (CryptoPP) - endif() - - if (CRYPTOPP_FOUND) - add_definitions(-DWITH_CRYPTOPP) - endif() - - if (NOT MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ") - endif() - - add_executable (Test "./test.cpp" ${HEADERS}) - target_link_libraries(Test ${ETHHASH_LIBS}) - target_link_libraries(Test ${Boost_FILESYSTEM_LIBRARIES}) - target_link_libraries(Test ${Boost_SYSTEM_LIBRARIES}) - target_link_libraries(Test ${Boost_UNIT_TEST_FRAMEWORK_LIBRARIES}) - - if (CRYPTOPP_FOUND) - TARGET_LINK_LIBRARIES(Test ${CRYPTOPP_LIBRARIES}) - endif() - - enable_testing () - add_test(NAME ethash COMMAND Test) -ENDIF() diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/test.cpp b/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/test.cpp deleted file mode 100644 index ffcf10518607c..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/test.cpp +++ /dev/null @@ -1,669 +0,0 @@ -#include -#include -#include -#include -#include - -#ifdef WITH_CRYPTOPP - -#include - -#else -#include -#endif // WITH_CRYPTOPP - -#ifdef _WIN32 -#include -#include -#endif - -#define BOOST_TEST_MODULE Daggerhashimoto -#define BOOST_TEST_MAIN - -#include -#include -#include -#include -#include - -using namespace std; -using byte = uint8_t; -using bytes = std::vector; -namespace fs = boost::filesystem; - -// Just an alloca "wrapper" to silence uint64_t to size_t conversion warnings in windows -// consider replacing alloca calls with something better though! -#define our_alloca(param__) alloca((size_t)(param__)) - - -// some functions taken from exp::dev for convenience. -std::string bytesToHexString(const uint8_t *str, const uint64_t s) -{ - std::ostringstream ret; - - for (size_t i = 0; i < s; ++i) - ret << std::hex << std::setfill('0') << std::setw(2) << std::nouppercase << (int) str[i]; - - return ret.str(); -} - -std::string blockhashToHexString(ethash_h256_t* _hash) -{ - return bytesToHexString((uint8_t*)_hash, 32); -} - -int fromHex(char _i) -{ - if (_i >= '0' && _i <= '9') - return _i - '0'; - if (_i >= 'a' && _i <= 'f') - return _i - 'a' + 10; - if (_i >= 'A' && _i <= 'F') - return _i - 'A' + 10; - - BOOST_REQUIRE_MESSAGE(false, "should never get here"); - return -1; -} - -bytes hexStringToBytes(std::string const& _s) -{ - unsigned s = (_s[0] == '0' && _s[1] == 'x') ? 2 : 0; - std::vector ret; - ret.reserve((_s.size() - s + 1) / 2); - - if (_s.size() % 2) - try - { - ret.push_back(fromHex(_s[s++])); - } - catch (...) 
- { - ret.push_back(0); - } - for (unsigned i = s; i < _s.size(); i += 2) - try - { - ret.push_back((byte)(fromHex(_s[i]) * 16 + fromHex(_s[i + 1]))); - } - catch (...){ - ret.push_back(0); - } - return ret; -} - -ethash_h256_t stringToBlockhash(std::string const& _s) -{ - ethash_h256_t ret; - bytes b = hexStringToBytes(_s); - memcpy(&ret, b.data(), b.size()); - return ret; -} - - - -BOOST_AUTO_TEST_CASE(fnv_hash_check) { - uint32_t x = 1235U; - const uint32_t - y = 9999999U, - expected = (FNV_PRIME * x) ^y; - - x = fnv_hash(x, y); - - BOOST_REQUIRE_MESSAGE(x == expected, - "\nexpected: " << expected << "\n" - << "actual: " << x << "\n"); - -} - -BOOST_AUTO_TEST_CASE(SHA256_check) { - ethash_h256_t input; - ethash_h256_t out; - memcpy(&input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - SHA3_256(&out, (uint8_t*)&input, 32); - const std::string - expected = "2b5ddf6f4d21c23de216f44d5e4bdc68e044b71897837ea74c83908be7037cd7", - actual = bytesToHexString((uint8_t*)&out, 32); - BOOST_REQUIRE_MESSAGE(expected == actual, - "\nexpected: " << expected.c_str() << "\n" - << "actual: " << actual.c_str() << "\n"); -} - -BOOST_AUTO_TEST_CASE(SHA512_check) { - uint8_t input[64], out[64]; - memcpy(input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 64); - SHA3_512(out, input, 64); - const std::string - expected = "0be8a1d334b4655fe58c6b38789f984bb13225684e86b20517a55ab2386c7b61c306f25e0627c60064cecd6d80cd67a82b3890bd1289b7ceb473aad56a359405", - actual = bytesToHexString(out, 64); - BOOST_REQUIRE_MESSAGE(expected == actual, - "\nexpected: " << expected.c_str() << "\n" - << "actual: " << actual.c_str() << "\n"); -} - -BOOST_AUTO_TEST_CASE(test_swap_endian32) { - uint32_t v32 = (uint32_t)0xBAADF00D; - v32 = ethash_swap_u32(v32); - BOOST_REQUIRE_EQUAL(v32, (uint32_t)0x0DF0ADBA); -} - -BOOST_AUTO_TEST_CASE(test_swap_endian64) { - uint64_t v64 = (uint64_t)0xFEE1DEADDEADBEEF; - v64 = ethash_swap_u64(v64); - BOOST_REQUIRE_EQUAL(v64, (uint64_t)0xEFBEADDEADDEE1FE); -} - -BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_check) { - uint64_t full_size = ethash_get_datasize(0); - uint64_t cache_size = ethash_get_cachesize(0); - BOOST_REQUIRE_MESSAGE(full_size < ETHASH_DATASET_BYTES_INIT, - "\nfull size: " << full_size << "\n" - << "should be less than or equal to: " << ETHASH_DATASET_BYTES_INIT << "\n"); - BOOST_REQUIRE_MESSAGE(full_size + 20 * ETHASH_MIX_BYTES >= ETHASH_DATASET_BYTES_INIT, - "\nfull size + 20*MIX_BYTES: " << full_size + 20 * ETHASH_MIX_BYTES << "\n" - << "should be greater than or equal to: " << ETHASH_DATASET_BYTES_INIT << "\n"); - BOOST_REQUIRE_MESSAGE(cache_size < ETHASH_DATASET_BYTES_INIT / 32, - "\ncache size: " << cache_size << "\n" - << "should be less than or equal to: " << ETHASH_DATASET_BYTES_INIT / 32 << "\n"); -} - -BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_calcifide_check) { - uint64_t full_size = ethash_get_datasize(0); - uint64_t cache_size = ethash_get_cachesize(0); - const uint32_t expected_full_size = 1073739904; - const uint32_t expected_cache_size = 16776896; - BOOST_REQUIRE_MESSAGE(full_size == expected_full_size, - "\nexpected: " << expected_cache_size << "\n" - << "actual: " << full_size << "\n"); - BOOST_REQUIRE_MESSAGE(cache_size == expected_cache_size, - "\nexpected: " << expected_cache_size << "\n" - << "actual: " << cache_size << "\n"); -} - -BOOST_AUTO_TEST_CASE(ethash_check_difficulty_check) { - ethash_h256_t hash; - ethash_h256_t target; - memcpy(&hash, "11111111111111111111111111111111", 32); - memcpy(&target, 
"22222222222222222222222222222222", 32); - BOOST_REQUIRE_MESSAGE( - ethash_check_difficulty(&hash, &target), - "\nexpected \"" << std::string((char *) &hash, 32).c_str() << "\" to have the same or less difficulty than \"" << std::string((char *) &target, 32).c_str() << "\"\n"); - BOOST_REQUIRE_MESSAGE( - ethash_check_difficulty(&hash, &hash), ""); - // "\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << hash << "\"\n"); - memcpy(&target, "11111111111111111111111111111112", 32); - BOOST_REQUIRE_MESSAGE( - ethash_check_difficulty(&hash, &target), ""); - // "\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << target << "\"\n"); - memcpy(&target, "11111111111111111111111111111110", 32); - BOOST_REQUIRE_MESSAGE( - !ethash_check_difficulty(&hash, &target), ""); - // "\nexpected \"" << hash << "\" to have more difficulty than \"" << target << "\"\n"); -} - -BOOST_AUTO_TEST_CASE(test_ethash_io_mutable_name) { - char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE]; - // should have at least 8 bytes provided since this is what we test :) - ethash_h256_t seed1 = ethash_h256_static_init(0, 10, 65, 255, 34, 55, 22, 8); - ethash_io_mutable_name(1, &seed1, mutable_name); - BOOST_REQUIRE_EQUAL(0, strcmp(mutable_name, "full-R1-000a41ff22371608")); - ethash_h256_t seed2 = ethash_h256_static_init(0, 0, 0, 0, 0, 0, 0, 0); - ethash_io_mutable_name(44, &seed2, mutable_name); - BOOST_REQUIRE_EQUAL(0, strcmp(mutable_name, "full-R44-0000000000000000")); -} - -BOOST_AUTO_TEST_CASE(test_ethash_dir_creation) { - ethash_h256_t seedhash; - FILE *f = NULL; - memset(&seedhash, 0, 32); - BOOST_REQUIRE_EQUAL( - ETHASH_IO_MEMO_MISMATCH, - ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 64, false) - ); - BOOST_REQUIRE(f); - - // let's make sure that the directory was created - BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/"))); - - // cleanup - fclose(f); - fs::remove_all("./test_ethash_directory/"); -} - -BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_match) { - uint64_t full_size; - uint64_t cache_size; - ethash_h256_t seed; - ethash_h256_t hash; - FILE* f; - memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - - cache_size = 1024; - full_size = 1024 * 32; - - ethash_light_t light = ethash_light_new_internal(cache_size, &seed); - ethash_full_t full = ethash_full_new_internal( - "./test_ethash_directory/", - seed, - full_size, - light, - NULL - ); - BOOST_ASSERT(full); - // let's make sure that the directory was created - BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/"))); - // delete the full here so that memory is properly unmapped and FILE handler freed - ethash_full_delete(full); - // and check that we have a match when checking again - BOOST_REQUIRE_EQUAL( - ETHASH_IO_MEMO_MATCH, - ethash_io_prepare("./test_ethash_directory/", seed, &f, full_size, false) - ); - BOOST_REQUIRE(f); - - // cleanup - fclose(f); - ethash_light_delete(light); - fs::remove_all("./test_ethash_directory/"); -} - -BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_size_mismatch) { - static const int blockn = 0; - ethash_h256_t seedhash = ethash_get_seedhash(blockn); - FILE *f = NULL; - BOOST_REQUIRE_EQUAL( - ETHASH_IO_MEMO_MISMATCH, - ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 64, false) - ); - BOOST_REQUIRE(f); - fclose(f); - - // let's make sure that the directory was created - BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/"))); - // and check that we get the 
size mismatch detected if we request diffferent size - BOOST_REQUIRE_EQUAL( - ETHASH_IO_MEMO_SIZE_MISMATCH, - ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 65, false) - ); - - // cleanup - fs::remove_all("./test_ethash_directory/"); -} - -BOOST_AUTO_TEST_CASE(test_ethash_get_default_dirname) { - char result[256]; - // this is really not an easy thing to test for in a unit test - // TODO: Improve this test ... -#ifdef _WIN32 - char homedir[256]; - BOOST_REQUIRE(SUCCEEDED(SHGetFolderPathA(NULL, CSIDL_PROFILE, NULL, 0, (CHAR*)homedir))); - BOOST_REQUIRE(ethash_get_default_dirname(result, 256)); - std::string res = std::string(homedir) + std::string("\\AppData\\Local\\Ethash\\"); -#else - char* homedir = getenv("HOME"); - BOOST_REQUIRE(ethash_get_default_dirname(result, 256)); - std::string res = std::string(homedir) + std::string("/.ethash/"); -#endif - BOOST_CHECK_MESSAGE(strcmp(res.c_str(), result) == 0, - "Expected \"" + res + "\" but got \"" + std::string(result) + "\"" - ); -} - -BOOST_AUTO_TEST_CASE(light_and_full_client_checks) { - uint64_t full_size; - uint64_t cache_size; - ethash_h256_t seed; - ethash_h256_t hash; - ethash_h256_t difficulty; - ethash_return_value_t light_out; - ethash_return_value_t full_out; - memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - - // Set the difficulty - ethash_h256_set(&difficulty, 0, 197); - ethash_h256_set(&difficulty, 1, 90); - for (int i = 2; i < 32; i++) - ethash_h256_set(&difficulty, i, 255); - - cache_size = 1024; - full_size = 1024 * 32; - - ethash_light_t light = ethash_light_new_internal(cache_size, &seed); - ethash_full_t full = ethash_full_new_internal( - "./test_ethash_directory/", - seed, - full_size, - light, - NULL - ); - BOOST_ASSERT(full); - { - const std::string - expected = 
"2da2b506f21070e1143d908e867962486d6b0a02e31d468fd5e3a7143aafa76a14201f63374314e2a6aaf84ad2eb57105dea3378378965a1b3873453bb2b78f9a8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995c259440b89fa3481c2c33171477c305c8e1e421f8d8f6d59585449d0034f3e421808d8da6bbd0b6378f567647cc6c4ba6c434592b198ad444e7284905b7c6adaf70bf43ec2daa7bd5e8951aa609ab472c124cf9eba3d38cff5091dc3f58409edcc386c743c3bd66f92408796ee1e82dd149eaefbf52b00ce33014a6eb3e50625413b072a58bc01da28262f42cbe4f87d4abc2bf287d15618405a1fe4e386fcdafbb171064bd99901d8f81dd6789396ce5e364ac944bbbd75a7827291c70b42d26385910cd53ca535ab29433dd5c5714d26e0dce95514c5ef866329c12e958097e84462197c2b32087849dab33e88b11da61d52f9dbc0b92cc61f742c07dbbf751c49d7678624ee60dfbe62e5e8c47a03d8247643f3d16ad8c8e663953bcda1f59d7e2d4a9bf0768e789432212621967a8f41121ad1df6ae1fa78782530695414c6213942865b2730375019105cae91a4c17a558d4b63059661d9f108362143107babe0b848de412e4da59168cce82bfbff3c99e022dd6ac1e559db991f2e3f7bb910cefd173e65ed00a8d5d416534e2c8416ff23977dbf3eb7180b75c71580d08ce95efeb9b0afe904ea12285a392aff0c8561ff79fca67f694a62b9e52377485c57cc3598d84cac0a9d27960de0cc31ff9bbfe455acaa62c8aa5d2cce96f345da9afe843d258a99c4eaf3650fc62efd81c7b81cd0d534d2d71eeda7a6e315d540b4473c80f8730037dc2ae3e47b986240cfc65ccc565f0d8cde0bc68a57e39a271dda57440b3598bee19f799611d25731a96b5dbbbefdff6f4f656161462633030d62560ea4e9c161cf78fc96a2ca5aaa32453a6c5dea206f766244e8c9d9a8dc61185ce37f1fc804459c5f07434f8ecb34141b8dcae7eae704c950b55556c5f40140c3714b45eddb02637513268778cbf937a33e4e33183685f9deb31ef54e90161e76d969587dd782eaa94e289420e7c2ee908517f5893a26fdb5873d68f92d118d4bcf98d7a4916794d6ab290045e30f9ea00ca547c584b8482b0331ba1539a0f2714fddc3a0b06b0cfbb6a607b8339c39bcfd6640b1f653e9d70ef6c985b", - actual = bytesToHexString((uint8_t const *) light->cache, cache_size); - - BOOST_REQUIRE_MESSAGE(expected == actual, - "\nexpected: " << expected.c_str() << "\n" - << "actual: " << actual.c_str() << "\n"); - } - { - node node; - ethash_calculate_dag_item(&node, 0, light); - const std::string - actual = bytesToHexString((uint8_t const *) &node, sizeof(node)), - expected = "b1698f829f90b35455804e5185d78f549fcb1bdce2bee006d4d7e68eb154b596be1427769eb1c3c3e93180c760af75f81d1023da6a0ffbe321c153a7c0103597"; - BOOST_REQUIRE_MESSAGE(actual == expected, - "\n" << "expected: " << expected.c_str() << "\n" - << "actual: " << actual.c_str() << "\n"); - } - { - for (int i = 0; i < full_size / sizeof(node); ++i) { - for (uint32_t j = 0; j < 32; ++j) { - node expected_node; - ethash_calculate_dag_item(&expected_node, j, light); - const std::string - actual = bytesToHexString((uint8_t const *) &(full->data[j]), sizeof(node)), - expected = bytesToHexString((uint8_t const *) &expected_node, sizeof(node)); - BOOST_REQUIRE_MESSAGE(actual == expected, - "\ni: " << j << "\n" - << "expected: " << expected.c_str() << "\n" - << "actual: " << actual.c_str() << "\n"); - } - } - } - { - uint64_t nonce = 0x7c7c597c; - full_out = ethash_full_compute(full, hash, nonce); - BOOST_REQUIRE(full_out.success); - light_out = ethash_light_compute_internal(light, full_size, hash, nonce); - BOOST_REQUIRE(light_out.success); - const std::string - light_result_string = blockhashToHexString(&light_out.result), - 
full_result_string = blockhashToHexString(&full_out.result); - BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string, - "\nlight result: " << light_result_string.c_str() << "\n" - << "full result: " << full_result_string.c_str() << "\n"); - const std::string - light_mix_hash_string = blockhashToHexString(&light_out.mix_hash), - full_mix_hash_string = blockhashToHexString(&full_out.mix_hash); - BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string, - "\nlight mix hash: " << light_mix_hash_string.c_str() << "\n" - << "full mix hash: " << full_mix_hash_string.c_str() << "\n"); - ethash_h256_t check_hash; - ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash); - const std::string check_hash_string = blockhashToHexString(&check_hash); - BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string, - "\ncheck hash string: " << check_hash_string.c_str() << "\n" - << "full result: " << full_result_string.c_str() << "\n"); - } - { - full_out = ethash_full_compute(full, hash, 5); - BOOST_REQUIRE(full_out.success); - std::string - light_result_string = blockhashToHexString(&light_out.result), - full_result_string = blockhashToHexString(&full_out.result); - BOOST_REQUIRE_MESSAGE(light_result_string != full_result_string, - "\nlight result and full result should differ: " << light_result_string.c_str() << "\n"); - - light_out = ethash_light_compute_internal(light, full_size, hash, 5); - BOOST_REQUIRE(light_out.success); - light_result_string = blockhashToHexString(&light_out.result); - BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string, - "\nlight result and full result should be the same\n" - << "light result: " << light_result_string.c_str() << "\n" - << "full result: " << full_result_string.c_str() << "\n"); - std::string - light_mix_hash_string = blockhashToHexString(&light_out.mix_hash), - full_mix_hash_string = blockhashToHexString(&full_out.mix_hash); - BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string, - "\nlight mix hash: " << light_mix_hash_string.c_str() << "\n" - << "full mix hash: " << full_mix_hash_string.c_str() << "\n"); - BOOST_REQUIRE_MESSAGE(ethash_check_difficulty(&full_out.result, &difficulty), - "ethash_check_difficulty failed" - ); - BOOST_REQUIRE_MESSAGE(ethash_quick_check_difficulty(&hash, 5U, &full_out.mix_hash, &difficulty), - "ethash_quick_check_difficulty failed" - ); - } - ethash_light_delete(light); - ethash_full_delete(full); - fs::remove_all("./test_ethash_directory/"); -} - -BOOST_AUTO_TEST_CASE(ethash_full_new_when_dag_exists_with_wrong_size) { - uint64_t full_size; - uint64_t cache_size; - ethash_h256_t seed; - ethash_h256_t hash; - ethash_return_value_t full_out; - ethash_return_value_t light_out; - memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - - cache_size = 1024; - full_size = 1024 * 32; - - // first make a DAG file of "wrong size" - FILE *f; - BOOST_REQUIRE_EQUAL( - ETHASH_IO_MEMO_MISMATCH, - ethash_io_prepare("./test_ethash_directory/", seed, &f, 64, false) - ); - fclose(f); - - // then create new DAG, which should detect the wrong size and force create a new file - ethash_light_t light = ethash_light_new_internal(cache_size, &seed); - BOOST_ASSERT(light); - ethash_full_t full = ethash_full_new_internal( - "./test_ethash_directory/", - seed, - full_size, - light, - NULL - ); - BOOST_ASSERT(full); - { - uint64_t nonce = 0x7c7c597c; - full_out = ethash_full_compute(full, hash, nonce); - BOOST_REQUIRE(full_out.success); - light_out 
= ethash_light_compute_internal(light, full_size, hash, nonce); - BOOST_REQUIRE(light_out.success); - const std::string - light_result_string = blockhashToHexString(&light_out.result), - full_result_string = blockhashToHexString(&full_out.result); - BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string, - "\nlight result: " << light_result_string.c_str() << "\n" - << "full result: " << full_result_string.c_str() << "\n"); - const std::string - light_mix_hash_string = blockhashToHexString(&light_out.mix_hash), - full_mix_hash_string = blockhashToHexString(&full_out.mix_hash); - BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string, - "\nlight mix hash: " << light_mix_hash_string.c_str() << "\n" - << "full mix hash: " << full_mix_hash_string.c_str() << "\n"); - ethash_h256_t check_hash; - ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash); - const std::string check_hash_string = blockhashToHexString(&check_hash); - BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string, - "\ncheck hash string: " << check_hash_string.c_str() << "\n" - << "full result: " << full_result_string.c_str() << "\n"); - } - - ethash_light_delete(light); - ethash_full_delete(full); - fs::remove_all("./test_ethash_directory/"); -} - -static bool g_executed = false; -static unsigned g_prev_progress = 0; -static int test_full_callback(unsigned _progress) -{ - g_executed = true; - BOOST_CHECK(_progress >= g_prev_progress); - g_prev_progress = _progress; - return 0; -} - -static int test_full_callback_that_fails(unsigned _progress) -{ - return 1; -} - -static int test_full_callback_create_incomplete_dag(unsigned _progress) -{ - if (_progress >= 30) { - return 1; - } - return 0; -} - -BOOST_AUTO_TEST_CASE(full_client_callback) { - uint64_t full_size; - uint64_t cache_size; - ethash_h256_t seed; - ethash_h256_t hash; - memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - - cache_size = 1024; - full_size = 1024 * 32; - - ethash_light_t light = ethash_light_new_internal(cache_size, &seed); - ethash_full_t full = ethash_full_new_internal( - "./test_ethash_directory/", - seed, - full_size, - light, - test_full_callback - ); - BOOST_ASSERT(full); - BOOST_CHECK(g_executed); - BOOST_REQUIRE_EQUAL(g_prev_progress, 100); - - ethash_full_delete(full); - ethash_light_delete(light); - fs::remove_all("./test_ethash_directory/"); -} - -BOOST_AUTO_TEST_CASE(failing_full_client_callback) { - uint64_t full_size; - uint64_t cache_size; - ethash_h256_t seed; - ethash_h256_t hash; - memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - - cache_size = 1024; - full_size = 1024 * 32; - - ethash_light_t light = ethash_light_new_internal(cache_size, &seed); - ethash_full_t full = ethash_full_new_internal( - "./test_ethash_directory/", - seed, - full_size, - light, - test_full_callback_that_fails - ); - BOOST_ASSERT(!full); - ethash_light_delete(light); - fs::remove_all("./test_ethash_directory/"); -} - -BOOST_AUTO_TEST_CASE(test_incomplete_dag_file) { - uint64_t full_size; - uint64_t cache_size; - ethash_h256_t seed; - ethash_h256_t hash; - memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32); - - cache_size = 1024; - full_size = 1024 * 32; - - ethash_light_t light = ethash_light_new_internal(cache_size, &seed); - // create a full but stop at 30%, so no magic number is written - ethash_full_t full = ethash_full_new_internal( - 
"./test_ethash_directory/", - seed, - full_size, - light, - test_full_callback_create_incomplete_dag - ); - BOOST_ASSERT(!full); - FILE *f = NULL; - // confirm that we get a size_mismatch because the magic number is missing - BOOST_REQUIRE_EQUAL( - ETHASH_IO_MEMO_SIZE_MISMATCH, - ethash_io_prepare("./test_ethash_directory/", seed, &f, full_size, false) - ); - ethash_light_delete(light); - fs::remove_all("./test_ethash_directory/"); -} - -BOOST_AUTO_TEST_CASE(test_block22_verification) { - // from POC-9 testnet, epoch 0 - ethash_light_t light = ethash_light_new(22); - ethash_h256_t seedhash = stringToBlockhash("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d"); - BOOST_ASSERT(light); - ethash_return_value_t ret = ethash_light_compute( - light, - seedhash, - 0x495732e0ed7a801cU - ); - BOOST_REQUIRE_EQUAL(blockhashToHexString(&ret.result), "00000b184f1fdd88bfd94c86c39e65db0c36144d5e43f745f722196e730cb614"); - ethash_h256_t difficulty = ethash_h256_static_init(0x2, 0x5, 0x40); - BOOST_REQUIRE(ethash_check_difficulty(&ret.result, &difficulty)); - ethash_light_delete(light); -} - -BOOST_AUTO_TEST_CASE(test_block30001_verification) { - // from POC-9 testnet, epoch 1 - ethash_light_t light = ethash_light_new(30001); - ethash_h256_t seedhash = stringToBlockhash("7e44356ee3441623bc72a683fd3708fdf75e971bbe294f33e539eedad4b92b34"); - BOOST_ASSERT(light); - ethash_return_value_t ret = ethash_light_compute( - light, - seedhash, - 0x318df1c8adef7e5eU - ); - ethash_h256_t difficulty = ethash_h256_static_init(0x17, 0x62, 0xff); - BOOST_REQUIRE(ethash_check_difficulty(&ret.result, &difficulty)); - ethash_light_delete(light); -} - -BOOST_AUTO_TEST_CASE(test_block60000_verification) { - // from POC-9 testnet, epoch 2 - ethash_light_t light = ethash_light_new(60000); - ethash_h256_t seedhash = stringToBlockhash("5fc898f16035bf5ac9c6d9077ae1e3d5fc1ecc3c9fd5bee8bb00e810fdacbaa0"); - BOOST_ASSERT(light); - ethash_return_value_t ret = ethash_light_compute( - light, - seedhash, - 0x50377003e5d830caU - ); - ethash_h256_t difficulty = ethash_h256_static_init(0x25, 0xa6, 0x1e); - BOOST_REQUIRE(ethash_check_difficulty(&ret.result, &difficulty)); - ethash_light_delete(light); -} - -// Test of Full DAG creation with the minimal ethash.h API. -// Commented out since travis tests would take too much time. -// Uncomment and run on your own machine if you want to confirm -// it works fine. -#if 0 -static int progress_cb(unsigned _progress) -{ - printf("CREATING DAG. 
PROGRESS: %u\n", _progress); - fflush(stdout); - return 0; -} - -BOOST_AUTO_TEST_CASE(full_dag_test) { - ethash_light_t light = ethash_light_new(55); - BOOST_ASSERT(light); - ethash_full_t full = ethash_full_new(light, progress_cb); - BOOST_ASSERT(full); - ethash_light_delete(light); - ethash_full_delete(full); -} -#endif diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/test.sh b/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/test.sh deleted file mode 100644 index 92b6b8b663874..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/c/test.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Strict mode -set -e - -VALGRIND_ARGS="--tool=memcheck" -VALGRIND_ARGS+=" --leak-check=yes" -VALGRIND_ARGS+=" --track-origins=yes" -VALGRIND_ARGS+=" --show-reachable=yes" -VALGRIND_ARGS+=" --num-callers=20" -VALGRIND_ARGS+=" --track-fds=yes" - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ]; do - DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - SOURCE="$(readlink "$SOURCE")" - [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" -done -TEST_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -rm -rf $TEST_DIR/build -mkdir -p $TEST_DIR/build -cd $TEST_DIR/build ; -cmake ../../.. > /dev/null -make Test -./test/c/Test - -# If we have valgrind also run memory check tests -if hash valgrind 2>/dev/null; then - echo "======== Running tests under valgrind ========"; - cd $TEST_DIR/build/ && valgrind $VALGRIND_ARGS ./test/c/Test -fi diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/.gitignore b/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/.gitignore deleted file mode 100644 index c304fd6150283..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/.gitignore +++ /dev/null @@ -1 +0,0 @@ -python-virtual-env/ diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/requirements.txt b/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/requirements.txt deleted file mode 100644 index 378263c627e10..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -pyethereum==0.7.522 -nose==1.3.4 -pysha3==0.3 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/test.sh b/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/test.sh deleted file mode 100644 index 05c66b550b985..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/test.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# Strict mode -set -e - -if [ -x "$(which virtualenv2)" ] ; then - VIRTUALENV_EXEC=virtualenv2 -elif [ -x "$(which virtualenv)" ] ; then - VIRTUALENV_EXEC=virtualenv -else - echo "Could not find a suitable version of virtualenv" - false -fi - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ]; do - DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - SOURCE="$(readlink "$SOURCE")" - [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" -done -TEST_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -[ -d $TEST_DIR/python-virtual-env ] || $VIRTUALENV_EXEC --system-site-packages $TEST_DIR/python-virtual-env -source $TEST_DIR/python-virtual-env/bin/activate -pip install -r $TEST_DIR/requirements.txt > /dev/null -# force installation of nose in virtualenv even if existing in thereuser's system -pip install nose -I -pip install --upgrade --no-deps --force-reinstall -e $TEST_DIR/../.. 
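# Editor's annotation (not part of the deleted script): the editable install
# above ("-e $TEST_DIR/../..") compiles the C extension from the repository
# root, presumably via a top-level setup.py, so the nosetests run below picks
# up the freshly built pyethash from the virtualenv. The manual equivalent,
# as a sketch:
#
#   source python-virtual-env/bin/activate
#   pip install -e ../..          # rebuild and link the extension in place
#   nosetests --with-doctest -v   # same suite the script runs below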
-cd $TEST_DIR -nosetests --with-doctest -v --nocapture diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/test_pyethash.py b/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/test_pyethash.py deleted file mode 100644 index 7eb1b60c7be73..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/python/test_pyethash.py +++ /dev/null @@ -1,105 +0,0 @@ -import pyethash -from random import randint - -def test_get_cache_size_not_None(): - for _ in range(100): - block_num = randint(0,12456789) - out = pyethash.get_cache_size(block_num) - assert out != None - -def test_get_full_size_not_None(): - for _ in range(100): - block_num = randint(0,12456789) - out = pyethash.get_full_size(block_num) - assert out != None - -def test_get_cache_size_based_on_EPOCH(): - for _ in range(100): - block_num = randint(0,12456789) - out1 = pyethash.get_cache_size(block_num) - out2 = pyethash.get_cache_size((block_num // pyethash.EPOCH_LENGTH) * pyethash.EPOCH_LENGTH) - assert out1 == out2 - -def test_get_full_size_based_on_EPOCH(): - for _ in range(100): - block_num = randint(0,12456789) - out1 = pyethash.get_full_size(block_num) - out2 = pyethash.get_full_size((block_num // pyethash.EPOCH_LENGTH) * pyethash.EPOCH_LENGTH) - assert out1 == out2 - -# See light_and_full_client_checks in test.cpp -def test_mkcache_is_as_expected(): - actual = pyethash.mkcache_bytes( - 1024, - "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~").encode('hex') - expected = "2da2b506f21070e1143d908e867962486d6b0a02e31d468fd5e3a7143aafa76a14201f63374314e2a6aaf84ad2eb57105dea3378378965a1b3873453bb2b78f9a8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995c259440b89fa3481c2c33171477c305c8e1e421f8d8f6d59585449d0034f3e421808d8da6bbd0b6378f567647cc6c4ba6c434592b198ad444e7284905b7c6adaf70bf43ec2daa7bd5e8951aa609ab472c124cf9eba3d38cff5091dc3f58409edcc386c743c3bd66f92408796ee1e82dd149eaefbf52b00ce33014a6eb3e50625413b072a58bc01da28262f42cbe4f87d4abc2bf287d15618405a1fe4e386fcdafbb171064bd99901d8f81dd6789396ce5e364ac944bbbd75a7827291c70b42d26385910cd53ca535ab29433dd5c5714d26e0dce95514c5ef866329c12e958097e84462197c2b32087849dab33e88b11da61d52f9dbc0b92cc61f742c07dbbf751c49d7678624ee60dfbe62e5e8c47a03d8247643f3d16ad8c8e663953bcda1f59d7e2d4a9bf0768e789432212621967a8f41121ad1df6ae1fa78782530695414c6213942865b2730375019105cae91a4c17a558d4b63059661d9f108362143107babe0b848de412e4da59168cce82bfbff3c99e022dd6ac1e559db991f2e3f7bb910cefd173e65ed00a8d5d416534e2c8416ff23977dbf3eb7180b75c71580d08ce95efeb9b0afe904ea12285a392aff0c8561ff79fca67f694a62b9e52377485c57cc3598d84cac0a9d27960de0cc31ff9bbfe455acaa62c8aa5d2cce96f345da9afe843d258a99c4eaf3650fc62efd81c7b81cd0d534d2d71eeda7a6e315d540b4473c80f8730037dc2ae3e47b986240cfc65ccc565f0d8cde0bc68a57e39a271dda57440b3598bee19f799611d25731a96b5dbbbefdff6f4f656161462633030d62560ea4e9c161cf78fc96a2ca5aaa32453a6c5dea206f766244e8c9d9a8dc61185ce37f1fc804459c5f07434f8ecb34141b8dcae7eae704c950b55556c5f40140c3714b45eddb02637513268778cbf937a33e4e33183685f9deb31ef54e90161e76d969587dd782eaa94e289420e7c2ee908517f5893a26fdb5873d68f92d118d4bcf98d7a4916794d6ab290045e30f9ea00ca547c584b8482b0331ba1539a0f2714fddc3a0b06b0cfbb6a607b8339c39bcfd6640b1f653e9d70ef6c985b" - assert 
actual == expected - -def test_calc_dataset_is_not_None(): - cache = pyethash.mkcache_bytes( - 1024, - "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") - assert pyethash.calc_dataset_bytes(1024 * 32, cache) != None - -def test_light_and_full_agree(): - cache = pyethash.mkcache_bytes( - 1024, - "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") - full_size = 1024 * 32 - header = "~~~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~" - light_result = pyethash.hashimoto_light(full_size, cache, header, 0) - dataset = pyethash.calc_dataset_bytes(full_size, cache) - full_result = pyethash.hashimoto_full(dataset, header, 0) - assert light_result["mix digest"] != None - assert len(light_result["mix digest"]) == 32 - assert light_result["mix digest"] == full_result["mix digest"] - assert light_result["result"] != None - assert len(light_result["result"]) == 32 - assert light_result["result"] == full_result["result"] - -def int_to_bytes(i): - b = [] - for _ in range(32): - b.append(chr(i & 0xff)) - i >>= 8 - b.reverse() - return "".join(b) - -def test_mining_basic(): - easy_difficulty = int_to_bytes(2**256 - 1) - assert easy_difficulty.encode('hex') == 'f' * 64 - cache = pyethash.mkcache_bytes( - 1024, - "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") - full_size = 1024 * 32 - header = "~~~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~" - dataset = pyethash.calc_dataset_bytes(full_size, cache) - # Check type of outputs - assert type(pyethash.mine(dataset,header,easy_difficulty)) == dict - assert type(pyethash.mine(dataset,header,easy_difficulty)["nonce"]) == long - assert type(pyethash.mine(dataset,header,easy_difficulty)["mix digest"]) == str - assert type(pyethash.mine(dataset,header,easy_difficulty)["result"]) == str - -def test_mining_doesnt_always_return_the_same_value(): - easy_difficulty1 = int_to_bytes(int(2**256 * 0.999)) - # 1 in 1000 difficulty - easy_difficulty2 = int_to_bytes(int(2**256 * 0.001)) - assert easy_difficulty1 != easy_difficulty2 - cache = pyethash.mkcache_bytes( - 1024, - "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") - full_size = 1024 * 32 - header = "~~~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~" - dataset = pyethash.calc_dataset_bytes(full_size, cache) - # Check type of outputs - assert pyethash.mine(dataset, header, easy_difficulty1)['nonce'] != pyethash.mine(dataset, header, easy_difficulty2)['nonce'] - -def test_get_seedhash(): - assert pyethash.get_seedhash(0).encode('hex') == '0' * 64 - import hashlib, sha3 - expected = pyethash.get_seedhash(0) - #print "checking seed hashes:", - for i in range(0, 30000*2048, 30000): - #print i // 30000, - assert pyethash.get_seedhash(i) == expected - expected = hashlib.sha3_256(expected).digest() diff --git a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/test.sh b/Godeps/_workspace/src/github.com/expanse-org/ethash/test/test.sh deleted file mode 100644 index aaeaa878c4fcc..0000000000000 --- a/Godeps/_workspace/src/github.com/expanse-org/ethash/test/test.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Strict mode -set -e - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ]; do - DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - SOURCE="$(readlink "$SOURCE")" - [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" -done -TEST_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -echo -e "\n################# Testing JS ##################" -# TODO: Use mocha and real testing tools instead of rolling our own -cd $TEST_DIR/../js -if [ -x "$(which nodejs)" ] ; then - nodejs test.js -fi -if [ -x "$(which node)" ] ; then - node test.js -fi - -echo -e "\n################# Testing C ##################" -$TEST_DIR/c/test.sh - -# 
Temporarily commenting out python tests until they conform to the API -#echo -e "\n################# Testing Python ##################" -#$TEST_DIR/python/test.sh - -echo "################# Testing Go ##################" -cd $TEST_DIR/.. && go test -timeout 9999s diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index 43082e4d36283..4a4e86efdb26c 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -26,7 +26,6 @@ import ( "strings" "github.com/expanse-org/go-expanse/common" - ) // The ABI holds information about a contract's context and available diff --git a/accounts/abi/bind/backends/nil.go b/accounts/abi/bind/backends/nil.go deleted file mode 100644 index d7ecfd3ea409e..0000000000000 --- a/accounts/abi/bind/backends/nil.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package backends - -import ( - "math/big" - - "github.com/expanse-org/go-expanse/accounts/abi/bind" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" -) - -// This nil assignment ensures compile time that nilBackend implements bind.ContractBackend. -var _ bind.ContractBackend = (*nilBackend)(nil) - -// nilBackend implements bind.ContractBackend, but panics on any method call. -// Its sole purpose is to support the binding tests to construct the generated -// wrappers without calling any methods on them. -type nilBackend struct{} - -func (*nilBackend) ContractCall(common.Address, []byte, bool) ([]byte, error) { - panic("not implemented") -} -func (*nilBackend) EstimateGasLimit(common.Address, *common.Address, *big.Int, []byte) (*big.Int, error) { - panic("not implemented") -} -func (*nilBackend) HasCode(common.Address, bool) (bool, error) { panic("not implemented") } -func (*nilBackend) SuggestGasPrice() (*big.Int, error) { panic("not implemented") } -func (*nilBackend) PendingAccountNonce(common.Address) (uint64, error) { panic("not implemented") } -func (*nilBackend) SendTransaction(*types.Transaction) error { panic("not implemented") } - -// NewNilBackend creates a new binding backend that can be used for instantiation -// but will panic on any invocation. Its sole purpose is to help testing. -func NewNilBackend() bind.ContractBackend { - return new(nilBackend) -} diff --git a/accounts/abi/bind/backends/remote.go b/accounts/abi/bind/backends/remote.go deleted file mode 100644 index 3c6a036103a17..0000000000000 --- a/accounts/abi/bind/backends/remote.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
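// Editor's annotation, not patch content: both backends deleted in this hunk
// open with the compile-time interface assertion seen above,
//
//	var _ bind.ContractBackend = (*nilBackend)(nil)
//
// a standard Go idiom that makes the build fail the moment the struct stops
// satisfying bind.ContractBackend, at zero runtime cost; rpcBackend below
// repeats it for the same reason.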
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package backends - -import ( - "encoding/json" - "fmt" - "math/big" - "sync" - "sync/atomic" - - "github.com/expanse-org/go-expanse/accounts/abi/bind" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/rlp" - "github.com/expanse-org/go-expanse/rpc" -) - -// This nil assignment ensures compile time that rpcBackend implements bind.ContractBackend. -var _ bind.ContractBackend = (*rpcBackend)(nil) - -// rpcBackend implements bind.ContractBackend, and acts as the data provider to -// Expanse contracts bound to Go structs. It uses an RPC connection to delegate -// all its functionality. -// -// Note: The current implementation is a blocking one. This should be replaced -// by a proper async version when a real RPC client is created. -type rpcBackend struct { - client rpc.Client // RPC client connection to interact with an API server - autoid uint32 // ID number to use for the next API request - lock sync.Mutex // Singleton access until we get to request multiplexing -} - -// NewRPCBackend creates a new binding backend to an RPC provider that can be -// used to interact with remote contracts. -func NewRPCBackend(client rpc.Client) bind.ContractBackend { - return &rpcBackend{ - client: client, - } -} - -// request is a JSON RPC request package assembled internally from the client -// method calls. -type request struct { - JSONRPC string `json:"jsonrpc"` // Version of the JSON RPC protocol, always set to 2.0 - ID int `json:"id"` // Auto incrementing ID number for this request - Method string `json:"method"` // Remote procedure name to invoke on the server - Params []interface{} `json:"params"` // List of parameters to pass through (keep types simple) -} - -// response is a JSON RPC response package sent back from the API server. -type response struct { - JSONRPC string `json:"jsonrpc"` // Version of the JSON RPC protocol, always set to 2.0 - ID int `json:"id"` // Auto incrementing ID number for this request - Error *failure `json:"error"` // Any error returned by the remote side - Result json.RawMessage `json:"result"` // Whatever the remote side sends us in reply -} - -// failure is a JSON RPC response error field sent back from the API server. -type failure struct { - Code int `json:"code"` // JSON RPC error code associated with the failure - Message string `json:"message"` // Specific error message of the failure -} - -// request forwards an API request to the RPC server, and parses the response. 
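
Two things in remote.go are worth calling out. The var _ bind.ContractBackend = (*rpcBackend)(nil) line (nil.go above uses the same idiom) is the standard compile-time proof that a type satisfies an interface. And the request/response structs are a hand-rolled JSON-RPC 2.0 envelope; a self-contained sketch of how it serializes, with an illustrative method name taken from this file:

package main

import (
	"encoding/json"
	"fmt"
)

// request mirrors the JSON-RPC 2.0 envelope defined in remote.go.
type request struct {
	JSONRPC string        `json:"jsonrpc"`
	ID      int           `json:"id"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
}

func main() {
	req := request{
		JSONRPC: "2.0",
		ID:      1, // remote.go derives this from an atomic counter
		Method:  "exp_gasPrice",
		Params:  []interface{}{}, // empty slice, never nil, so it encodes as []
	}
	out, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"jsonrpc":"2.0","id":1,"method":"exp_gasPrice","params":[]}
}
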
-// -// This is currently painfully non-concurrent, but it will have to do until we -// find the time for niceties like this :P -func (b *rpcBackend) request(method string, params []interface{}) (json.RawMessage, error) { - b.lock.Lock() - defer b.lock.Unlock() - - // Ugly hack to serialize an empty list properly - if params == nil { - params = []interface{}{} - } - // Assemble the request object - req := &request{ - JSONRPC: "2.0", - ID: int(atomic.AddUint32(&b.autoid, 1)), - Method: method, - Params: params, - } - if err := b.client.Send(req); err != nil { - return nil, err - } - res := new(response) - if err := b.client.Recv(res); err != nil { - return nil, err - } - if res.Error != nil { - if res.Error.Message == bind.ErrNoCode.Error() { - return nil, bind.ErrNoCode - } - return nil, fmt.Errorf("remote error: %s", res.Error.Message) - } - return res.Result, nil -} - -// HasCode implements ContractVerifier.HasCode by retrieving any code associated -// with the contract from the remote node, and checking its size. -func (b *rpcBackend) HasCode(contract common.Address, pending bool) (bool, error) { - // Execute the RPC code retrieval - block := "latest" - if pending { - block = "pending" - } - res, err := b.request("eth_getCode", []interface{}{contract.Hex(), block}) - if err != nil { - return false, err - } - var hex string - if err := json.Unmarshal(res, &hex); err != nil { - return false, err - } - // Convert the response back to a Go byte slice and return - return len(common.FromHex(hex)) > 0, nil -} - -// ContractCall implements ContractCaller.ContractCall, delegating the execution of -// a contract call to the remote node, returning the reply to for local processing. -func (b *rpcBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) { - // Pack up the request into an RPC argument - args := struct { - To common.Address `json:"to"` - Data string `json:"data"` - }{ - To: contract, - Data: common.ToHex(data), - } - // Execute the RPC call and retrieve the response - block := "latest" - if pending { - block = "pending" - } - res, err := b.request("exp_call", []interface{}{args, block}) - if err != nil { - return nil, err - } - var hex string - if err := json.Unmarshal(res, &hex); err != nil { - return nil, err - } - // Convert the response back to a Go byte slice and return - return common.FromHex(hex), nil -} - -// PendingAccountNonce implements ContractTransactor.PendingAccountNonce, delegating -// the current account nonce retrieval to the remote node. -func (b *rpcBackend) PendingAccountNonce(account common.Address) (uint64, error) { - res, err := b.request("exp_getTransactionCount", []interface{}{account.Hex(), "pending"}) - if err != nil { - return 0, err - } - var hex string - if err := json.Unmarshal(res, &hex); err != nil { - return 0, err - } - nonce, ok := new(big.Int).SetString(hex, 0) - if !ok { - return 0, fmt.Errorf("invalid nonce hex: %s", hex) - } - return nonce.Uint64(), nil -} - -// SuggestGasPrice implements ContractTransactor.SuggestGasPrice, delegating the -// gas price oracle request to the remote node. 
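
HasCode, ContractCall and the nonce and gas helpers here all decode node replies that arrive as 0x-prefixed hex strings; new(big.Int).SetString(hex, 0) does the parsing, because base 0 tells big.Int to infer the base from the prefix. In isolation:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Base 0: the "0x" prefix selects hexadecimal automatically.
	nonce, ok := new(big.Int).SetString("0x1a", 0)
	fmt.Println(nonce, ok) // 26 true

	// Malformed input returns ok == false, which the backend maps to an error.
	_, ok = new(big.Int).SetString("0xzz", 0)
	fmt.Println(ok) // false
}
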
-func (b *rpcBackend) SuggestGasPrice() (*big.Int, error) { - res, err := b.request("exp_gasPrice", nil) - if err != nil { - return nil, err - } - var hex string - if err := json.Unmarshal(res, &hex); err != nil { - return nil, err - } - price, ok := new(big.Int).SetString(hex, 0) - if !ok { - return nil, fmt.Errorf("invalid price hex: %s", hex) - } - return price, nil -} - -// EstimateGasLimit implements ContractTransactor.EstimateGasLimit, delegating -// the gas estimation to the remote node. -func (b *rpcBackend) EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error) { - // Pack up the request into an RPC argument - args := struct { - From common.Address `json:"from"` - To *common.Address `json:"to"` - Value *rpc.HexNumber `json:"value"` - Data string `json:"data"` - }{ - From: sender, - To: contract, - Data: common.ToHex(data), - Value: rpc.NewHexNumber(value), - } - // Execute the RPC call and retrieve the response - res, err := b.request("exp_estimateGas", []interface{}{args}) - if err != nil { - return nil, err - } - var hex string - if err := json.Unmarshal(res, &hex); err != nil { - return nil, err - } - estimate, ok := new(big.Int).SetString(hex, 0) - if !ok { - return nil, fmt.Errorf("invalid estimate hex: %s", hex) - } - return estimate, nil -} - -// SendTransaction implements ContractTransactor.SendTransaction, delegating the -// raw transaction injection to the remote node. -func (b *rpcBackend) SendTransaction(tx *types.Transaction) error { - data, err := rlp.EncodeToBytes(tx) - if err != nil { - return err - } - res, err := b.request("exp_sendRawTransaction", []interface{}{common.ToHex(data)}) - if err != nil { - return err - } - var hex string - if err := json.Unmarshal(res, &hex); err != nil { - return err - } - return nil -} diff --git a/accounts/account_manager.go b/accounts/account_manager.go deleted file mode 100644 index 2ea2a11e66a23..0000000000000 --- a/accounts/account_manager.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -// Package accounts implements encrypted storage of secp256k1 private keys. -// -// Keys are stored as encrypted JSON files according to the Web3 Secret Storage specification. -// See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition for more information. 
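
For orientation, a version-3 key file in the Web3 Secret Storage format referenced above looks like the following. The layout matches the encryptedKeyJSONV3/cryptoJSON structs later in this diff; the address is the one from the very-light-scrypt test fixture, while the elided hex payloads and the id are placeholders:

{
  "address": "45dea0fb0bba44f4fcf290bba71fd57d7117cbb8",
  "crypto": {
    "cipher": "aes-128-ctr",
    "ciphertext": "…",
    "cipherparams": { "iv": "…" },
    "kdf": "scrypt",
    "kdfparams": { "dklen": 32, "n": 262144, "p": 1, "r": 8, "salt": "…" },
    "mac": "…"
  },
  "id": "…uuid…",
  "version": 3
}
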
-package accounts - -import ( - "crypto/ecdsa" - crand "crypto/rand" - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" -) - -var ( - ErrLocked = errors.New("account is locked") - ErrNoMatch = errors.New("no key for given address or file") - ErrDecrypt = errors.New("could not decrypt key with given passphrase") -) - -// Account represents a stored key. -// When used as an argument, it selects a unique key file to act on. -type Account struct { - Address common.Address // Expanse account address derived from the key - - // File contains the key file name. - // When Acccount is used as an argument to select a key, File can be left blank to - // select just by address or set to the basename or absolute path of a file in the key - // directory. Accounts returned by Manager will always contain an absolute path. - File string -} - -func (acc *Account) MarshalJSON() ([]byte, error) { - return []byte(`"` + acc.Address.Hex() + `"`), nil -} - -func (acc *Account) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &acc.Address) -} - -// Manager manages a key storage directory on disk. -type Manager struct { - cache *addrCache - keyStore keyStore - mu sync.RWMutex - unlocked map[common.Address]*unlocked -} - -type unlocked struct { - *Key - abort chan struct{} -} - -// NewManager creates a manager for the given directory. -func NewManager(keydir string, scryptN, scryptP int) *Manager { - keydir, _ = filepath.Abs(keydir) - am := &Manager{keyStore: &keyStorePassphrase{keydir, scryptN, scryptP}} - am.init(keydir) - return am -} - -// NewPlaintextManager creates a manager for the given directory. -// Deprecated: Use NewManager. -func NewPlaintextManager(keydir string) *Manager { - keydir, _ = filepath.Abs(keydir) - am := &Manager{keyStore: &keyStorePlain{keydir}} - am.init(keydir) - return am -} - -func (am *Manager) init(keydir string) { - am.unlocked = make(map[common.Address]*unlocked) - am.cache = newAddrCache(keydir) - // TODO: In order for this finalizer to work, there must be no references - // to am. addrCache doesn't keep a reference but unlocked keys do, - // so the finalizer will not trigger until all timed unlocks have expired. - runtime.SetFinalizer(am, func(m *Manager) { - m.cache.close() - }) -} - -// HasAddress reports whether a key with the given address is present. -func (am *Manager) HasAddress(addr common.Address) bool { - return am.cache.hasAddress(addr) -} - -// Accounts returns all key files present in the directory. -func (am *Manager) Accounts() []Account { - return am.cache.accounts() -} - -// DeleteAccount deletes the key matched by account if the passphrase is correct. -// If a contains no filename, the address must match a unique key. -func (am *Manager) DeleteAccount(a Account, passphrase string) error { - // Decrypting the key isn't really necessary, but we do - // it anyway to check the password and zero out the key - // immediately afterwards. - a, key, err := am.getDecryptedKey(a, passphrase) - if key != nil { - zeroKey(key.PrivateKey) - } - if err != nil { - return err - } - // The order is crucial here. The key is dropped from the - // cache after the file is gone so that a reload happening in - // between won't insert it into the cache again. - err = os.Remove(a.File) - if err == nil { - am.cache.delete(a) - } - return err -} - -// Sign signs hash with an unlocked private key matching the given address. 
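
Pulling the Manager API together with the unlock and signing methods that follow, the intended lifecycle is create, unlock, sign. A usage sketch under obvious assumptions (hypothetical keystore path, illustrative passphrase and digest, error handling abbreviated):

package main

import (
	"fmt"

	"github.com/expanse-org/go-expanse/accounts"
)

func main() {
	// Scrypt parameters are the exported defaults from key_store_passphrase.go.
	am := accounts.NewManager("/tmp/keystore", accounts.StandardScryptN, accounts.StandardScryptP)

	acct, err := am.NewAccount("illustrative passphrase")
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", acct.Address.Hex())

	// Unlock indefinitely, then sign a 32-byte digest (all zeros, purely illustrative).
	if err := am.Unlock(acct, "illustrative passphrase"); err != nil {
		panic(err)
	}
	sig, err := am.Sign(acct.Address, make([]byte, 32))
	if err != nil {
		panic(err)
	}
	fmt.Printf("signature: %x\n", sig)
}
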
-func (am *Manager) Sign(addr common.Address, hash []byte) (signature []byte, err error) { - am.mu.RLock() - defer am.mu.RUnlock() - unlockedKey, found := am.unlocked[addr] - if !found { - return nil, ErrLocked - } - return crypto.Sign(hash, unlockedKey.PrivateKey) -} - -// SignWithPassphrase signs hash if the private key matching the given address can be -// decrypted with the given passphrase. -func (am *Manager) SignWithPassphrase(addr common.Address, passphrase string, hash []byte) (signature []byte, err error) { - _, key, err := am.getDecryptedKey(Account{Address: addr}, passphrase) - if err != nil { - return nil, err - } - - defer zeroKey(key.PrivateKey) - return crypto.Sign(hash, key.PrivateKey) -} - -// Unlock unlocks the given account indefinitely. -func (am *Manager) Unlock(a Account, passphrase string) error { - return am.TimedUnlock(a, passphrase, 0) -} - -// Lock removes the private key with the given address from memory. -func (am *Manager) Lock(addr common.Address) error { - am.mu.Lock() - if unl, found := am.unlocked[addr]; found { - am.mu.Unlock() - am.expire(addr, unl, time.Duration(0)*time.Nanosecond) - } else { - am.mu.Unlock() - } - return nil -} - -// TimedUnlock unlocks the given account with the passphrase. The account -// stays unlocked for the duration of timeout. A timeout of 0 unlocks the account -// until the program exits. The account must match a unique key file. -// -// If the account address is already unlocked for a duration, TimedUnlock extends or -// shortens the active unlock timeout. If the address was previously unlocked -// indefinitely the timeout is not altered. -func (am *Manager) TimedUnlock(a Account, passphrase string, timeout time.Duration) error { - a, key, err := am.getDecryptedKey(a, passphrase) - if err != nil { - return err - } - - am.mu.Lock() - defer am.mu.Unlock() - u, found := am.unlocked[a.Address] - if found { - if u.abort == nil { - // The address was unlocked indefinitely, so unlocking - // it with a timeout would be confusing. - zeroKey(key.PrivateKey) - return nil - } else { - // Terminate the expire goroutine and replace it below. - close(u.abort) - } - } - if timeout > 0 { - u = &unlocked{Key: key, abort: make(chan struct{})} - go am.expire(a.Address, u, timeout) - } else { - u = &unlocked{Key: key} - } - am.unlocked[a.Address] = u - return nil -} - -func (am *Manager) getDecryptedKey(a Account, auth string) (Account, *Key, error) { - am.cache.maybeReload() - am.cache.mu.Lock() - a, err := am.cache.find(a) - am.cache.mu.Unlock() - if err != nil { - return a, nil, err - } - key, err := am.keyStore.GetKey(a.Address, a.File, auth) - return a, key, err -} - -func (am *Manager) expire(addr common.Address, u *unlocked, timeout time.Duration) { - t := time.NewTimer(timeout) - defer t.Stop() - select { - case <-u.abort: - // just quit - case <-t.C: - am.mu.Lock() - // only drop if it's still the same key instance that dropLater - // was launched with. we can check that using pointer equality - // because the map stores a new pointer every time the key is - // unlocked. - if am.unlocked[addr] == u { - zeroKey(u.PrivateKey) - delete(am.unlocked, addr) - } - am.mu.Unlock() - } -} - -// NewAccount generates a new key and stores it into the key directory, -// encrypting it with the passphrase. 
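
TimedUnlock and expire form a reusable pattern: a timer races an abort channel, and a pointer-equality check ensures the goroutine only zeroes the exact key instance it was started for, not a newer unlock that replaced it. The same select, stripped to a self-contained sketch:

package main

import (
	"fmt"
	"sync"
	"time"
)

// session mimics the unlocked struct: a secret plus an abort channel.
type session struct {
	secret []byte
	abort  chan struct{}
}

type vault struct {
	mu   sync.Mutex
	open map[string]*session
}

// expire mirrors Manager.expire: drop s after timeout unless aborted first.
func (v *vault) expire(id string, s *session, timeout time.Duration) {
	t := time.NewTimer(timeout)
	defer t.Stop()
	select {
	case <-s.abort:
		// Replaced by a newer unlock; nothing to do.
	case <-t.C:
		v.mu.Lock()
		if v.open[id] == s { // pointer equality: only drop this instance
			for i := range s.secret {
				s.secret[i] = 0
			}
			delete(v.open, id)
		}
		v.mu.Unlock()
	}
}

func main() {
	v := &vault{open: make(map[string]*session)}
	s := &session{secret: []byte("illustrative"), abort: make(chan struct{})}
	v.mu.Lock()
	v.open["acct"] = s
	v.mu.Unlock()

	go v.expire("acct", s, 50*time.Millisecond)
	time.Sleep(100 * time.Millisecond)

	v.mu.Lock()
	fmt.Println("still unlocked:", v.open["acct"] != nil) // false
	v.mu.Unlock()
}
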
-func (am *Manager) NewAccount(passphrase string) (Account, error) { - _, account, err := storeNewKey(am.keyStore, crand.Reader, passphrase) - if err != nil { - return Account{}, err - } - // Add the account to the cache immediately rather - // than waiting for file system notifications to pick it up. - am.cache.add(account) - return account, nil -} - -// AccountByIndex returns the ith account. -func (am *Manager) AccountByIndex(i int) (Account, error) { - accounts := am.Accounts() - if i < 0 || i >= len(accounts) { - return Account{}, fmt.Errorf("account index %d out of range [0, %d]", i, len(accounts)-1) - } - return accounts[i], nil -} - -// Export exports as a JSON key, encrypted with newPassphrase. -func (am *Manager) Export(a Account, passphrase, newPassphrase string) (keyJSON []byte, err error) { - _, key, err := am.getDecryptedKey(a, passphrase) - if err != nil { - return nil, err - } - var N, P int - if store, ok := am.keyStore.(*keyStorePassphrase); ok { - N, P = store.scryptN, store.scryptP - } else { - N, P = StandardScryptN, StandardScryptP - } - return EncryptKey(key, newPassphrase, N, P) -} - -// Import stores the given encrypted JSON key into the key directory. -func (am *Manager) Import(keyJSON []byte, passphrase, newPassphrase string) (Account, error) { - key, err := DecryptKey(keyJSON, passphrase) - if key != nil && key.PrivateKey != nil { - defer zeroKey(key.PrivateKey) - } - if err != nil { - return Account{}, err - } - return am.importKey(key, newPassphrase) -} - -// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase. -func (am *Manager) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (Account, error) { - key := newKeyFromECDSA(priv) - if am.cache.hasAddress(key.Address) { - return Account{}, fmt.Errorf("account already exists") - } - - return am.importKey(key, passphrase) -} - -func (am *Manager) importKey(key *Key, passphrase string) (Account, error) { - a := Account{Address: key.Address, File: am.keyStore.JoinPath(keyFileName(key.Address))} - if err := am.keyStore.StoreKey(a.File, key, passphrase); err != nil { - return Account{}, err - } - am.cache.add(a) - return a, nil -} - -// Update changes the passphrase of an existing account. -func (am *Manager) Update(a Account, passphrase, newPassphrase string) error { - a, key, err := am.getDecryptedKey(a, passphrase) - if err != nil { - return err - } - return am.keyStore.StoreKey(a.File, key, newPassphrase) -} - -// ImportPreSaleKey decrypts the given Expanse presale wallet and stores -// a key file in the key directory. The key file is encrypted with the same passphrase. -func (am *Manager) ImportPreSaleKey(keyJSON []byte, passphrase string) (Account, error) { - a, _, err := importPreSaleKey(am.keyStore, keyJSON, passphrase) - if err != nil { - return a, err - } - am.cache.add(a) - return a, nil -} - -// zeroKey zeroes a private key in memory. -func zeroKey(k *ecdsa.PrivateKey) { - b := k.D.Bits() - for i := range b { - b[i] = 0 - } -} diff --git a/accounts/addrcache.go b/accounts/addrcache.go deleted file mode 100644 index 2ed774baa3963..0000000000000 --- a/accounts/addrcache.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
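
zeroKey above works because big.Int.Bits returns the integer's backing word slice rather than a copy, so writing zeros through it scrubs the private scalar in place (best-effort hygiene: the runtime may still hold older copies elsewhere). A short demonstration:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	d := new(big.Int).SetInt64(0x0102030405060708) // stand-in for a private scalar
	words := d.Bits()                              // aliases d's storage, no copy
	for i := range words {
		words[i] = 0
	}
	fmt.Println(d.Bits()) // all zero words: the secret material is overwritten
}
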
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package accounts - -import ( - "bufio" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" -) - -// Minimum amount of time between cache reloads. This limit applies if the platform does -// not support change notifications. It also applies if the keystore directory does not -// exist yet, the code will attempt to create a watcher at most this often. -const minReloadInterval = 2 * time.Second - -type accountsByFile []Account - -func (s accountsByFile) Len() int { return len(s) } -func (s accountsByFile) Less(i, j int) bool { return s[i].File < s[j].File } -func (s accountsByFile) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// AmbiguousAddrError is returned when attempting to unlock -// an address for which more than one file exists. -type AmbiguousAddrError struct { - Addr common.Address - Matches []Account -} - -func (err *AmbiguousAddrError) Error() string { - files := "" - for i, a := range err.Matches { - files += a.File - if i < len(err.Matches)-1 { - files += ", " - } - } - return fmt.Sprintf("multiple keys match address (%s)", files) -} - -// addrCache is a live index of all accounts in the keystore. -type addrCache struct { - keydir string - watcher *watcher - mu sync.Mutex - all accountsByFile - byAddr map[common.Address][]Account - throttle *time.Timer -} - -func newAddrCache(keydir string) *addrCache { - ac := &addrCache{ - keydir: keydir, - byAddr: make(map[common.Address][]Account), - } - ac.watcher = newWatcher(ac) - return ac -} - -func (ac *addrCache) accounts() []Account { - ac.maybeReload() - ac.mu.Lock() - defer ac.mu.Unlock() - cpy := make([]Account, len(ac.all)) - copy(cpy, ac.all) - return cpy -} - -func (ac *addrCache) hasAddress(addr common.Address) bool { - ac.maybeReload() - ac.mu.Lock() - defer ac.mu.Unlock() - return len(ac.byAddr[addr]) > 0 -} - -func (ac *addrCache) add(newAccount Account) { - ac.mu.Lock() - defer ac.mu.Unlock() - - i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].File >= newAccount.File }) - if i < len(ac.all) && ac.all[i] == newAccount { - return - } - // newAccount is not in the cache. - ac.all = append(ac.all, Account{}) - copy(ac.all[i+1:], ac.all[i:]) - ac.all[i] = newAccount - ac.byAddr[newAccount.Address] = append(ac.byAddr[newAccount.Address], newAccount) -} - -// note: removed needs to be unique here (i.e. both File and Address must be set). 
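
addrCache.add keeps the slice sorted by filename using the classic sort.Search insertion: find the index, grow by one, shift the tail, write the element, skipping exact duplicates. The pattern in isolation, on plain strings:

package main

import (
	"fmt"
	"sort"
)

// insertSorted mirrors addrCache.add: sorted insert with duplicate suppression.
func insertSorted(all []string, s string) []string {
	i := sort.Search(len(all), func(i int) bool { return all[i] >= s })
	if i < len(all) && all[i] == s {
		return all // already present
	}
	all = append(all, "")    // grow by one
	copy(all[i+1:], all[i:]) // shift the tail right
	all[i] = s
	return all
}

func main() {
	files := []string{"aaa", "zzz"}
	files = insertSorted(files, "mmm")
	files = insertSorted(files, "aaa") // duplicate: no-op
	fmt.Println(files)                 // [aaa mmm zzz]
}
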
-func (ac *addrCache) delete(removed Account) { - ac.mu.Lock() - defer ac.mu.Unlock() - ac.all = removeAccount(ac.all, removed) - if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 { - delete(ac.byAddr, removed.Address) - } else { - ac.byAddr[removed.Address] = ba - } -} - -func removeAccount(slice []Account, elem Account) []Account { - for i := range slice { - if slice[i] == elem { - return append(slice[:i], slice[i+1:]...) - } - } - return slice -} - -// find returns the cached account for address if there is a unique match. -// The exact matching rules are explained by the documentation of Account. -// Callers must hold ac.mu. -func (ac *addrCache) find(a Account) (Account, error) { - // Limit search to address candidates if possible. - matches := ac.all - if (a.Address != common.Address{}) { - matches = ac.byAddr[a.Address] - } - if a.File != "" { - // If only the basename is specified, complete the path. - if !strings.ContainsRune(a.File, filepath.Separator) { - a.File = filepath.Join(ac.keydir, a.File) - } - for i := range matches { - if matches[i].File == a.File { - return matches[i], nil - } - } - if (a.Address == common.Address{}) { - return Account{}, ErrNoMatch - } - } - switch len(matches) { - case 1: - return matches[0], nil - case 0: - return Account{}, ErrNoMatch - default: - err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]Account, len(matches))} - copy(err.Matches, matches) - return Account{}, err - } -} - -func (ac *addrCache) maybeReload() { - ac.mu.Lock() - defer ac.mu.Unlock() - if ac.watcher.running { - return // A watcher is running and will keep the cache up-to-date. - } - if ac.throttle == nil { - ac.throttle = time.NewTimer(0) - } else { - select { - case <-ac.throttle.C: - default: - return // The cache was reloaded recently. - } - } - ac.watcher.start() - ac.reload() - ac.throttle.Reset(minReloadInterval) -} - -func (ac *addrCache) close() { - ac.mu.Lock() - ac.watcher.close() - if ac.throttle != nil { - ac.throttle.Stop() - } - ac.mu.Unlock() -} - -// reload caches addresses of existing accounts. -// Callers must hold ac.mu. -func (ac *addrCache) reload() { - accounts, err := ac.scan() - if err != nil && glog.V(logger.Debug) { - glog.Errorf("can't load keys: %v", err) - } - ac.all = accounts - sort.Sort(ac.all) - for k := range ac.byAddr { - delete(ac.byAddr, k) - } - for _, a := range accounts { - ac.byAddr[a.Address] = append(ac.byAddr[a.Address], a) - } - glog.V(logger.Debug).Infof("reloaded keys, cache has %d accounts", len(ac.all)) -} - -func (ac *addrCache) scan() ([]Account, error) { - files, err := ioutil.ReadDir(ac.keydir) - if err != nil { - return nil, err - } - - var ( - buf = new(bufio.Reader) - addrs []Account - keyJSON struct { - Address common.Address `json:"address"` - } - ) - for _, fi := range files { - path := filepath.Join(ac.keydir, fi.Name()) - if skipKeyFile(fi) { - glog.V(logger.Detail).Infof("ignoring file %s", path) - continue - } - fd, err := os.Open(path) - if err != nil { - glog.V(logger.Detail).Infoln(err) - continue - } - buf.Reset(fd) - // Parse the address. 
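
maybeReload's throttle is a time.Timer used as a one-token bucket: if the timer has fired since the last reload, scan again and re-arm it; otherwise treat the cache as fresh. A minimal sketch of that gate (slightly simplified: the first call arms the timer directly):

package main

import (
	"fmt"
	"time"
)

type throttled struct {
	timer *time.Timer
}

// try reports whether at least min has elapsed since the last granted call.
func (t *throttled) try(min time.Duration) bool {
	if t.timer == nil {
		t.timer = time.NewTimer(min)
		return true // first call always proceeds
	}
	select {
	case <-t.timer.C:
		t.timer.Reset(min)
		return true
	default:
		return false // granted too recently
	}
}

func main() {
	var g throttled
	fmt.Println(g.try(50 * time.Millisecond)) // true
	fmt.Println(g.try(50 * time.Millisecond)) // false: too soon
	time.Sleep(60 * time.Millisecond)
	fmt.Println(g.try(50 * time.Millisecond)) // true: interval elapsed
}
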
- keyJSON.Address = common.Address{} - err = json.NewDecoder(buf).Decode(&keyJSON) - switch { - case err != nil: - glog.V(logger.Debug).Infof("can't decode key %s: %v", path, err) - case (keyJSON.Address == common.Address{}): - glog.V(logger.Debug).Infof("can't decode key %s: missing or zero address", path) - default: - addrs = append(addrs, Account{Address: keyJSON.Address, File: path}) - } - fd.Close() - } - return addrs, err -} - -func skipKeyFile(fi os.FileInfo) bool { - // Skip editor backups and UNIX-style hidden files. - if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") { - return true - } - // Skip misc special files, directories (yes, symlinks too). - if fi.IsDir() || fi.Mode()&os.ModeType != 0 { - return true - } - return false -} diff --git a/accounts/addrcache_test.go b/accounts/addrcache_test.go deleted file mode 100644 index 3bd6ed7e3c7b4..0000000000000 --- a/accounts/addrcache_test.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package accounts - -import ( - "fmt" - "math/rand" - "os" - "path/filepath" - "reflect" - "sort" - "testing" - "time" - - "github.com/cespare/cp" - "github.com/davecgh/go-spew/spew" - "github.com/expanse-org/go-expanse/common" -) - -var ( - cachetestDir, _ = filepath.Abs(filepath.Join("testdata", "keystore")) - cachetestAccounts = []Account{ - { - Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), - File: filepath.Join(cachetestDir, "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), - }, - { - Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"), - File: filepath.Join(cachetestDir, "aaa"), - }, - { - Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"), - File: filepath.Join(cachetestDir, "zzz"), - }, - } -) - -func TestWatchNewFile(t *testing.T) { - t.Parallel() - - dir, am := tmpManager(t, false) - defer os.RemoveAll(dir) - - // Ensure the watcher is started before adding any files. - am.Accounts() - time.Sleep(200 * time.Millisecond) - - // Move in the files. - wantAccounts := make([]Account, len(cachetestAccounts)) - for i := range cachetestAccounts { - a := cachetestAccounts[i] - a.File = filepath.Join(dir, filepath.Base(a.File)) - wantAccounts[i] = a - if err := cp.CopyFile(a.File, cachetestAccounts[i].File); err != nil { - t.Fatal(err) - } - } - - // am should see the accounts. - var list []Account - for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 { - list = am.Accounts() - if reflect.DeepEqual(list, wantAccounts) { - return - } - time.Sleep(d) - } - t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantAccounts)) -} - -func TestWatchNoDir(t *testing.T) { - t.Parallel() - - // Create am but not the directory that it watches. 
- rand.Seed(time.Now().UnixNano()) - dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int())) - am := NewManager(dir, LightScryptN, LightScryptP) - - list := am.Accounts() - if len(list) > 0 { - t.Error("initial account list not empty:", list) - } - time.Sleep(100 * time.Millisecond) - - // Create the directory and copy a key file into it. - os.MkdirAll(dir, 0700) - defer os.RemoveAll(dir) - file := filepath.Join(dir, "aaa") - if err := cp.CopyFile(file, cachetestAccounts[0].File); err != nil { - t.Fatal(err) - } - - // am should see the account. - wantAccounts := []Account{cachetestAccounts[0]} - wantAccounts[0].File = file - for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 { - list = am.Accounts() - if reflect.DeepEqual(list, wantAccounts) { - return - } - time.Sleep(d) - } - t.Errorf("\ngot %v\nwant %v", list, wantAccounts) -} - -func TestCacheInitialReload(t *testing.T) { - cache := newAddrCache(cachetestDir) - accounts := cache.accounts() - if !reflect.DeepEqual(accounts, cachetestAccounts) { - t.Fatalf("got initial accounts: %swant %s", spew.Sdump(accounts), spew.Sdump(cachetestAccounts)) - } -} - -func TestCacheAddDeleteOrder(t *testing.T) { - cache := newAddrCache("testdata/no-such-dir") - cache.watcher.running = true // prevent unexpected reloads - - accounts := []Account{ - { - Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), - File: "-309830980", - }, - { - Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"), - File: "ggg", - }, - { - Address: common.HexToAddress("8bda78331c916a08481428e4b07c96d3e916d165"), - File: "zzzzzz-the-very-last-one.keyXXX", - }, - { - Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"), - File: "SOMETHING.key", - }, - { - Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), - File: "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8", - }, - { - Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"), - File: "aaa", - }, - { - Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"), - File: "zzz", - }, - } - for _, a := range accounts { - cache.add(a) - } - // Add some of them twice to check that they don't get reinserted. - cache.add(accounts[0]) - cache.add(accounts[2]) - - // Check that the account list is sorted by filename. - wantAccounts := make([]Account, len(accounts)) - copy(wantAccounts, accounts) - sort.Sort(accountsByFile(wantAccounts)) - list := cache.accounts() - if !reflect.DeepEqual(list, wantAccounts) { - t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accounts), spew.Sdump(wantAccounts)) - } - for _, a := range accounts { - if !cache.hasAddress(a.Address) { - t.Errorf("expected hasAccount(%x) to return true", a.Address) - } - } - if cache.hasAddress(common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e")) { - t.Errorf("expected hasAccount(%x) to return false", common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e")) - } - - // Delete a few keys from the cache. - for i := 0; i < len(accounts); i += 2 { - cache.delete(wantAccounts[i]) - } - cache.delete(Account{Address: common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e"), File: "something"}) - - // Check content again after deletion. 
- wantAccountsAfterDelete := []Account{ - wantAccounts[1], - wantAccounts[3], - wantAccounts[5], - } - list = cache.accounts() - if !reflect.DeepEqual(list, wantAccountsAfterDelete) { - t.Fatalf("got accounts after delete: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantAccountsAfterDelete)) - } - for _, a := range wantAccountsAfterDelete { - if !cache.hasAddress(a.Address) { - t.Errorf("expected hasAccount(%x) to return true", a.Address) - } - } - if cache.hasAddress(wantAccounts[0].Address) { - t.Errorf("expected hasAccount(%x) to return false", wantAccounts[0].Address) - } -} - -func TestCacheFind(t *testing.T) { - dir := filepath.Join("testdata", "dir") - cache := newAddrCache(dir) - cache.watcher.running = true // prevent unexpected reloads - - accounts := []Account{ - { - Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), - File: filepath.Join(dir, "a.key"), - }, - { - Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"), - File: filepath.Join(dir, "b.key"), - }, - { - Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"), - File: filepath.Join(dir, "c.key"), - }, - { - Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"), - File: filepath.Join(dir, "c2.key"), - }, - } - for _, a := range accounts { - cache.add(a) - } - - nomatchAccount := Account{ - Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"), - File: filepath.Join(dir, "something"), - } - tests := []struct { - Query Account - WantResult Account - WantError error - }{ - // by address - {Query: Account{Address: accounts[0].Address}, WantResult: accounts[0]}, - // by file - {Query: Account{File: accounts[0].File}, WantResult: accounts[0]}, - // by basename - {Query: Account{File: filepath.Base(accounts[0].File)}, WantResult: accounts[0]}, - // by file and address - {Query: accounts[0], WantResult: accounts[0]}, - // ambiguous address, tie resolved by file - {Query: accounts[2], WantResult: accounts[2]}, - // ambiguous address error - { - Query: Account{Address: accounts[2].Address}, - WantError: &AmbiguousAddrError{ - Addr: accounts[2].Address, - Matches: []Account{accounts[2], accounts[3]}, - }, - }, - // no match error - {Query: nomatchAccount, WantError: ErrNoMatch}, - {Query: Account{File: nomatchAccount.File}, WantError: ErrNoMatch}, - {Query: Account{File: filepath.Base(nomatchAccount.File)}, WantError: ErrNoMatch}, - {Query: Account{Address: nomatchAccount.Address}, WantError: ErrNoMatch}, - } - for i, test := range tests { - a, err := cache.find(test.Query) - if !reflect.DeepEqual(err, test.WantError) { - t.Errorf("test %d: error mismatch for query %v\ngot %q\nwant %q", i, test.Query, err, test.WantError) - continue - } - if a != test.WantResult { - t.Errorf("test %d: result mismatch for query %v\ngot %v\nwant %v", i, test.Query, a, test.WantResult) - continue - } - } -} diff --git a/accounts/key.go b/accounts/key.go deleted file mode 100644 index 91156194158fb..0000000000000 --- a/accounts/key.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
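
TestCacheFind above is a textbook table-driven test: queries and expected outcomes live in one slice, and the loop reports failures by index. The skeleton, reduced to essentials with a hypothetical find helper:

package cache

import (
	"errors"
	"testing"
)

var errNoMatch = errors.New("no match")

// find is a hypothetical stand-in for addrCache.find.
func find(all []string, q string) (string, error) {
	for _, a := range all {
		if a == q {
			return a, nil
		}
	}
	return "", errNoMatch
}

func TestFind(t *testing.T) {
	all := []string{"aaa", "zzz"}
	tests := []struct {
		query     string
		want      string
		wantError error
	}{
		{query: "aaa", want: "aaa"},
		{query: "nope", wantError: errNoMatch},
	}
	for i, test := range tests {
		got, err := find(all, test.query)
		if err != test.wantError {
			t.Errorf("test %d: error mismatch: got %v, want %v", i, err, test.wantError)
			continue
		}
		if got != test.want {
			t.Errorf("test %d: result mismatch: got %q, want %q", i, got, test.want)
		}
	}
}
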
-// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package accounts - -import ( - "bytes" - "crypto/ecdsa" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/crypto/secp256k1" - "github.com/pborman/uuid" -) - -const ( - version = 3 -) - -type Key struct { - Id uuid.UUID // Version 4 "random" for unique id not derived from key data - // to simplify lookups we also store the address - Address common.Address - // we only store privkey as pubkey/address can be derived from it - // privkey in this struct is always in plaintext - PrivateKey *ecdsa.PrivateKey -} - -type keyStore interface { - // Loads and decrypts the key from disk. - GetKey(addr common.Address, filename string, auth string) (*Key, error) - // Writes and encrypts the key. - StoreKey(filename string, k *Key, auth string) error - // Joins filename with the key directory unless it is already absolute. - JoinPath(filename string) string -} - -type plainKeyJSON struct { - Address string `json:"address"` - PrivateKey string `json:"privatekey"` - Id string `json:"id"` - Version int `json:"version"` -} - -type encryptedKeyJSONV3 struct { - Address string `json:"address"` - Crypto cryptoJSON `json:"crypto"` - Id string `json:"id"` - Version int `json:"version"` -} - -type encryptedKeyJSONV1 struct { - Address string `json:"address"` - Crypto cryptoJSON `json:"crypto"` - Id string `json:"id"` - Version string `json:"version"` -} - -type cryptoJSON struct { - Cipher string `json:"cipher"` - CipherText string `json:"ciphertext"` - CipherParams cipherparamsJSON `json:"cipherparams"` - KDF string `json:"kdf"` - KDFParams map[string]interface{} `json:"kdfparams"` - MAC string `json:"mac"` -} - -type cipherparamsJSON struct { - IV string `json:"iv"` -} - -type scryptParamsJSON struct { - N int `json:"n"` - R int `json:"r"` - P int `json:"p"` - DkLen int `json:"dklen"` - Salt string `json:"salt"` -} - -func (k *Key) MarshalJSON() (j []byte, err error) { - jStruct := plainKeyJSON{ - hex.EncodeToString(k.Address[:]), - hex.EncodeToString(crypto.FromECDSA(k.PrivateKey)), - k.Id.String(), - version, - } - j, err = json.Marshal(jStruct) - return j, err -} - -func (k *Key) UnmarshalJSON(j []byte) (err error) { - keyJSON := new(plainKeyJSON) - err = json.Unmarshal(j, &keyJSON) - if err != nil { - return err - } - - u := new(uuid.UUID) - *u = uuid.Parse(keyJSON.Id) - k.Id = *u - addr, err := hex.DecodeString(keyJSON.Address) - if err != nil { - return err - } - - privkey, err := hex.DecodeString(keyJSON.PrivateKey) - if err != nil { - return err - } - - k.Address = common.BytesToAddress(addr) - k.PrivateKey = crypto.ToECDSA(privkey) - - return nil -} - -func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key { - id := uuid.NewRandom() - key := &Key{ - Id: id, - Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey), - PrivateKey: privateKeyECDSA, - } - return key -} - -// NewKeyForDirectICAP generates a key whose address fits into < 155 bits so it can fit -// into the Direct ICAP spec. 
for simplicity and easier compatibility with other libs, we -// retry until the first byte is 0. -func NewKeyForDirectICAP(rand io.Reader) *Key { - randBytes := make([]byte, 64) - _, err := rand.Read(randBytes) - if err != nil { - panic("key generation: could not read from random source: " + err.Error()) - } - reader := bytes.NewReader(randBytes) - privateKeyECDSA, err := ecdsa.GenerateKey(secp256k1.S256(), reader) - if err != nil { - panic("key generation: ecdsa.GenerateKey failed: " + err.Error()) - } - key := newKeyFromECDSA(privateKeyECDSA) - if !strings.HasPrefix(key.Address.Hex(), "0x00") { - return NewKeyForDirectICAP(rand) - } - return key -} - -func newKey(rand io.Reader) (*Key, error) { - privateKeyECDSA, err := ecdsa.GenerateKey(secp256k1.S256(), rand) - if err != nil { - return nil, err - } - return newKeyFromECDSA(privateKeyECDSA), nil -} - -func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, Account, error) { - key, err := newKey(rand) - if err != nil { - return nil, Account{}, err - } - a := Account{Address: key.Address, File: ks.JoinPath(keyFileName(key.Address))} - if err := ks.StoreKey(a.File, key, auth); err != nil { - zeroKey(key.PrivateKey) - return nil, a, err - } - return key, a, err -} - -func writeKeyFile(file string, content []byte) error { - // Create the keystore directory with appropriate permissions - // in case it is not present yet. - const dirPerm = 0700 - if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil { - return err - } - // Atomic write: create a temporary hidden file first - // then move it into place. TempFile assigns mode 0600. - f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp") - if err != nil { - return err - } - if _, err := f.Write(content); err != nil { - f.Close() - os.Remove(f.Name()) - return err - } - f.Close() - return os.Rename(f.Name(), file) -} - -// keyFileName implements the naming convention for keyfiles: -// UTC---
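
writeKeyFile above is the canonical atomic-write recipe: write everything to a temp file in the destination directory, then rename over the target, so concurrent readers never observe a half-written key file. The same recipe with the current stdlib spelling (os.CreateTemp has since replaced ioutil.TempFile):

package main

import (
	"os"
	"path/filepath"
)

// writeFileAtomic mirrors writeKeyFile: temp file in the same directory
// (rename is only atomic within one filesystem), then rename into place.
func writeFileAtomic(file string, content []byte) error {
	if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
		return err
	}
	f, err := os.CreateTemp(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
	if err != nil {
		return err
	}
	if _, err := f.Write(content); err != nil {
		f.Close()
		os.Remove(f.Name())
		return err
	}
	f.Close()
	return os.Rename(f.Name(), file)
}

func main() {
	// Path is hypothetical, content illustrative.
	if err := writeFileAtomic("/tmp/keystore-demo/key.json", []byte(`{"version":3}`)); err != nil {
		panic(err)
	}
}
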
-func keyFileName(keyAddr common.Address) string { - ts := time.Now().UTC() - return fmt.Sprintf("UTC--%s--%s", toISO8601(ts), hex.EncodeToString(keyAddr[:])) -} - -func toISO8601(t time.Time) string { - var tz string - name, offset := t.Zone() - if name == "UTC" { - tz = "Z" - } else { - tz = fmt.Sprintf("%03d00", offset/3600) - } - return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz) -} diff --git a/accounts/key_store_passphrase.go b/accounts/key_store_passphrase.go deleted file mode 100644 index 8c75ad6fb72d1..0000000000000 --- a/accounts/key_store_passphrase.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -/* - -This key store behaves as KeyStorePlain with the difference that -the private key is encrypted and on disk uses another JSON encoding. - -The crypto is documented at https://github.com/expanse-org/wiki/wiki/Web3-Secret-Storage-Definition - -*/ - -package accounts - -import ( - "bytes" - "crypto/aes" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/crypto/randentropy" - "github.com/pborman/uuid" - "golang.org/x/crypto/pbkdf2" - "golang.org/x/crypto/scrypt" -) - -const ( - keyHeaderKDF = "scrypt" - - // n,r,p = 2^18, 8, 1 uses 256MB memory and approx 1s CPU time on a modern CPU. - StandardScryptN = 1 << 18 - StandardScryptP = 1 - - // n,r,p = 2^12, 8, 6 uses 4MB memory and approx 100ms CPU time on a modern CPU. 
- LightScryptN = 1 << 12 - LightScryptP = 6 - - scryptR = 8 - scryptDKLen = 32 -) - -type keyStorePassphrase struct { - keysDirPath string - scryptN int - scryptP int -} - -func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) { - // Load the key from the keystore and decrypt its contents - keyjson, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - key, err := DecryptKey(keyjson, auth) - if err != nil { - return nil, err - } - // Make sure we're really operating on the requested key (no swap attacks) - if key.Address != addr { - return nil, fmt.Errorf("key content mismatch: have account %x, want %x", key.Address, addr) - } - return key, nil -} - -func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error { - keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP) - if err != nil { - return err - } - return writeKeyFile(filename, keyjson) -} - -func (ks keyStorePassphrase) JoinPath(filename string) string { - if filepath.IsAbs(filename) { - return filename - } else { - return filepath.Join(ks.keysDirPath, filename) - } -} - -// EncryptKey encrypts a key using the specified scrypt parameters into a json -// blob that can be decrypted later on. -func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { - authArray := []byte(auth) - salt := randentropy.GetEntropyCSPRNG(32) - derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen) - if err != nil { - return nil, err - } - encryptKey := derivedKey[:16] - keyBytes := crypto.FromECDSA(key.PrivateKey) - - iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16 - cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv) - if err != nil { - return nil, err - } - mac := crypto.Keccak256(derivedKey[16:32], cipherText) - - scryptParamsJSON := make(map[string]interface{}, 5) - scryptParamsJSON["n"] = scryptN - scryptParamsJSON["r"] = scryptR - scryptParamsJSON["p"] = scryptP - scryptParamsJSON["dklen"] = scryptDKLen - scryptParamsJSON["salt"] = hex.EncodeToString(salt) - - cipherParamsJSON := cipherparamsJSON{ - IV: hex.EncodeToString(iv), - } - - cryptoStruct := cryptoJSON{ - Cipher: "aes-128-ctr", - CipherText: hex.EncodeToString(cipherText), - CipherParams: cipherParamsJSON, - KDF: "scrypt", - KDFParams: scryptParamsJSON, - MAC: hex.EncodeToString(mac), - } - encryptedKeyJSONV3 := encryptedKeyJSONV3{ - hex.EncodeToString(key.Address[:]), - cryptoStruct, - key.Id.String(), - version, - } - return json.Marshal(encryptedKeyJSONV3) -} - -// DecryptKey decrypts a key from a json blob, returning the private key itself. 
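
EncryptKey's pipeline is: scrypt-stretch the passphrase, encrypt the key bytes with AES-128-CTR under the first half of the derived key, then MAC the ciphertext with Keccak-256 over the second half. A self-contained sketch of the same pipeline using the x/crypto packages (light parameters from the const block above; the secret is a placeholder):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/scrypt"
	"golang.org/x/crypto/sha3"
)

func encrypt(secret []byte, pass string) (ct, iv, salt, mac []byte, err error) {
	salt = make([]byte, 32)
	if _, err = rand.Read(salt); err != nil {
		return nil, nil, nil, nil, err
	}
	// Light parameters: n=2^12, r=8, p=6, dklen=32.
	derived, err := scrypt.Key([]byte(pass), salt, 1<<12, 8, 6, 32)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	iv = make([]byte, aes.BlockSize)
	if _, err = rand.Read(iv); err != nil {
		return nil, nil, nil, nil, err
	}
	block, err := aes.NewCipher(derived[:16]) // AES-128: first 16 bytes
	if err != nil {
		return nil, nil, nil, nil, err
	}
	ct = make([]byte, len(secret))
	cipher.NewCTR(block, iv).XORKeyStream(ct, secret)

	// MAC = Keccak256(derivedKey[16:32] || ciphertext), as in EncryptKey.
	h := sha3.NewLegacyKeccak256()
	h.Write(derived[16:32])
	h.Write(ct)
	mac = h.Sum(nil)
	return ct, iv, salt, mac, nil
}

func main() {
	ct, _, _, mac, err := encrypt([]byte("32-byte private scalar placeholder"), "foo")
	if err != nil {
		panic(err)
	}
	fmt.Printf("ciphertext %x\nmac %x\n", ct, mac)
}

The memory figures in the comments above follow from scrypt's working set of roughly 128·N·r bytes: 128·2^18·8 = 256 MiB for the standard parameters and 128·2^12·8 = 4 MiB for the light ones.
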
-func DecryptKey(keyjson []byte, auth string) (*Key, error) { - // Parse the json into a simple map to fetch the key version - m := make(map[string]interface{}) - if err := json.Unmarshal(keyjson, &m); err != nil { - return nil, err - } - // Depending on the version try to parse one way or another - var ( - keyBytes, keyId []byte - err error - ) - if version, ok := m["version"].(string); ok && version == "1" { - k := new(encryptedKeyJSONV1) - if err := json.Unmarshal(keyjson, k); err != nil { - return nil, err - } - keyBytes, keyId, err = decryptKeyV1(k, auth) - } else { - k := new(encryptedKeyJSONV3) - if err := json.Unmarshal(keyjson, k); err != nil { - return nil, err - } - keyBytes, keyId, err = decryptKeyV3(k, auth) - } - // Handle any decryption errors and return the key - if err != nil { - return nil, err - } - key := crypto.ToECDSA(keyBytes) - return &Key{ - Id: uuid.UUID(keyId), - Address: crypto.PubkeyToAddress(key.PublicKey), - PrivateKey: key, - }, nil -} - -func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) { - if keyProtected.Version != version { - return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version) - } - - if keyProtected.Crypto.Cipher != "aes-128-ctr" { - return nil, nil, fmt.Errorf("Cipher not supported: %v", keyProtected.Crypto.Cipher) - } - - keyId = uuid.Parse(keyProtected.Id) - mac, err := hex.DecodeString(keyProtected.Crypto.MAC) - if err != nil { - return nil, nil, err - } - - iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV) - if err != nil { - return nil, nil, err - } - - cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText) - if err != nil { - return nil, nil, err - } - - derivedKey, err := getKDFKey(keyProtected.Crypto, auth) - if err != nil { - return nil, nil, err - } - - calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText) - if !bytes.Equal(calculatedMAC, mac) { - return nil, nil, ErrDecrypt - } - - plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv) - if err != nil { - return nil, nil, err - } - return plainText, keyId, err -} - -func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) { - keyId = uuid.Parse(keyProtected.Id) - mac, err := hex.DecodeString(keyProtected.Crypto.MAC) - if err != nil { - return nil, nil, err - } - - iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV) - if err != nil { - return nil, nil, err - } - - cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText) - if err != nil { - return nil, nil, err - } - - derivedKey, err := getKDFKey(keyProtected.Crypto, auth) - if err != nil { - return nil, nil, err - } - - calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText) - if !bytes.Equal(calculatedMAC, mac) { - return nil, nil, ErrDecrypt - } - - plainText, err := aesCBCDecrypt(crypto.Keccak256(derivedKey[:16])[:16], cipherText, iv) - if err != nil { - return nil, nil, err - } - return plainText, keyId, err -} - -func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) { - authArray := []byte(auth) - salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string)) - if err != nil { - return nil, err - } - dkLen := ensureInt(cryptoJSON.KDFParams["dklen"]) - - if cryptoJSON.KDF == "scrypt" { - n := ensureInt(cryptoJSON.KDFParams["n"]) - r := ensureInt(cryptoJSON.KDFParams["r"]) - p := ensureInt(cryptoJSON.KDFParams["p"]) - return scrypt.Key(authArray, salt, n, r, p, dkLen) - - } else if cryptoJSON.KDF == "pbkdf2" { 
- c := ensureInt(cryptoJSON.KDFParams["c"]) - prf := cryptoJSON.KDFParams["prf"].(string) - if prf != "hmac-sha256" { - return nil, fmt.Errorf("Unsupported PBKDF2 PRF: %s", prf) - } - key := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New) - return key, nil - } - - return nil, fmt.Errorf("Unsupported KDF: %s", cryptoJSON.KDF) -} - -// TODO: can we do without this when unmarshalling dynamic JSON? -// why do integers in KDF params end up as float64 and not int after -// unmarshal? -func ensureInt(x interface{}) int { - res, ok := x.(int) - if !ok { - res = int(x.(float64)) - } - return res -} diff --git a/accounts/key_store_passphrase_test.go b/accounts/key_store_passphrase_test.go deleted file mode 100644 index ebde4745cbcaa..0000000000000 --- a/accounts/key_store_passphrase_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package accounts - -import ( - "io/ioutil" - "testing" - - "github.com/expanse-org/go-expanse/common" -) - -const ( - veryLightScryptN = 2 - veryLightScryptP = 1 -) - -// Tests that a json key file can be decrypted and encrypted in multiple rounds. -func TestKeyEncryptDecrypt(t *testing.T) { - keyjson, err := ioutil.ReadFile("testdata/very-light-scrypt.json") - if err != nil { - t.Fatal(err) - } - password := "" - address := common.HexToAddress("45dea0fb0bba44f4fcf290bba71fd57d7117cbb8") - - // Do a few rounds of decryption and encryption - for i := 0; i < 3; i++ { - // Try a bad password first - if _, err := DecryptKey(keyjson, password+"bad"); err == nil { - t.Errorf("test %d: json key decrypted with bad password", i) - } - // Decrypt with the correct password - key, err := DecryptKey(keyjson, password) - if err != nil { - t.Errorf("test %d: json key failed to decrypt: %v", i, err) - } - if key.Address != address { - t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address) - } - // Recrypt with a new password and start over - password += "new data appended" - if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil { - t.Errorf("test %d: failed to recrypt key %v", i, err) - } - } -} diff --git a/accounts/key_store_plain.go b/accounts/key_store_plain.go deleted file mode 100644 index 09c6531dac309..0000000000000 --- a/accounts/key_store_plain.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
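
ensureInt answers the question its TODO asks: when the target is interface{}, encoding/json decodes every JSON number as float64, so KDF parameters read back from a key file need the float64-to-int conversion. Demonstrably:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var m map[string]interface{}
	if err := json.Unmarshal([]byte(`{"n": 262144, "dklen": 32}`), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", m["n"]) // float64, not int
	n := int(m["n"].(float64)) // the conversion ensureInt performs
	fmt.Println(n)             // 262144
}
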
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package accounts - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - - "github.com/expanse-org/go-expanse/common" -) - -type keyStorePlain struct { - keysDirPath string -} - -func (ks keyStorePlain) GetKey(addr common.Address, filename, auth string) (*Key, error) { - fd, err := os.Open(filename) - if err != nil { - return nil, err - } - defer fd.Close() - key := new(Key) - if err := json.NewDecoder(fd).Decode(key); err != nil { - return nil, err - } - if key.Address != addr { - return nil, fmt.Errorf("key content mismatch: have address %x, want %x", key.Address, addr) - } - return key, nil -} - -func (ks keyStorePlain) StoreKey(filename string, key *Key, auth string) error { - content, err := json.Marshal(key) - if err != nil { - return err - } - return writeKeyFile(filename, content) -} - -func (ks keyStorePlain) JoinPath(filename string) string { - if filepath.IsAbs(filename) { - return filename - } else { - return filepath.Join(ks.keysDirPath, filename) - } -} diff --git a/accounts/key_store_test.go b/accounts/key_store_test.go deleted file mode 100644 index 0b97f39d8d37b..0000000000000 --- a/accounts/key_store_test.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
- -package accounts - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - -) - -func tmpKeyStore(t *testing.T, encrypted bool) (dir string, ks keyStore) { - d, err := ioutil.TempDir("", "gexp-keystore-test") - if err != nil { - t.Fatal(err) - } - if encrypted { - ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP} - } else { - ks = &keyStorePlain{d} - } - return d, ks -} - -func TestKeyStorePlain(t *testing.T) { - dir, ks := tmpKeyStore(t, false) - defer os.RemoveAll(dir) - - pass := "" // not used but required by API - k1, account, err := storeNewKey(ks, rand.Reader, pass) - if err != nil { - t.Fatal(err) - } - k2, err := ks.GetKey(k1.Address, account.File, pass) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(k1.Address, k2.Address) { - t.Fatal(err) - } - if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) { - t.Fatal(err) - } -} - -func TestKeyStorePassphrase(t *testing.T) { - dir, ks := tmpKeyStore(t, true) - defer os.RemoveAll(dir) - - pass := "foo" - k1, account, err := storeNewKey(ks, rand.Reader, pass) - if err != nil { - t.Fatal(err) - } - k2, err := ks.GetKey(k1.Address, account.File, pass) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(k1.Address, k2.Address) { - t.Fatal(err) - } - if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) { - t.Fatal(err) - } -} - -func TestKeyStorePassphraseDecryptionFail(t *testing.T) { - dir, ks := tmpKeyStore(t, true) - defer os.RemoveAll(dir) - - pass := "foo" - k1, account, err := storeNewKey(ks, rand.Reader, pass) - if err != nil { - t.Fatal(err) - } - if _, err = ks.GetKey(k1.Address, account.File, "bar"); err != ErrDecrypt { - t.Fatalf("wrong error for invalid passphrase\ngot %q\nwant %q", err, ErrDecrypt) - } -} - -func TestImportPreSaleKey(t *testing.T) { - dir, ks := tmpKeyStore(t, true) - defer os.RemoveAll(dir) - - // file content of a presale key file generated with: - // python pyethsaletool.py genwallet - // with password "foo" - fileContent := "{\"encseed\": \"26d87f5f2bf9835f9a47eefae571bc09f9107bb13d54ff12a4ec095d01f83897494cf34f7bed2ed34126ecba9db7b62de56c9d7cd136520a0427bfb11b8954ba7ac39b90d4650d3448e31185affcd74226a68f1e94b1108e6e0a4a91cdd83eba\", \"ethaddr\": \"d4584b5f6229b7be90727b0fc8c6b91bb427821f\", \"email\": \"gustav.simonsson@gmail.com\", \"btcaddr\": \"1EVknXyFC68kKNLkh6YnKzW41svSRoaAcx\"}" - pass := "foo" - account, _, err := importPreSaleKey(ks, []byte(fileContent), pass) - if err != nil { - t.Fatal(err) - } - if account.Address != common.HexToAddress("d4584b5f6229b7be90727b0fc8c6b91bb427821f") { - t.Errorf("imported account has wrong address %x", account.Address) - } - if !strings.HasPrefix(account.File, dir) { - t.Errorf("imported account file not in keystore directory: %q", account.File) - } -} - - -// Test and utils for the key store tests in the Expanse JSON tests; -// testdataKeyStoreTests/basic_tests.json - -type KeyStoreTestV3 struct { - Json encryptedKeyJSONV3 - Password string - Priv string -} - -type KeyStoreTestV1 struct { - Json encryptedKeyJSONV1 - Password string - Priv string -} - -func TestV3_PBKDF2_1(t *testing.T) { - t.Parallel() - tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t) - testDecryptV3(tests["wikipage_test_vector_pbkdf2"], t) -} - -func TestV3_PBKDF2_2(t *testing.T) { - t.Parallel() - tests := loadKeyStoreTestV3("../tests/files/KeyStoreTests/basic_tests.json", t) - 
testDecryptV3(tests["test1"], t) -} - -func TestV3_PBKDF2_3(t *testing.T) { - t.Parallel() - tests := loadKeyStoreTestV3("../tests/files/KeyStoreTests/basic_tests.json", t) - testDecryptV3(tests["python_generated_test_with_odd_iv"], t) -} - -func TestV3_PBKDF2_4(t *testing.T) { - t.Parallel() - tests := loadKeyStoreTestV3("../tests/files/KeyStoreTests/basic_tests.json", t) - testDecryptV3(tests["evilnonce"], t) -} - -func TestV3_Scrypt_1(t *testing.T) { - t.Parallel() - tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t) - testDecryptV3(tests["wikipage_test_vector_scrypt"], t) -} - -func TestV3_Scrypt_2(t *testing.T) { - t.Parallel() - tests := loadKeyStoreTestV3("../tests/files/KeyStoreTests/basic_tests.json", t) - testDecryptV3(tests["test2"], t) -} - -func TestV1_1(t *testing.T) { - t.Parallel() - tests := loadKeyStoreTestV1("testdata/v1_test_vector.json", t) - testDecryptV1(tests["test1"], t) -} - -func TestV1_2(t *testing.T) { - t.Parallel() - ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP} - addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e") - file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e" - k, err := ks.GetKey(addr, file, "g") - if err != nil { - t.Fatal(err) - } - privHex := hex.EncodeToString(crypto.FromECDSA(k.PrivateKey)) - expectedHex := "d1b1178d3529626a1a93e073f65028370d14c7eb0936eb42abef05db6f37ad7d" - if privHex != expectedHex { - t.Fatal(fmt.Errorf("Unexpected privkey: %v, expected %v", privHex, expectedHex)) - } -} - -func testDecryptV3(test KeyStoreTestV3, t *testing.T) { - privBytes, _, err := decryptKeyV3(&test.Json, test.Password) - if err != nil { - t.Fatal(err) - } - privHex := hex.EncodeToString(privBytes) - if test.Priv != privHex { - t.Fatal(fmt.Errorf("Decrypted bytes not equal to test, expected %v have %v", test.Priv, privHex)) - } -} - -func testDecryptV1(test KeyStoreTestV1, t *testing.T) { - privBytes, _, err := decryptKeyV1(&test.Json, test.Password) - if err != nil { - t.Fatal(err) - } - privHex := hex.EncodeToString(privBytes) - if test.Priv != privHex { - t.Fatal(fmt.Errorf("Decrypted bytes not equal to test, expected %v have %v", test.Priv, privHex)) - } -} - -func loadKeyStoreTestV3(file string, t *testing.T) map[string]KeyStoreTestV3 { - tests := make(map[string]KeyStoreTestV3) - err := common.LoadJSON(file, &tests) - if err != nil { - t.Fatal(err) - } - return tests -} - -func loadKeyStoreTestV1(file string, t *testing.T) map[string]KeyStoreTestV1 { - tests := make(map[string]KeyStoreTestV1) - err := common.LoadJSON(file, &tests) - if err != nil { - t.Fatal(err) - } - return tests -} - -func TestKeyForDirectICAP(t *testing.T) { - t.Parallel() - key := NewKeyForDirectICAP(rand.Reader) - if !strings.HasPrefix(key.Address.Hex(), "0x00") { - t.Errorf("Expected first address byte to be zero, have: %s", key.Address.Hex()) - } -} diff --git a/accounts/presale.go b/accounts/presale.go deleted file mode 100644 index 304dd6d812836..0000000000000 --- a/accounts/presale.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package accounts - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/expanse-org/go-expanse/crypto" - "github.com/pborman/uuid" - "golang.org/x/crypto/pbkdf2" -) - -// creates a Key and stores that in the given KeyStore by decrypting a presale key JSON -func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (Account, *Key, error) { - key, err := decryptPreSaleKey(keyJSON, password) - if err != nil { - return Account{}, nil, err - } - key.Id = uuid.NewRandom() - a := Account{Address: key.Address, File: keyStore.JoinPath(keyFileName(key.Address))} - err = keyStore.StoreKey(a.File, key, password) - return a, key, err -} - -func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error) { - preSaleKeyStruct := struct { - EncSeed string - EthAddr string - Email string - BtcAddr string - }{} - err = json.Unmarshal(fileContent, &preSaleKeyStruct) - if err != nil { - return nil, err - } - encSeedBytes, err := hex.DecodeString(preSaleKeyStruct.EncSeed) - if err != nil { - return nil, err - } - if len(encSeedBytes) < 16 { - return nil, fmt.Errorf("invalid presale key: encseed shorter than one AES block") - } - iv := encSeedBytes[:16] - cipherText := encSeedBytes[16:] - /* - See https://github.com/ethereum/pyethsaletool - - pyethsaletool generates the encryption key from password by - 2000 rounds of PBKDF2 with HMAC-SHA-256 using password as salt (:(). - 16 byte key length within PBKDF2 and resulting key is used as AES key - */ - passBytes := []byte(password) - derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New) - plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv) - if err != nil { - return nil, err - } - ethPriv := crypto.Keccak256(plainText) - ecKey := crypto.ToECDSA(ethPriv) - key = &Key{ - Id: nil, - Address: crypto.PubkeyToAddress(ecKey.PublicKey), - PrivateKey: ecKey, - } - derivedAddr := hex.EncodeToString(key.Address.Bytes()) // needed because .Hex() gives leading "0x" - expectedAddr := preSaleKeyStruct.EthAddr - if derivedAddr != expectedAddr { - err = fmt.Errorf("decrypted addr '%s' not equal to expected addr '%s'", derivedAddr, expectedAddr) - } - return key, err -} - -func aesCTRXOR(key, inText, iv []byte) ([]byte, error) { - // AES-128 is selected due to size of encryptKey.
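 - // CTR mode makes encryption and decryption the same keystream XOR, so
 - // this single helper serves both directions.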
- aesBlock, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - stream := cipher.NewCTR(aesBlock, iv) - outText := make([]byte, len(inText)) - stream.XORKeyStream(outText, inText) - return outText, err -} - -func aesCBCDecrypt(key, cipherText, iv []byte) ([]byte, error) { - aesBlock, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - decrypter := cipher.NewCBCDecrypter(aesBlock, iv) - paddedPlaintext := make([]byte, len(cipherText)) - decrypter.CryptBlocks(paddedPlaintext, cipherText) - plaintext := pkcs7Unpad(paddedPlaintext) - if plaintext == nil { - return nil, ErrDecrypt - } - return plaintext, err -} - -// From https://leanpub.com/gocrypto/read#leanpub-auto-block-cipher-modes -func pkcs7Unpad(in []byte) []byte { - if len(in) == 0 { - return nil - } - - padding := in[len(in)-1] - if int(padding) > len(in) || padding > aes.BlockSize { - return nil - } else if padding == 0 { - return nil - } - - for i := len(in) - 1; i > len(in)-int(padding)-1; i-- { - if in[i] != padding { - return nil - } - } - return in[:len(in)-int(padding)] -} diff --git a/accounts/watch.go b/accounts/watch.go deleted file mode 100644 index efff823262f5f..0000000000000 --- a/accounts/watch.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// +build darwin,!ios freebsd linux,!arm64 netbsd solaris windows - -package accounts - -import ( - "time" - - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/rjeczalik/notify" -) - -type watcher struct { - ac *addrCache - starting bool - running bool - ev chan notify.EventInfo - quit chan struct{} -} - -func newWatcher(ac *addrCache) *watcher { - return &watcher{ - ac: ac, - ev: make(chan notify.EventInfo, 10), - quit: make(chan struct{}), - } -} - -// starts the watcher loop in the background. -// Start a watcher in the background if that's not already in progress. -// The caller must hold w.ac.mu. -func (w *watcher) start() { - if w.starting || w.running { - return - } - w.starting = true - go w.loop() -} - -func (w *watcher) close() { - close(w.quit) -} - -func (w *watcher) loop() { - defer func() { - w.ac.mu.Lock() - w.running = false - w.starting = false - w.ac.mu.Unlock() - }() - - err := notify.Watch(w.ac.keydir, w.ev, notify.All) - if err != nil { - glog.V(logger.Detail).Infof("can't watch %s: %v", w.ac.keydir, err) - return - } - defer notify.Stop(w.ev) - glog.V(logger.Detail).Infof("now watching %s", w.ac.keydir) - defer glog.V(logger.Detail).Infof("no longer watching %s", w.ac.keydir) - - w.ac.mu.Lock() - w.running = true - w.ac.mu.Unlock() - - // Wait for file system events and reload. - // When an event occurs, the reload call is delayed a bit so that - // multiple events arriving quickly only cause a single reload. 
- var ( - debounce = time.NewTimer(0) - debounceDuration = 500 * time.Millisecond - inCycle, hadEvent bool - ) - defer debounce.Stop() - for { - select { - case <-w.quit: - return - case <-w.ev: - if !inCycle { - debounce.Reset(debounceDuration) - inCycle = true - } else { - hadEvent = true - } - case <-debounce.C: - w.ac.mu.Lock() - w.ac.reload() - w.ac.mu.Unlock() - if hadEvent { - debounce.Reset(debounceDuration) - inCycle, hadEvent = true, false - } else { - inCycle, hadEvent = false, false - } - } - } -} diff --git a/build/deb.control b/build/deb.control index bbfca560adeec..b0068db2cd56c 100644 --- a/build/deb.control +++ b/build/deb.control @@ -13,7 +13,7 @@ Architecture: any Depends: ${misc:Depends}, {{.ExeList}} Description: Meta-package to install gexp and other tools Meta-package to install gexp and other tools - + {{range .Executables}} Package: {{$.ExeName .}} Conflicts: {{$.ExeConflicts .}} diff --git a/cmd/disasm/main.go b/cmd/disasm/main.go deleted file mode 100644 index 4e70818b3769d..0000000000000 --- a/cmd/disasm/main.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of go-expanse. -// -// go-expanse is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-expanse is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-expanse. If not, see . - -// disasm is a pretty-printer for EVM bytecode. -package main - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/vm" -) - -func main() { - code, err := ioutil.ReadAll(os.Stdin) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - code = common.Hex2Bytes(string(code[:len(code)-1])) - fmt.Printf("%x\n", code) - - for pc := uint64(0); pc < uint64(len(code)); pc++ { - op := vm.OpCode(code[pc]) - fmt.Printf("%-5d %v", pc, op) - - switch op { - case vm.PUSH1, vm.PUSH2, vm.PUSH3, vm.PUSH4, vm.PUSH5, vm.PUSH6, vm.PUSH7, vm.PUSH8, vm.PUSH9, vm.PUSH10, vm.PUSH11, vm.PUSH12, vm.PUSH13, vm.PUSH14, vm.PUSH15, vm.PUSH16, vm.PUSH17, vm.PUSH18, vm.PUSH19, vm.PUSH20, vm.PUSH21, vm.PUSH22, vm.PUSH23, vm.PUSH24, vm.PUSH25, vm.PUSH26, vm.PUSH27, vm.PUSH28, vm.PUSH29, vm.PUSH30, vm.PUSH31, vm.PUSH32: - a := uint64(op) - uint64(vm.PUSH1) + 1 - fmt.Printf(" => %x", code[pc+1:pc+1+a]) - - pc += a - } - fmt.Println() - } -} diff --git a/cmd/ethtest/main.go b/cmd/ethtest/main.go deleted file mode 100644 index 9bf61a2f878e2..0000000000000 --- a/cmd/ethtest/main.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of go-expanse. -// -// go-expanse is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-expanse is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-expanse. If not, see . - -// exptest executes Expanse JSON tests. -package main - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/params" - "github.com/expanse-org/go-expanse/tests" - "gopkg.in/urfave/cli.v1" - -) - -var ( - continueOnError = false - testExtension = ".json" - defaultTest = "all" - defaultDir = "." - allTests = []string{"BlockTests", "StateTests", "TransactionTests", "VMTests", "RLPTests"} - testDirMapping = map[string]string{"BlockTests": "BlockchainTests"} - skipTests = []string{} - - TestFlag = cli.StringFlag{ - Name: "test", - Usage: "Test type (string): VMTests, TransactionTests, StateTests, BlockTests", - Value: defaultTest, - } - FileFlag = cli.StringFlag{ - Name: "file", - Usage: "Test file or directory. Directories are searched for .json files 1 level deep", - Value: defaultDir, - EnvVar: "EXPANSE_TEST_PATH", - } - ContinueOnErrorFlag = cli.BoolFlag{ - Name: "continue", - Usage: "Continue running tests on error (true) or [default] exit immediately (false)", - } - ReadStdInFlag = cli.BoolFlag{ - Name: "stdin", - Usage: "Accept input from stdin instead of reading from file", - } - SkipTestsFlag = cli.StringFlag{ - Name: "skip", - Usage: "Test names to skip", - } - TraceFlag = cli.BoolFlag{ - Name: "trace", - Usage: "Enable VM tracing", - } -) - -func runTestWithReader(test string, r io.Reader) error { - glog.Infoln("runTest", test) - var err error - switch strings.ToLower(test) { - case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests": - err = tests.RunBlockTestWithReader(params.MainNetHomesteadBlock, params.MainNetDAOForkBlock, r, skipTests) - case "st", "state", "statetest", "statetests": - rs := tests.RuleSet{HomesteadBlock: params.MainNetHomesteadBlock, DAOForkBlock: params.MainNetDAOForkBlock, DAOForkSupport: true} - err = tests.RunStateTestWithReader(rs, r, skipTests) - case "tx", "transactiontest", "transactiontests": - err = tests.RunTransactionTestsWithReader(r, skipTests) - case "vm", "vmtest", "vmtests": - err = tests.RunVmTestWithReader(r, skipTests) - case "rlp", "rlptest", "rlptests": - err = tests.RunRLPTestWithReader(r, skipTests) - default: - err = fmt.Errorf("invalid test type specified: %v", test) - } - - if err != nil { - return err - } - - return nil -} - -func getFiles(path string) ([]string, error) { - glog.Infoln("getFiles", path) - var files []string - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return nil, err - } - - switch mode := fi.Mode(); { - case mode.IsDir(): - fi, _ := ioutil.ReadDir(path) - files = make([]string, len(fi)) - for i, v := range fi { - // only go 1 depth and leave directory entries blank - if !v.IsDir() && v.Name()[len(v.Name())-len(testExtension):len(v.Name())] == testExtension { - files[i] = filepath.Join(path, v.Name()) - glog.Infoln("Found file", files[i]) - } - } - case mode.IsRegular(): - files = make([]string, 1) - files[0] = path - } - - return files, nil -} - -func runSuite(test, file string) { - var tests []string - - if test == defaultTest { - tests = allTests - } else { - tests = []string{test} - } - - for _, curTest := range tests { - glog.Infoln("runSuite", curTest, file) - var err error - var files []string - if test == defaultTest { 
- // check if we have an explicit directory mapping for the test - if _, ok := testDirMapping[curTest]; ok { - files, err = getFiles(filepath.Join(file, testDirMapping[curTest])) - } else { - // otherwise assume test name - files, err = getFiles(filepath.Join(file, curTest)) - } - } else { - files, err = getFiles(file) - } - if err != nil { - glog.Fatalln(err) - } - - if len(files) == 0 { - glog.Warningln("No files matched path") - } - for _, curFile := range files { - // Skip blank entries - if len(curFile) == 0 { - continue - } - - r, err := os.Open(curFile) - if err != nil { - glog.Fatalln(err) - } - defer r.Close() - - err = runTestWithReader(curTest, r) - if err != nil { - if continueOnError { - glog.Errorln(err) - } else { - glog.Fatalln(err) - } - } - } - } -} - -func setupApp(c *cli.Context) error { - flagTest := c.GlobalString(TestFlag.Name) - flagFile := c.GlobalString(FileFlag.Name) - continueOnError = c.GlobalBool(ContinueOnErrorFlag.Name) - useStdIn := c.GlobalBool(ReadStdInFlag.Name) - skipTests = strings.Split(c.GlobalString(SkipTestsFlag.Name), " ") - - if !useStdIn { - runSuite(flagTest, flagFile) - } else { - if err := runTestWithReader(flagTest, os.Stdin); err != nil { - glog.Fatalln(err) - } - } - return nil -} - -func main() { - glog.SetToStderr(true) - - app := cli.NewApp() - app.Name = "exptest" - app.Usage = "go-expanse test interface" - app.Action = setupApp - app.Version = "0.2.0" - app.Author = "go-expanse team" - - app.Flags = []cli.Flag{ - TestFlag, - FileFlag, - ContinueOnErrorFlag, - ReadStdInFlag, - SkipTestsFlag, - TraceFlag, - } - - if err := app.Run(os.Args); err != nil { - glog.Fatalln(err) - } - -} diff --git a/cmd/geth/dao_test.go b/cmd/geth/dao_test.go deleted file mode 100644 index 4b2ec259c89bb..0000000000000 --- a/cmd/geth/dao_test.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "io/ioutil" - "math/big" - "os" - "path/filepath" - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/params" -) - -// Genesis block for nodes which don't care about the DAO fork (i.e. 
not configured) -var daoOldGenesis = `{ - "alloc" : {}, - "coinbase" : "0x0000000000000000000000000000000000000000", - "difficulty" : "0x20000", - "extraData" : "", - "gasLimit" : "0x2fefd8", - "nonce" : "0x0000000000000042", - "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp" : "0x00", - "config" : {} -}` - -// Genesis block for nodes which actively oppose the DAO fork -var daoNoForkGenesis = `{ - "alloc" : {}, - "coinbase" : "0x0000000000000000000000000000000000000000", - "difficulty" : "0x20000", - "extraData" : "", - "gasLimit" : "0x2fefd8", - "nonce" : "0x0000000000000042", - "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp" : "0x00", - "config" : { - "daoForkBlock" : 314, - "daoForkSupport" : false - } -}` - -// Genesis block for nodes which actively support the DAO fork -var daoProForkGenesis = `{ - "alloc" : {}, - "coinbase" : "0x0000000000000000000000000000000000000000", - "difficulty" : "0x20000", - "extraData" : "", - "gasLimit" : "0x2fefd8", - "nonce" : "0x0000000000000042", - "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp" : "0x00", - "config" : { - "daoForkBlock" : 314, - "daoForkSupport" : true - } -}` - -var daoGenesisHash = common.HexToHash("5e1fc79cb4ffa4739177b5408045cd5d51c6cf766133f23f7cd72ee1f8d790e0") -var daoGenesisForkBlock = big.NewInt(314) - -// Tests that the DAO hard-fork number and the nodes support/opposition is correctly -// set in the database after various initialization procedures and invocations. 
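The three genesis variants differ only in their `config` object, and everything the tests below assert reduces to how `daoForkBlock` and `daoForkSupport` decode from it. A small sketch of that decoding (stdlib only; the real tests read the values back through core.GetChainConfig rather than parsing the JSON directly):

    package main

    import (
        "encoding/json"
        "fmt"
        "math/big"
    )

    // chainConfig mirrors just the two fields the DAO tests inspect. A nil
    // DAOForkBlock means the genesis takes no stance on the fork.
    type chainConfig struct {
        DAOForkBlock   *big.Int `json:"daoForkBlock"`
        DAOForkSupport bool     `json:"daoForkSupport"`
    }

    type genesisSpec struct {
        Config chainConfig `json:"config"`
    }

    func main() {
        blob := []byte(`{"config":{"daoForkBlock":314,"daoForkSupport":true}}`)
        var g genesisSpec
        if err := json.Unmarshal(blob, &g); err != nil {
            panic(err)
        }
        fmt.Println(g.Config.DAOForkBlock, g.Config.DAOForkSupport) // 314 true

        // The "old" genesis has an empty config: both fields stay zero-valued.
        var old genesisSpec
        json.Unmarshal([]byte(`{"config":{}}`), &old)
        fmt.Println(old.Config.DAOForkBlock, old.Config.DAOForkSupport) // <nil> false
    }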
-func TestDAODefaultMainnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, "", [][2]bool{{false, false}}, params.MainNetDAOForkBlock, true) -} -func TestDAOSupportMainnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, "", [][2]bool{{true, false}}, params.MainNetDAOForkBlock, true) -} -func TestDAOOpposeMainnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, "", [][2]bool{{false, true}}, params.MainNetDAOForkBlock, false) -} -func TestDAOSwitchToSupportMainnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, "", [][2]bool{{false, true}, {true, false}}, params.MainNetDAOForkBlock, true) -} -func TestDAOSwitchToOpposeMainnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, "", [][2]bool{{true, false}, {false, true}}, params.MainNetDAOForkBlock, false) -} -func TestDAODefaultTestnet(t *testing.T) { - testDAOForkBlockNewChain(t, true, "", [][2]bool{{false, false}}, params.TestNetDAOForkBlock, true) -} -func TestDAOSupportTestnet(t *testing.T) { - testDAOForkBlockNewChain(t, true, "", [][2]bool{{true, false}}, params.TestNetDAOForkBlock, true) -} -func TestDAOOpposeTestnet(t *testing.T) { - testDAOForkBlockNewChain(t, true, "", [][2]bool{{false, true}}, params.TestNetDAOForkBlock, false) -} -func TestDAOSwitchToSupportTestnet(t *testing.T) { - testDAOForkBlockNewChain(t, true, "", [][2]bool{{false, true}, {true, false}}, params.TestNetDAOForkBlock, true) -} -func TestDAOSwitchToOpposeTestnet(t *testing.T) { - testDAOForkBlockNewChain(t, true, "", [][2]bool{{true, false}, {false, true}}, params.TestNetDAOForkBlock, false) -} -func TestDAOInitOldPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{}, nil, false) -} -func TestDAODefaultOldPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{false, false}}, params.MainNetDAOForkBlock, true) -} -func TestDAOSupportOldPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{true, false}}, params.MainNetDAOForkBlock, true) -} -func TestDAOOpposeOldPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{false, true}}, params.MainNetDAOForkBlock, false) -} -func TestDAOSwitchToSupportOldPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{false, true}, {true, false}}, params.MainNetDAOForkBlock, true) -} -func TestDAOSwitchToOpposeOldPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{true, false}, {false, true}}, params.MainNetDAOForkBlock, false) -} -func TestDAOInitNoForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{}, daoGenesisForkBlock, false) -} -func TestDAODefaultNoForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{false, false}}, daoGenesisForkBlock, false) -} -func TestDAOSupportNoForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{true, false}}, daoGenesisForkBlock, true) -} -func TestDAOOpposeNoForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{false, true}}, daoGenesisForkBlock, false) -} -func TestDAOSwitchToSupportNoForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{false, true}, {true, false}}, daoGenesisForkBlock, true) -} -func TestDAOSwitchToOpposeNoForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{true, false}, {false, true}}, daoGenesisForkBlock, false) -} -func 
TestDAOInitProForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{}, daoGenesisForkBlock, true) -} -func TestDAODefaultProForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{false, false}}, daoGenesisForkBlock, true) -} -func TestDAOSupportProForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{true, false}}, daoGenesisForkBlock, true) -} -func TestDAOOpposeProForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{false, true}}, daoGenesisForkBlock, false) -} -func TestDAOSwitchToSupportProForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{false, true}, {true, false}}, daoGenesisForkBlock, true) -} -func TestDAOSwitchToOpposeProForkPrivnet(t *testing.T) { - testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{true, false}, {false, true}}, daoGenesisForkBlock, false) -} - -func testDAOForkBlockNewChain(t *testing.T, testnet bool, genesis string, votes [][2]bool, expectBlock *big.Int, expectVote bool) { - // Create a temporary data directory to use and inspect later - datadir := tmpdir(t) - defer os.RemoveAll(datadir) - - // Start an instance with the requested flags set and immediately terminate it - if genesis != "" { - json := filepath.Join(datadir, "genesis.json") - if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil { - t.Fatalf("failed to write genesis file: %v", err) - } - runGeth(t, "--datadir", datadir, "init", json).cmd.Wait() - } - for _, vote := range votes { - args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir} - if testnet { - args = append(args, "--testnet") - } - if vote[0] { - args = append(args, "--support-dao-fork") - } - if vote[1] { - args = append(args, "--oppose-dao-fork") - } - geth := runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...) 
- geth.cmd.Wait() - } - // Retrieve the DAO config flag from the database - path := filepath.Join(datadir, "chaindata") - if testnet && genesis == "" { - path = filepath.Join(datadir, "testnet", "chaindata") - } - db, err := ethdb.NewLDBDatabase(path, 0, 0) - if err != nil { - t.Fatalf("failed to open test database: %v", err) - } - defer db.Close() - - genesisHash := common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") - if testnet { - genesisHash = common.HexToHash("0x0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303") - } - if genesis != "" { - genesisHash = daoGenesisHash - } - config, err := core.GetChainConfig(db, genesisHash) - if err != nil { - t.Fatalf("failed to retrieve chain config: %v", err) - } - // Validate the DAO hard-fork block number against the expected value - if config.DAOForkBlock == nil { - if expectBlock != nil { - t.Errorf("dao hard-fork block mismatch: have nil, want %v", expectBlock) - } - } else if expectBlock == nil { - t.Errorf("dao hard-fork block mismatch: have %v, want nil", config.DAOForkBlock) - } else if config.DAOForkBlock.Cmp(expectBlock) != 0 { - t.Errorf("dao hard-fork block mismatch: have %v, want %v", config.DAOForkBlock, expectBlock) - } - if config.DAOForkSupport != expectVote { - t.Errorf("dao hard-fork support mismatch: have %v, want %v", config.DAOForkSupport, expectVote) - } -} diff --git a/cmd/gethrpctest/main.go b/cmd/gethrpctest/main.go deleted file mode 100644 index ce1e55731dc9a..0000000000000 --- a/cmd/gethrpctest/main.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -// gexprpctest is a command to run the external RPC tests. 
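The validation above needs a three-way check because DAOForkBlock is a *big.Int that may legitimately be nil. Factored into a helper (hypothetical, not from the codebase), the rule it enforces is:

    package main

    import (
        "fmt"
        "math/big"
    )

    // forkBlockMatches treats nil as "fork not configured"; Cmp is only safe
    // once both sides are known to be non-nil.
    func forkBlockMatches(have, want *big.Int) bool {
        if have == nil || want == nil {
            return have == nil && want == nil
        }
        return have.Cmp(want) == 0
    }

    func main() {
        fmt.Println(forkBlockMatches(nil, nil))                         // true: both unset
        fmt.Println(forkBlockMatches(big.NewInt(314), big.NewInt(314))) // true
        fmt.Println(forkBlockMatches(big.NewInt(314), nil))             // false: one-sided
    }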
-package main - -import ( - "flag" - "io/ioutil" - "log" - "os" - "os/signal" - - "github.com/expanse-org/go-expanse/accounts" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/eth" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/node" - "github.com/expanse-org/go-expanse/params" - "github.com/expanse-org/go-expanse/tests" - "github.com/expanse-org/go-expanse/whisper" -) - -const defaultTestKey = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291" - -var ( - testFile = flag.String("json", "", "Path to the .json test file to load") - testName = flag.String("test", "", "Name of the test from the .json file to run") - testKey = flag.String("key", defaultTestKey, "Private key of a test account to inject") -) - -func main() { - flag.Parse() - - // Enable logging errors, we really do want to see those - glog.SetV(2) - glog.SetToStderr(true) - - // Load the test suite to run the RPC against - tests, err := tests.LoadBlockTests(*testFile) - if err != nil { - log.Fatalf("Failed to load test suite: %v", err) - } - test, found := tests[*testName] - if !found { - log.Fatalf("Requested test (%s) not found within suite", *testName) - } - // Create the protocol stack to run the test with - keydir, err := ioutil.TempDir("", "") - if err != nil { - log.Fatalf("Failed to create temporary keystore directory: %v", err) - } - defer os.RemoveAll(keydir) - - stack, err := MakeSystemNode(keydir, *testKey, test) - if err != nil { - log.Fatalf("Failed to assemble test stack: %v", err) - } - if err := stack.Start(); err != nil { - log.Fatalf("Failed to start test node: %v", err) - } - defer stack.Stop() - - log.Println("Test node started...") - - // Make sure the tests contained within the suite pass - if err := RunTest(stack, test); err != nil { - log.Fatalf("Failed to run the pre-configured test: %v", err) - } - log.Println("Initial test suite passed...") - - quit := make(chan os.Signal, 1) - signal.Notify(quit, os.Interrupt) - <-quit -} - -// MakeSystemNode configures a protocol stack for the RPC tests based on a given -// keystore path and initial pre-state. 
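-// It wires up IPC, HTTP and WebSocket endpoints with the full module list and
-// registers both the Expanse and Whisper protocols, so the external RPC tests
-// can attach over whichever transport they prefer.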
-func MakeSystemNode(keydir string, privkey string, test *tests.BlockTest) (*node.Node, error) { - // Create a networkless protocol stack - stack, err := node.New(&node.Config{ - IPCPath: node.DefaultIPCEndpoint(), - HTTPHost: common.DefaultHTTPHost, - HTTPPort: common.DefaultHTTPPort, - HTTPModules: []string{"admin", "db", "exp", "debug", "miner", "net", "shh", "txpool", "personal", "web3"}, - WSHost: common.DefaultWSHost, - WSPort: common.DefaultWSPort, - WSModules: []string{"admin", "db", "exp", "debug", "miner", "net", "shh", "txpool", "personal", "web3"}, - NoDiscovery: true, - }) - if err != nil { - return nil, err - } - // Create the keystore and inject an unlocked account if requested - accman := accounts.NewPlaintextManager(keydir) - if len(privkey) > 0 { - key, err := crypto.HexToECDSA(privkey) - if err != nil { - return nil, err - } - a, err := accman.ImportECDSA(key, "") - if err != nil { - return nil, err - } - if err := accman.Unlock(a, ""); err != nil { - return nil, err - } - } - // Initialize and register the Expanse protocol - db, _ := ethdb.NewMemDatabase() - if _, err := test.InsertPreState(db); err != nil { - return nil, err - } - ethConf := &exp.Config{ - TestGenesisState: db, - TestGenesisBlock: test.Genesis, - ChainConfig: &core.ChainConfig{HomesteadBlock: params.MainNetHomesteadBlock}, - AccountManager: accman, - } - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { return exp.New(ctx, ethConf) }); err != nil { - return nil, err - } - // Initialize and register the Whisper protocol - if err := stack.Register(func(*node.ServiceContext) (node.Service, error) { return whisper.New(), nil }); err != nil { - return nil, err - } - return stack, nil -} - -// RunTest executes the specified test against an already pre-configured protocol -// stack to ensure basic checks pass before running RPC tests. -func RunTest(stack *node.Node, test *tests.BlockTest) error { - var expanse *exp.Expanse - stack.Service(&expanse) - blockchain := expanse.BlockChain() - - // Process the blocks and verify the imported headers - blocks, err := test.TryBlocksInsert(blockchain) - if err != nil { - return err - } - if err := test.ValidateImportedHeaders(blockchain, blocks); err != nil { - return err - } - // Retrieve the assembled state and validate it - stateDb, err := blockchain.State() - if err != nil { - return err - } - if err := test.ValidatePostState(stateDb); err != nil { - return err - } - return nil -} diff --git a/cmd/gexp/chaincmd.go b/cmd/gexp/chaincmd.go index 6add203c98eaa..56273255182e5 100644 --- a/cmd/gexp/chaincmd.go +++ b/cmd/gexp/chaincmd.go @@ -58,10 +58,10 @@ participating. ArgsUsage: " ( ... ) ", Category: "BLOCKCHAIN COMMANDS", Description: ` -The import command imports blocks from an RLP-encoded form. The form can be one file -with several RLP-encoded blocks, or several files can be used. -If only one file is used, import error will result in failure. If several files are used, -processing will proceed even if an individual RLP-file import failure occurs. +The import command imports blocks from an RLP-encoded form. The form can be one file +with several RLP-encoded blocks, or several files can be used. +If only one file is used, import error will result in failure. If several files are used, +processing will proceed even if an individual RLP-file import failure occurs. 
`, } exportCommand = cli.Command{ diff --git a/cmd/utils/bootnodes.go b/cmd/utils/bootnodes.go deleted file mode 100644 index 4e379c1de8517..0000000000000 --- a/cmd/utils/bootnodes.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package utils - -import "github.com/expanse-org/go-expanse/p2p/discover" - -// FrontierBootNodes are the enode URLs of the P2P bootstrap nodes running on -// the Frontier network. -var FrontierBootNodes = []*discover.Node{ - // EXP/DEV Go Bootnodes - discover.MustParseNode("enode://7f335a047654f3e70d6f91312a7cf89c39704011f1a584e2698250db3d63817e74b88e26b7854111e16b2c9d0c7173c05419aeee2d0321850227b126d8b1be3f@46.101.156.249:42786"), - discover.MustParseNode("enode://df872f81e25f72356152b44cab662caf1f2e57c3a156ecd20e9ac9246272af68a2031b4239a0bc831f2c6ab34733a041464d46b3ea36dce88d6c11714446e06b@178.62.208.109:42786"), - discover.MustParseNode("enode://96d3919b903e7f5ad59ac2f73c43be9172d9d27e2771355db03fd194732b795829a31fe2ea6de109d0804786c39a807e155f065b4b94c6fce167becd0ac02383@45.55.22.34:42786"), - discover.MustParseNode("enode://5f6c625bf287e3c08aad568de42d868781e961cbda805c8397cfb7be97e229419bef9a5a25a75f97632787106bba8a7caf9060fab3887ad2cfbeb182ab0f433f@46.101.182.53:42786"), - discover.MustParseNode("enode://d33a8d4c2c38a08971ed975b750f21d54c927c0bf7415931e214465a8d01651ecffe4401e1db913f398383381413c78105656d665d83f385244ab302d6138414@128.199.183.48:42786"), - discover.MustParseNode("enode://f6f0d6b9b7d02ec9e8e4a16e38675f3621ea5e69860c739a65c1597ca28aefb3cec7a6d84e471ac927d42a1b64c1cbdefad75e7ce8872d57548ddcece20afdd1@159.203.64.95:42786"), -} - -// TestNetBootNodes are the enode URLs of the P2P bootstrap nodes running on the -// Morden test network. -var TestNetBootNodes = []*discover.Node{ - // ETH/DEV Go Bootnodes - discover.MustParseNode("enode://e4533109cc9bd7604e4ff6c095f7a1d807e15b38e9bfeb05d3b7c423ba86af0a9e89abbf40bd9dde4250fef114cd09270fa4e224cbeef8b7bf05a51e8260d6b8@94.242.229.4:40404"), - discover.MustParseNode("enode://8c336ee6f03e99613ad21274f269479bf4413fb294d697ef15ab897598afb931f56beb8e97af530aee20ce2bcba5776f4a312bc168545de4d43736992c814592@94.242.229.203:30303"), - - // ETH/DEV Cpp Bootnodes -} diff --git a/cmd/utils/client.go b/cmd/utils/client.go deleted file mode 100644 index ffcd5bedb54a6..0000000000000 --- a/cmd/utils/client.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of go-ethereum. 
-// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package utils - -import ( - "fmt" - "strings" - - "github.com/expanse-org/go-expanse/node" - "github.com/expanse-org/go-expanse/rpc" - "gopkg.in/urfave/cli.v1" -) - -// NewRemoteRPCClient returns an RPC client which connects to a running gexp instance. -// Depending on the given context this can either be an IPC or an HTTP client. -func NewRemoteRPCClient(ctx *cli.Context) (rpc.Client, error) { - if ctx.Args().Present() { - endpoint := ctx.Args().First() - return NewRemoteRPCClientFromString(endpoint) - } - // use IPC by default - return rpc.NewIPCClient(node.DefaultIPCEndpoint()) -} - -// NewRemoteRPCClientFromString returns an RPC client which connects to the given -// endpoint. The endpoint must start with `ipc:`, with `rpc:` or `http://` (both HTTP), -// or with `ws:` (WebSocket). -func NewRemoteRPCClientFromString(endpoint string) (rpc.Client, error) { - if strings.HasPrefix(endpoint, "ipc:") { - return rpc.NewIPCClient(endpoint[4:]) - } - if strings.HasPrefix(endpoint, "rpc:") { - return rpc.NewHTTPClient(endpoint[4:]) - } - if strings.HasPrefix(endpoint, "http://") { - return rpc.NewHTTPClient(endpoint) - } - if strings.HasPrefix(endpoint, "ws:") { - return rpc.NewWSClient(endpoint) - } - return nil, fmt.Errorf("invalid endpoint") -} diff --git a/common/httpclient/httpclient.go b/common/httpclient/httpclient.go deleted file mode 100644 index 338d6fee35c0e..0000000000000 --- a/common/httpclient/httpclient.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package httpclient - -import ( - "fmt" - "io/ioutil" - "net/http" - "path/filepath" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" -) - -type HTTPClient struct { - *http.Transport - DocRoot string - schemes []string -} - -func New(docRoot string) (self *HTTPClient) { - self = &HTTPClient{ - Transport: &http.Transport{}, - DocRoot: docRoot, - schemes: []string{"file"}, - } - self.RegisterProtocol("file", http.NewFileTransport(http.Dir(self.DocRoot))) - return -} 
- -// Client returns an http.Client backed by this transport. Clients should be -// reused instead of created as needed and are safe for concurrent use by -// multiple goroutines; a Client is higher-level than a RoundTripper (such as -// Transport) and additionally handles HTTP details such as cookies and redirects. -func (self *HTTPClient) Client() *http.Client { - return &http.Client{ - Transport: self, - } -} - -func (self *HTTPClient) RegisterScheme(scheme string, rt http.RoundTripper) { - self.schemes = append(self.schemes, scheme) - self.RegisterProtocol(scheme, rt) -} - -func (self *HTTPClient) HasScheme(scheme string) bool { - for _, s := range self.schemes { - if s == scheme { - return true - } - } - return false -} - -func (self *HTTPClient) GetAuthContent(uri string, hash common.Hash) ([]byte, error) { - // retrieve content - content, err := self.Get(uri, "") - if err != nil { - return nil, err - } - - // check hash to authenticate content - chash := crypto.Keccak256Hash(content) - if chash != hash { - return nil, fmt.Errorf("content hash mismatch %x != %x (exp)", hash[:], chash[:]) - } - - return content, nil -} - -// Get downloads the document at uri. If path is non-empty, it is interpreted -// as a file path to which the contents are also saved. -func (self *HTTPClient) Get(uri, path string) ([]byte, error) { - // retrieve content - resp, err := self.Client().Get(uri) - if err != nil { - return nil, err - } - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - var content []byte - content, err = ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode/100 != 2 { - return content, fmt.Errorf("HTTP error: %s", resp.Status) - } - - if path != "" { - var abspath string - abspath, err = filepath.Abs(path) - if err != nil { - return nil, err - } - err = ioutil.WriteFile(abspath, content, 0600) - if err != nil { - return nil, err - } - } - - return content, nil -} diff --git a/common/httpclient/httpclient_test.go b/common/httpclient/httpclient_test.go deleted file mode 100644 index b28355d38d483..0000000000000 --- a/common/httpclient/httpclient_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
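New's file-scheme registration above is plain net/http machinery: any RoundTripper can be attached to a URL scheme on a Transport. A stand-alone sketch of the same trick (doc root and file name are illustrative):

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        // Serve file:// URLs out of a doc root by registering a RoundTripper
        // for the scheme, exactly what New does with http.NewFileTransport.
        t := &http.Transport{}
        t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/tmp")))
        client := &http.Client{Transport: t}

        // Resolves to /tmp/test.content; a missing file yields a 404 response,
        // not a transport error.
        resp, err := client.Get("file:///test.content")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := ioutil.ReadAll(resp.Body)
        fmt.Printf("status %s, %d bytes\n", resp.Status, len(body))
    }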
- -package httpclient - -import ( - "io/ioutil" - "net/http" - "os" - "path" - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" -) - -func TestGetAuthContent(t *testing.T) { - dir, err := ioutil.TempDir("", "httpclient-test") - if err != nil { - t.Fatal("cannot create temporary directory:", err) - } - defer os.RemoveAll(dir) - client := New(dir) - - text := "test" - hash := crypto.Keccak256Hash([]byte(text)) - if err := ioutil.WriteFile(path.Join(dir, "test.content"), []byte(text), os.ModePerm); err != nil { - t.Fatal("could not write test file", err) - } - content, err := client.GetAuthContent("file:///test.content", hash) - if err != nil { - t.Errorf("no error expected, got %v", err) - } - if string(content) != text { - t.Errorf("incorrect content. expected %v, got %v", text, string(content)) - } - - hash = common.Hash{} - content, err = client.GetAuthContent("file:///test.content", hash) - expected := "content hash mismatch 0000000000000000000000000000000000000000000000000000000000000000 != 9c22ff5f21f0b81b113e63f7db6da94fedef11b2119b4088b89664fb9a3cb658 (exp)" - if err == nil { - t.Errorf("expected error, got nothing") - } else { - if err.Error() != expected { - t.Errorf("expected error '%s' got '%v'", expected, err) - } - } - -} - -type rt struct{} - -func (rt) RoundTrip(req *http.Request) (resp *http.Response, err error) { return } - -func TestRegisterScheme(t *testing.T) { - client := New("/tmp/") - if client.HasScheme("scheme") { - t.Errorf("expected scheme not to be registered") - } - client.RegisterScheme("scheme", rt{}) - if !client.HasScheme("scheme") { - t.Errorf("expected scheme to be registered") - } -} diff --git a/common/math/dist.go b/common/math/dist.go deleted file mode 100644 index 802e0f3ea1a57..0000000000000 --- a/common/math/dist.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
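The expected digest hard-coded in TestGetAuthContent is simply the Keccak-256 of "test". Assuming today's golang.org/x/crypto/sha3 package as a stand-in for the crypto package used above, the check reproduces like this:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    // keccak256 mimics crypto.Keccak256Hash: the legacy (pre-NIST) Keccak
    // padding, not standard SHA3-256.
    func keccak256(data []byte) []byte {
        d := sha3.NewLegacyKeccak256()
        d.Write(data)
        return d.Sum(nil)
    }

    func main() {
        // Prints 9c22ff5f21f0b81b113e63f7db6da94fedef11b2119b4088b89664fb9a3cb658,
        // the digest the test expects for "test".
        fmt.Printf("%x\n", keccak256([]byte("test")))
    }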
- -package math - -import ( - "math/big" - "sort" - - "github.com/expanse-org/go-expanse/common" -) - -type Summer interface { - Sum(i int) *big.Int - Len() int -} - -func Sum(slice Summer) (sum *big.Int) { - sum = new(big.Int) - - for i := 0; i < slice.Len(); i++ { - sum.Add(sum, slice.Sum(i)) - } - return -} - -type Vector struct { - Gas, Price *big.Int -} - -type VectorsBy func(v1, v2 Vector) bool - -func (self VectorsBy) Sort(vectors []Vector) { - bs := vectorSorter{ - vectors: vectors, - by: self, - } - sort.Sort(bs) -} - -type vectorSorter struct { - vectors []Vector - by func(v1, v2 Vector) bool -} - -func (v vectorSorter) Len() int { return len(v.vectors) } -func (v vectorSorter) Less(i, j int) bool { return v.by(v.vectors[i], v.vectors[j]) } -func (v vectorSorter) Swap(i, j int) { v.vectors[i], v.vectors[j] = v.vectors[j], v.vectors[i] } - -func PriceSort(v1, v2 Vector) bool { return v1.Price.Cmp(v2.Price) < 0 } -func GasSort(v1, v2 Vector) bool { return v1.Gas.Cmp(v2.Gas) < 0 } - -type vectorSummer struct { - vectors []Vector - by func(v Vector) *big.Int -} - -type VectorSum func(v Vector) *big.Int - -func (v VectorSum) Sum(vectors []Vector) *big.Int { - vs := vectorSummer{ - vectors: vectors, - by: v, - } - return Sum(vs) -} - -func (v vectorSummer) Len() int { return len(v.vectors) } -func (v vectorSummer) Sum(i int) *big.Int { return v.by(v.vectors[i]) } - -func GasSum(v Vector) *big.Int { return v.Gas } - -var etherInWei = new(big.Rat).SetInt(common.String2Big("1000000000000000000")) - -func GasPrice(bp, gl, ep *big.Int) *big.Int { - BP := new(big.Rat).SetInt(bp) - GL := new(big.Rat).SetInt(gl) - EP := new(big.Rat).SetInt(ep) - GP := new(big.Rat).Quo(BP, GL) - GP = GP.Quo(GP, EP) - - return GP.Mul(GP, etherInWei).Num() -} diff --git a/common/natspec/natspec.go b/common/natspec/natspec.go deleted file mode 100644 index ddd10b253dd27..0000000000000 --- a/common/natspec/natspec.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
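VectorsBy/vectorSorter above is the classic pre-Go-1.8 adapter for sorting with an arbitrary comparison function; sort.Slice now inlines the same machinery. A sketch of the equivalent call (toy vector type, values illustrative):

    package main

    import (
        "fmt"
        "math/big"
        "sort"
    )

    type vector struct{ gas, price *big.Int }

    func main() {
        vs := []vector{
            {big.NewInt(21000), big.NewInt(50)},
            {big.NewInt(90000), big.NewInt(20)},
            {big.NewInt(30000), big.NewInt(35)},
        }
        // PriceSort expressed as a sort.Slice closure: ascending by price.
        sort.Slice(vs, func(i, j int) bool { return vs[i].price.Cmp(vs[j].price) < 0 })
        for _, v := range vs {
            fmt.Println(v.gas, v.price) // prices come out 20, 35, 50
        }
    }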
- -// +build ignore - -package natspec - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/common/httpclient" - "github.com/expanse-org/go-expanse/common/registrar" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/xeth" - - "github.com/robertkrimen/otto" -) - -type abi2method map[[8]byte]*method - -type NatSpec struct { - jsvm *otto.Otto - abiDocJson []byte - userDoc userDoc - tx, data string -} - -// main entry point to get a natspec notice for a transaction -// the implementation is frontend friendly in that it always gives back -// a notice that is safe to display -// :FIXME: the second return value is an error, which can be used to fine-tune behaviour -func GetNotice(xeth *xeth.XEth, tx string, http *httpclient.HTTPClient) (notice string) { - ns, err := New(xeth, tx, http) - if err != nil { - if ns == nil { - return getFallbackNotice(fmt.Sprintf("no NatSpec info found for contract: %v", err), tx) - } else { - return getFallbackNotice(fmt.Sprintf("invalid NatSpec info: %v", err), tx) - } - } - - notice, err = ns.Notice() - if err != nil { - return getFallbackNotice(fmt.Sprintf("NatSpec notice error: %v", err), tx) - } - - return -} - -func getFallbackNotice(comment, tx string) string { - return fmt.Sprintf("About to submit transaction (%s): %s", comment, tx) -} - -type transaction struct { - To string `json:"to"` - Data string `json:"data"` -} - -type jsonTx struct { - Params []transaction `json:"params"` -} - -type contractInfo struct { - Source string `json:"source"` - Language string `json:"language"` - Version string `json:"compilerVersion"` - AbiDefinition json.RawMessage `json:"abiDefinition"` - UserDoc userDoc `json:"userDoc"` - DeveloperDoc json.RawMessage `json:"developerDoc"` -} - -func New(xeth *xeth.XEth, jsontx string, http *httpclient.HTTPClient) (self *NatSpec, err error) { - - // extract contract address from tx - var tx jsonTx - err = json.Unmarshal([]byte(jsontx), &tx) - if err != nil { - return - } - t := tx.Params[0] - contractAddress := t.To - - content, err := FetchDocsForContract(contractAddress, xeth, http) - if err != nil { - return - } - - self, err = NewWithDocs(content, jsontx, t.Data) - return -} - -// also called by admin.contractInfo.get -func FetchDocsForContract(contractAddress string, xeth *xeth.XEth, client *httpclient.HTTPClient) (content []byte, err error) { - // retrieve contract hash from state - codehex := xeth.CodeAt(contractAddress) - codeb := xeth.CodeAtBytes(contractAddress) - - if codehex == "0x" { - err = fmt.Errorf("contract (%v) not found", contractAddress) - return - } - codehash := common.BytesToHash(crypto.Keccak256(codeb)) - // set up nameresolver with natspecreg + urlhint contract addresses - reg := registrar.New(xeth) - - // resolve host via HashReg/UrlHint Resolver - hash, err := reg.HashToHash(codehash) - if err != nil { - return - } - if client.HasScheme("bzz") { - content, err = client.Get("bzz://"+hash.Hex()[2:], "") - if err == nil { // non-fatal - return - } - err = nil - // falling back to urlhint - } - - uri, err := reg.HashToUrl(hash) - if err != nil { - return - } - - // get content via http client and authenticate content using hash - content, err = client.GetAuthContent(uri, hash) - if err != nil { - return - } - return -} - -func NewWithDocs(infoDoc []byte, tx string, data string) (self *NatSpec, err error) { - - var contract contractInfo - err = json.Unmarshal(infoDoc, &contract) - if err != nil 
{ - return - } - - self = &NatSpec{ - jsvm: otto.New(), - abiDocJson: []byte(contract.AbiDefinition), - userDoc: contract.UserDoc, - tx: tx, - data: data, - } - - // load and require natspec js (it is meant to be a protected environment) - _, err = self.jsvm.Run(natspecJS) - if err != nil { - return - } - _, err = self.jsvm.Run("var natspec = require('natspec');") - return -} - -// type abiDoc []method - -// type method struct { -// Name string `json:name` -// Inputs []input `json:inputs` -// abiKey [8]byte -// } - -// type input struct { -// Name string `json:name` -// Type string `json:type` -// } - -// json skeleton for abi doc (contract method definitions) -type method struct { - Notice string `json:"notice"` - name string -} - -type userDoc struct { - Methods map[string]*method `json:"methods"` -} - -func (self *NatSpec) makeAbi2method(abiKey [8]byte) (meth *method) { - for signature, m := range self.userDoc.Methods { - name := strings.Split(signature, "(")[0] - hash := []byte(common.Bytes2Hex(crypto.Keccak256([]byte(signature)))) - var key [8]byte - copy(key[:], hash[:8]) - if bytes.Equal(key[:], abiKey[:]) { - meth = m - meth.name = name - return - } - } - return -} - -func (self *NatSpec) Notice() (notice string, err error) { - var abiKey [8]byte - if len(self.data) < 10 { - err = fmt.Errorf("invalid transaction data") - return - } - copy(abiKey[:], self.data[2:10]) - meth := self.makeAbi2method(abiKey) - - if meth == nil { - err = fmt.Errorf("abi key does not match any method") - return - } - notice, err = self.noticeForMethod(self.tx, meth.name, meth.Notice) - return -} - -func (self *NatSpec) noticeForMethod(tx string, name, expression string) (notice string, err error) { - - if _, err = self.jsvm.Run("var transaction = " + tx + ";"); err != nil { - return "", fmt.Errorf("natspec.js error setting transaction: %v", err) - } - - if _, err = self.jsvm.Run("var abi = " + string(self.abiDocJson) + ";"); err != nil { - return "", fmt.Errorf("natspec.js error setting abi: %v", err) - } - - if _, err = self.jsvm.Run("var method = '" + name + "';"); err != nil { - return "", fmt.Errorf("natspec.js error setting method: %v", err) - } - - if _, err = self.jsvm.Run("var expression = \"" + expression + "\";"); err != nil { - return "", fmt.Errorf("natspec.js error setting expression: %v", err) - } - - self.jsvm.Run("var call = {method: method,abi: abi,transaction: transaction};") - value, err := self.jsvm.Run("natspec.evaluateExpression(expression, call);") - if err != nil { - return "", fmt.Errorf("natspec.js error evaluating expression: %v", err) - } - evalError := "Natspec evaluation failed, wrong input params" - if value.String() == evalError { - return "", fmt.Errorf("natspec.js error evaluating expression: wrong input params in expression '%s'", expression) - } - if len(value.String()) == 0 { - return "", fmt.Errorf("natspec.js error evaluating expression") - } - - return value.String(), nil - -} diff --git a/common/natspec/natspec_e2e_test.go b/common/natspec/natspec_e2e_test.go deleted file mode 100644 index 22dfe7b273fb6..0000000000000 --- a/common/natspec/natspec_e2e_test.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// +build ignore - -package natspec - -import ( - "fmt" - "io/ioutil" - "math/big" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "github.com/expanse-org/go-expanse/accounts" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/common/httpclient" - "github.com/expanse-org/go-expanse/common/registrar" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/exp" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/node" - xe "github.com/expanse-org/go-expanse/xeth" -) - -const ( - testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - testBalance = "10000000000000000000" - testKey = "e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674" - - testFileName = "long_file_name_for_testing_registration_of_URLs_longer_than_32_bytes.content" - - testNotice = "Register key `utils.toHex(_key)` <- content `utils.toHex(_content)`" - - testExpNotice = "Register key 0xadd1a7d961cff0242089674ec2ef6fca671ab15e1fe80e38859fc815b98d88ab <- content 0xb3a2dea218de5d8bbe6c4645aadbf67b5ab00ecb1a9ec95dbdad6a0eed3e41a7" - - testExpNotice2 = `About to submit transaction (NatSpec notice error: abi key does not match any method): {"params":[{"to":"%s","data": "0x31e12c20"}]}` - - testExpNotice3 = `About to submit transaction (no NatSpec info found for contract: HashToHash: content hash not found for '0x1392c62d05b2d149e22a339c531157ae06b44d39a674cce500064b12b9aeb019'): {"params":[{"to":"%s","data": "0x300a3bbfb3a2dea218de5d8bbe6c4645aadbf67b5ab00ecb1a9ec95dbdad6a0eed3e41a7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000066696c653a2f2f2f746573742e636f6e74656e74"}]}` -) - -const ( - testUserDoc = ` -{ - "methods": { - "register(uint256,uint256)": { - "notice": "` + testNotice + `" - } - }, - "invariants": [ - { "notice": "" } - ], - "construction": [ - { "notice": "" } - ] -} -` - testAbiDefinition = ` -[{ - "name": "register", - "constant": false, - "type": "function", - "inputs": [{ - "name": "_key", - "type": "uint256" - }, { - "name": "_content", - "type": "uint256" - }], - "outputs": [] -}] -` - - testContractInfo = ` -{ - "userDoc": ` + testUserDoc + `, - "abiDefinition": ` + testAbiDefinition + ` -} -` -) - -type testFrontend struct { - t *testing.T - expanse *exp.Expanse - xeth *xe.XEth - wait chan *big.Int - lastConfirm string - wantNatSpec bool -} - -func (self *testFrontend) AskPassword() (string, bool) { - return "", true -} - -func (self *testFrontend) UnlockAccount(acc []byte) bool { - self.expanse.AccountManager().Unlock(common.BytesToAddress(acc), "password") - return true -} - -func (self *testFrontend) ConfirmTransaction(tx string) bool { - if self.wantNatSpec { - client := httpclient.New("/tmp/") - self.lastConfirm = GetNotice(self.xeth, tx, client) - } - return true -} - -func testExp(t *testing.T) (expanse *exp.Expanse, err error) { - - tmp, err := ioutil.TempDir("", "natspec-test") - if err != nil { - t.Fatal(err) - } - - db, _ := ethdb.NewMemDatabase() - addr := 
common.HexToAddress(testAddress) - core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr, common.String2Big(testBalance)}) - ks := crypto.NewKeyStorePassphrase(filepath.Join(tmp, "keystore"), crypto.LightScryptN, crypto.LightScryptP) - am := accounts.NewManager(ks) - keyb, err := crypto.HexToECDSA(testKey) - if err != nil { - t.Fatal(err) - } - key := crypto.NewKeyFromECDSA(keyb) - err = ks.StoreKey(key, "") - if err != nil { - t.Fatal(err) - } - - err = am.Unlock(key.Address, "") - if err != nil { - t.Fatal(err) - } - - // only use minimalistic stack with no networking - return exp.New(&node.ServiceContext{EventMux: new(event.TypeMux)}, &exp.Config{ - AccountManager: am, - Etherbase: common.HexToAddress(testAddress), - PowTest: true, - TestGenesisState: db, - GpoMinGasPrice: common.Big1, - GpobaseCorrectionFactor: 1, - GpoMaxGasPrice: common.Big1, - }) -} - -func testInit(t *testing.T) (self *testFrontend) { - // initialise and start minimal expanse stack - expanse, err := testExp(t) - if err != nil { - t.Errorf("error creating expanse: %v", err) - return - } - err = expanse.Start(nil) - if err != nil { - t.Errorf("error starting expanse: %v", err) - return - } - - // mock frontend - self = &testFrontend{t: t, expanse: expanse} - self.xeth = xe.New(nil, self) - self.wait = self.xeth.UpdateState() - addr, _ := self.expanse.Etherbase() - - // initialise the registry contracts - reg := registrar.New(self.xeth) - registrar.GlobalRegistrarAddr = "0x0" - - var txG, txH, txU string - txG, err = reg.SetGlobalRegistrar("", addr) - if err != nil { - t.Fatalf("error creating GlobalRegistrar: %v", err) - } - if !processTxs(self, t, 1) { - t.Fatalf("error mining txs") - } - recG := self.xeth.GetTxReceipt(common.HexToHash(txG)) - if recG == nil { - t.Fatalf("blockchain error creating GlobalRegistrar") - } - registrar.GlobalRegistrarAddr = recG.ContractAddress.Hex() - - txH, err = reg.SetHashReg("", addr) - if err != nil { - t.Errorf("error creating HashReg: %v", err) - } - if !processTxs(self, t, 1) { - t.Errorf("error mining txs") - } - recH := self.xeth.GetTxReceipt(common.HexToHash(txH)) - if recH == nil { - t.Fatalf("blockchain error creating HashReg") - } - registrar.HashRegAddr = recH.ContractAddress.Hex() - - txU, err = reg.SetUrlHint("", addr) - if err != nil { - t.Errorf("error creating UrlHint: %v", err) - } - if !processTxs(self, t, 1) { - t.Errorf("error mining txs") - } - recU := self.xeth.GetTxReceipt(common.HexToHash(txU)) - if recU == nil { - t.Fatalf("blockchain error creating UrlHint") - } - registrar.UrlHintAddr = recU.ContractAddress.Hex() - - return -} - -// end to end test -func TestNatspecE2E(t *testing.T) { - t.Skip() - - tf := testInit(t) - defer tf.expanse.Stop() - addr, _ := tf.expanse.Etherbase() - - // create a contractInfo file (mock cloud-deployed contract metadocs) - // incidentally this is the info for the HashReg contract itself - ioutil.WriteFile("/tmp/"+testFileName, []byte(testContractInfo), os.ModePerm) - dochash := crypto.Keccak256Hash([]byte(testContractInfo)) - - // take the codehash for the contract we wanna test - codeb := tf.xeth.CodeAtBytes(registrar.HashRegAddr) - codehash := crypto.Keccak256Hash(codeb) - - reg := registrar.New(tf.xeth) - _, err := reg.SetHashToHash(addr, codehash, dochash) - if err != nil { - t.Errorf("error registering: %v", err) - } - _, err = reg.SetUrlToHash(addr, dochash, "file:///"+testFileName) - if err != nil { - t.Errorf("error registering: %v", err) - } - if !processTxs(tf, t, 5) { - return - } - - // NatSpec info for 
register method of HashReg contract installed - // now using the same transactions to check confirm messages - - tf.wantNatSpec = true // this is set so now the backend uses natspec confirmation - _, err = reg.SetHashToHash(addr, codehash, dochash) - if err != nil { - t.Errorf("error calling contract registry: %v", err) - } - - fmt.Printf("GlobalRegistrar: %v, HashReg: %v, UrlHint: %v\n", registrar.GlobalRegistrarAddr, registrar.HashRegAddr, registrar.UrlHintAddr) - if tf.lastConfirm != testExpNotice { - t.Errorf("Wrong confirm message. expected\n'%v', got\n'%v'", testExpNotice, tf.lastConfirm) - } - - // test unknown method - exp := fmt.Sprintf(testExpNotice2, registrar.HashRegAddr) - _, err = reg.SetOwner(addr) - if err != nil { - t.Errorf("error setting owner: %v", err) - } - - if tf.lastConfirm != exp { - t.Errorf("Wrong confirm message, expected\n'%v', got\n'%v'", exp, tf.lastConfirm) - } - - // test unknown contract - exp = fmt.Sprintf(testExpNotice3, registrar.UrlHintAddr) - - _, err = reg.SetUrlToHash(addr, dochash, "file:///test.content") - if err != nil { - t.Errorf("error registering: %v", err) - } - - if tf.lastConfirm != exp { - t.Errorf("Wrong confirm message, expected '%v', got '%v'", exp, tf.lastConfirm) - } - -} - -func pendingTransactions(repl *testFrontend, t *testing.T) (txc int64, err error) { - txs := repl.expanse.TxPool().GetTransactions() - return int64(len(txs)), nil -} - -func processTxs(repl *testFrontend, t *testing.T, expTxc int) bool { - var txc int64 - var err error - for i := 0; i < 50; i++ { - txc, err = pendingTransactions(repl, t) - if err != nil { - t.Errorf("unexpected error checking pending transactions: %v", err) - return false - } - if expTxc < int(txc) { - t.Errorf("too many pending transactions: expected %v, got %v", expTxc, txc) - return false - } else if expTxc == int(txc) { - break - } - time.Sleep(100 * time.Millisecond) - } - if int(txc) != expTxc { - t.Errorf("incorrect number of pending transactions, expected %v, got %v", expTxc, txc) - return false - } - - - err = repl.expanse.StartMining(runtime.NumCPU(), "") - if err != nil { - t.Errorf("unexpected error mining: %v", err) - return false - } - defer repl.expanse.StopMining() - - timer := time.NewTimer(100 * time.Second) - height := new(big.Int).Add(repl.xeth.CurrentBlock().Number(), big.NewInt(1)) - repl.wait <- height - select { - case <-timer.C: - // if times out make sure the xeth loop does not block - go func() { - select { - case repl.wait <- nil: - case <-repl.wait: - } - }() - case <-repl.wait: - } - txc, err = pendingTransactions(repl, t) - if err != nil { - t.Errorf("unexpected error checking pending transactions: %v", err) - return false - } - if txc != 0 { - t.Errorf("%d transactions were not mined", txc) - return false - } - return true -} diff --git a/common/natspec/natspec_e2e_test.go.orig b/common/natspec/natspec_e2e_test.go.orig deleted file mode 100644 index 47969a7f8a730..0000000000000 --- a/common/natspec/natspec_e2e_test.go.orig +++ /dev/null @@ -1,253 +0,0 @@ -package natspec - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/expanse-org/go-expanse/accounts" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/common/docserver" - "github.com/expanse-org/go-expanse/common/registrar" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/exp" - xe "github.com/expanse-org/go-expanse/xeth" -) - 
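The notice lookup these tests exercise, implemented in natspec.go above, keys the userDoc by full method signature: the abi key is the first four bytes of Keccak256(signature), hex-encoded, and it must match characters 2..10 of the transaction call data. A minimal standalone sketch of that matching follows; it is illustrative only (not part of this diff), and it assumes golang.org/x/crypto/sha3 for the legacy Keccak256 that the deleted crypto package provided.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/crypto/sha3" // assumed stand-in for the removed crypto.Keccak256 helper
)

// abiKey returns the first 4 bytes of Keccak256(signature), hex-encoded:
// the selector that prefixes a transaction's call data.
func abiKey(signature string) string {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(signature))
	return fmt.Sprintf("%x", h.Sum(nil)[:4])
}

func main() {
	// userDoc-style notices keyed by method signature (illustrative data).
	notices := map[string]string{
		"register(uint256,uint256)": "Register key `_key` <- content `_content`",
	}

	// call data: "0x" + 4-byte selector + ABI-encoded arguments (two 32-byte words here).
	data := "0x" + abiKey("register(uint256,uint256)") + strings.Repeat("00", 64)

	// the same comparison makeAbi2method performs: data[2:10] against each signature's key.
	for sig, notice := range notices {
		if data[2:10] == abiKey(sig) {
			fmt.Printf("method %s matched: %s\n", strings.Split(sig, "(")[0], notice)
		}
	}
}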
-const ( - testBalance = "10000000000000000000" - - testFileName = "long_file_name_for_testing_registration_of_URLs_longer_than_32_bytes.content" - - testNotice = "Register key `utils.toHex(_key)` <- content `utils.toHex(_content)`" - - testExpNotice = "Register key 0xadd1a7d961cff0242089674ec2ef6fca671ab15e1fe80e38859fc815b98d88ab <- content 0xb3a2dea218de5d8bbe6c4645aadbf67b5ab00ecb1a9ec95dbdad6a0eed3e41a7" - - testExpNotice2 = `About to submit transaction (NatSpec notice error: abi key does not match any method): {"params":[{"to":"%s","data": "0x31e12c20"}]}` - - testExpNotice3 = `About to submit transaction (no NatSpec info found for contract: content hash not found for '0x1392c62d05b2d149e22a339c531157ae06b44d39a674cce500064b12b9aeb019'): {"params":[{"to":"%s","data": "0x300a3bbfb3a2dea218de5d8bbe6c4645aadbf67b5ab00ecb1a9ec95dbdad6a0eed3e41a7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000066696c653a2f2f2f746573742e636f6e74656e74"}]}` -) - -const ( - testUserDoc = ` -{ - "methods": { - "register(uint256,uint256)": { - "notice": "` + testNotice + `" - } - }, - "invariants": [ - { "notice": "" } - ], - "construction": [ - { "notice": "" } - ] -} -` - testAbiDefinition = ` -[{ - "name": "register", - "constant": false, - "type": "function", - "inputs": [{ - "name": "_key", - "type": "uint256" - }, { - "name": "_content", - "type": "uint256" - }], - "outputs": [] -}] -` - - testContractInfo = ` -{ - "userDoc": ` + testUserDoc + `, - "abiDefinition": ` + testAbiDefinition + ` -} -` -) - -type testFrontend struct { - t *testing.T - expanse *exp.Expanse - xeth *xe.XEth - coinbase common.Address - stateDb *state.StateDB - txc uint64 - lastConfirm string - wantNatSpec bool -} - -func (self *testFrontend) UnlockAccount(acc []byte) bool { - self.expanse.AccountManager().Unlock(common.BytesToAddress(acc), "password") - return true -} - -func (self *testFrontend) ConfirmTransaction(tx string) bool { - if self.wantNatSpec { - ds := docserver.New("/tmp/") - self.lastConfirm = GetNotice(self.xeth, tx, ds) - } - return true -} - -func testExp(t *testing.T) (expanse *exp.Expanse, err error) { - - os.RemoveAll("/tmp/exp-natspec/") - - err = os.MkdirAll("/tmp/exp-natspec/keystore", os.ModePerm) - if err != nil { - panic(err) - } - - // create a testAddress - ks := crypto.NewKeyStorePassphrase("/tmp/exp-natspec/keystore", crypto.LightScryptN, crypto.LightScryptP) - am := accounts.NewManager(ks) - testAccount, err := am.NewAccount("password") - if err != nil { - panic(err) - } - testAddress := strings.TrimPrefix(testAccount.Address.Hex(), "0x") - - // set up mock genesis with balance on the testAddress - core.GenesisAccounts = []byte(`{ - "` + testAddress + `": {"balance": "` + testBalance + `"} - }`) - - // only use minimalistic stack with no networking - expanse, err = exp.New(&exp.Config{ - DataDir: "/tmp/exp-natspec", - AccountManager: am, - MaxPeers: 0, - }) - - if err != nil { - panic(err) - } - - return -} - -func testInit(t *testing.T) (self *testFrontend) { - // initialise and start minimal expanse stack - expanse, err := testExp(t) - if err != nil { - t.Errorf("error creating expanse: %v", err) - return - } - err = expanse.Start() - if err != nil { - t.Errorf("error starting expanse: %v", err) - return - } - - // mock frontend - self = &testFrontend{t: t, expanse: expanse} - self.xeth = xe.New(expanse, self) - - addr, _ := expanse.Etherbase() - self.coinbase = addr - self.stateDb = self.expanse.ChainManager().State().Copy() - - // initialise the registry 
contracts - reg := registrar.New(self.xeth) - err = reg.SetHashReg("", addr) - if err != nil { - t.Errorf("error creating HashReg: %v", err) - } - err = reg.SetUrlHint("", addr) - if err != nil { - t.Errorf("error creating UrlHint: %v", err) - } - self.applyTxs() - - return - -} - -// this is needed for transaction to be applied to the state in testing -// the heavy lifing is done in XEth.ApplyTestTxs -// this is fragile, -// and does process leaking since xeth loops cannot quit safely -// should be replaced by proper mining with testDAG for easy full integration tests -func (self *testFrontend) applyTxs() { - self.txc, self.xeth = self.xexp.ApplyTestTxs(self.stateDb, self.coinbase, self.txc) - return -} - -// end to end test -func TestNatspecE2E(t *testing.T) { - t.Skip() - - tf := testInit(t) - defer tf.expanse.Stop() - - // create a contractInfo file (mock cloud-deployed contract metadocs) - // incidentally this is the info for the registry contract itself - ioutil.WriteFile("/tmp/"+testFileName, []byte(testContractInfo), os.ModePerm) - dochash := common.BytesToHash(crypto.Sha3([]byte(testContractInfo))) - - // take the codehash for the contract we wanna test - // codehex := tf.xexp.CodeAt(registar.HashRegAddr) - codeb := tf.xexp.CodeAtBytes(registrar.HashRegAddr) - codehash := common.BytesToHash(crypto.Sha3(codeb)) - - // use resolver to register codehash->dochash->url - // test if globalregistry works - // registrar.HashRefAddr = "0x0" - // registrar.UrlHintAddr = "0x0" - reg := registrar.New(tf.xeth) - _, err := reg.SetHashToHash(tf.coinbase, codehash, dochash) - if err != nil { - t.Errorf("error registering: %v", err) - } - _, err = reg.SetUrlToHash(tf.coinbase, dochash, "file:///"+testFileName) - if err != nil { - t.Errorf("error registering: %v", err) - } - // apply txs to the state - tf.applyTxs() - - // NatSpec info for register method of HashReg contract installed - // now using the same transactions to check confirm messages - - tf.wantNatSpec = true // this is set so now the backend uses natspec confirmation - _, err = reg.SetHashToHash(tf.coinbase, codehash, dochash) - if err != nil { - t.Errorf("error calling contract registry: %v", err) - } - - fmt.Printf("GlobalRegistrar: %v, HashReg: %v, UrlHint: %v\n", registrar.GlobalRegistrarAddr, registrar.HashRegAddr, registrar.UrlHintAddr) - if tf.lastConfirm != testExpNotice { - t.Errorf("Wrong confirm message. expected '%v', got '%v'", testExpNotice, tf.lastConfirm) - } - - // test unknown method - exp := fmt.Sprintf(testExpNotice2, registrar.HashRegAddr) - _, err = reg.SetOwner(tf.coinbase) - if err != nil { - t.Errorf("error setting owner: %v", err) - } - - if tf.lastConfirm != exp { - t.Errorf("Wrong confirm message, expected '%v', got '%v'", exp, tf.lastConfirm) - } - - // test unknown contract - exp = fmt.Sprintf(testExpNotice3, registrar.UrlHintAddr) - - _, err = reg.SetUrlToHash(tf.coinbase, dochash, "file:///test.content") - if err != nil { - t.Errorf("error registering: %v", err) - } - - if tf.lastConfirm != exp { - t.Errorf("Wrong confirm message, expected '%v', got '%v'", exp, tf.lastConfirm) - } - -} diff --git a/common/registrar/ethreg/api.go b/common/registrar/ethreg/api.go deleted file mode 100644 index 3c50f4fe335b6..0000000000000 --- a/common/registrar/ethreg/api.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package ethreg - -import ( - "errors" - "math/big" - - "github.com/expanse-org/go-expanse/accounts" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/common/compiler" - "github.com/expanse-org/go-expanse/common/registrar" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" -) - -// registryAPIBackend is a backend for an Expanse Registry. -type registryAPIBackend struct { - config *core.ChainConfig - bc *core.BlockChain - chainDb ethdb.Database - txPool *core.TxPool - am *accounts.Manager -} - -// PrivateRegistarAPI offers various functions to access the Expanse registry. -type PrivateRegistarAPI struct { - config *core.ChainConfig - be *registryAPIBackend -} - -// NewPrivateRegistarAPI creates a new PrivateRegistarAPI instance. -func NewPrivateRegistarAPI(config *core.ChainConfig, bc *core.BlockChain, chainDb ethdb.Database, txPool *core.TxPool, am *accounts.Manager) *PrivateRegistarAPI { - return &PrivateRegistarAPI{ - config: config, - be: &registryAPIBackend{ - config: config, - bc: bc, - chainDb: chainDb, - txPool: txPool, - am: am, - }, - } -} - -// SetGlobalRegistrar allows clients to set the global registry for the node. -// This method can be used to deploy a new registry. First zero out the current -// address by calling the method with namereg = '0x0' and then call this method -// again with '' as namereg. This will submit a transaction to the network which -// will deploy a new registry on execution. The TX hash is returned. When called -// with namereg '' and the current address is not zero, the current global -// registrar address is returned. -func (api *PrivateRegistarAPI) SetGlobalRegistrar(namereg string, from common.Address) (string, error) { - return registrar.New(api.be).SetGlobalRegistrar(namereg, from) -} - -// SetHashReg sets the address of the HashReg contract, deploying a new instance if none is configured. -func (api *PrivateRegistarAPI) SetHashReg(hashreg string, from common.Address) (string, error) { - return registrar.New(api.be).SetHashReg(hashreg, from) -} - -// SetUrlHint sets the address of the UrlHint contract, deploying a new instance if none is configured. -func (api *PrivateRegistarAPI) SetUrlHint(hashreg string, from common.Address) (string, error) { - return registrar.New(api.be).SetUrlHint(hashreg, from) -} - -// SaveInfo stores contract information on the local file system. -func (api *PrivateRegistarAPI) SaveInfo(info *compiler.ContractInfo, filename string) (contenthash common.Hash, err error) { - return compiler.SaveInfo(info, filename) -} - -// Register registers a new content hash in the registry. 
-func (api *PrivateRegistarAPI) Register(sender common.Address, addr common.Address, contentHashHex string) (bool, error) { - block := api.be.bc.CurrentBlock() - state, err := state.New(block.Root(), api.be.chainDb) - if err != nil { - return false, err - } - - codeb := state.GetCode(addr) - codeHash := common.BytesToHash(crypto.Keccak256(codeb)) - contentHash := common.HexToHash(contentHashHex) - - _, err = registrar.New(api.be).SetHashToHash(sender, codeHash, contentHash) - return err == nil, err -} - -// RegisterUrl registers a new url in the registry. -func (api *PrivateRegistarAPI) RegisterUrl(sender common.Address, contentHashHex string, url string) (bool, error) { - _, err := registrar.New(api.be).SetUrlToHash(sender, common.HexToHash(contentHashHex), url) - return err == nil, err -} - -// callmsg is the message type used for call transactions. -type callmsg struct { - from *state.StateObject - to *common.Address - gas, gasPrice *big.Int - value *big.Int - data []byte -} - -// accessor boilerplate to implement core.Message -func (m callmsg) From() (common.Address, error) { - return m.from.Address(), nil -} -func (m callmsg) FromFrontier() (common.Address, error) { - return m.from.Address(), nil -} -func (m callmsg) Nonce() uint64 { - return m.from.Nonce() -} -func (m callmsg) To() *common.Address { - return m.to -} -func (m callmsg) GasPrice() *big.Int { - return m.gasPrice -} -func (m callmsg) Gas() *big.Int { - return m.gas -} -func (m callmsg) Value() *big.Int { - return m.value -} -func (m callmsg) Data() []byte { - return m.data -} - -// Call forms a transaction from the given arguments and tries to execute it on -// a private VM with a copy of the state. Any changes are therefore only temporary -// and not part of the actual state. This allows for local execution/queries. -func (be *registryAPIBackend) Call(fromStr, toStr, valueStr, gasStr, gasPriceStr, dataStr string) (string, string, error) { - block := be.bc.CurrentBlock() - statedb, err := state.New(block.Root(), be.chainDb) - if err != nil { - return "", "", err - } - - var from *state.StateObject - if len(fromStr) == 0 { - accounts := be.am.Accounts() - if len(accounts) == 0 { - from = statedb.GetOrNewStateObject(common.Address{}) - } else { - from = statedb.GetOrNewStateObject(accounts[0].Address) - } - } else { - from = statedb.GetOrNewStateObject(common.HexToAddress(fromStr)) - } - - from.SetBalance(common.MaxBig) - - msg := callmsg{ - from: from, - gas: common.Big(gasStr), - gasPrice: common.Big(gasPriceStr), - value: common.Big(valueStr), - data: common.FromHex(dataStr), - } - if len(toStr) > 0 { - addr := common.HexToAddress(toStr) - msg.to = &addr - } - - if msg.gas.Cmp(big.NewInt(0)) == 0 { - msg.gas = big.NewInt(50000000) - } - - if msg.gasPrice.Cmp(big.NewInt(0)) == 0 { - msg.gasPrice = new(big.Int).Mul(big.NewInt(50), common.Shannon) - } - - header := be.bc.CurrentBlock().Header() - vmenv := core.NewEnv(statedb, be.config, be.bc, msg, header, vm.Config{}) - gp := new(core.GasPool).AddGas(common.MaxBig) - res, gas, err := core.ApplyMessage(vmenv, msg, gp) - - return common.ToHex(res), gas.String(), err -} - -// StorageAt returns the data stored in the state for the given address and location. 
-func (be *registryAPIBackend) StorageAt(addr string, storageAddr string) string { - block := be.bc.CurrentBlock() - state, err := state.New(block.Root(), be.chainDb) - if err != nil { - return "" - } - return state.GetState(common.HexToAddress(addr), common.HexToHash(storageAddr)).Hex() -} - -// Transact forms a transaction from the given arguments and submits it to the -// transaction pool for execution. -func (be *registryAPIBackend) Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceStr, codeStr string) (string, error) { - if len(toStr) > 0 && toStr != "0x" && !common.IsHexAddress(toStr) { - return "", errors.New("invalid address") - } - - var ( - from = common.HexToAddress(fromStr) - to = common.HexToAddress(toStr) - value = common.Big(valueStr) - gas *big.Int - price *big.Int - data []byte - contractCreation bool - ) - - if len(gasStr) == 0 { - gas = big.NewInt(90000) - } else { - gas = common.Big(gasStr) - } - - if len(gasPriceStr) == 0 { - price = big.NewInt(10000000000000) - } else { - price = common.Big(gasPriceStr) - } - - data = common.FromHex(codeStr) - if len(toStr) == 0 { - contractCreation = true - } - - nonce := be.txPool.State().GetNonce(from) - if len(nonceStr) != 0 { - nonce = common.Big(nonceStr).Uint64() - } - - var tx *types.Transaction - if contractCreation { - tx = types.NewContractCreation(nonce, value, gas, price, data) - } else { - tx = types.NewTransaction(nonce, to, value, gas, price, data) - } - - signature, err := be.am.Sign(from, tx.SigHash().Bytes()) - if err != nil { - return "", err - } - signedTx, err := tx.WithSignature(signature) - if err != nil { - return "", err - } - - be.txPool.SetLocal(signedTx) - if err := be.txPool.Add(signedTx); err != nil { - return "", err - } - - if contractCreation { - addr := crypto.CreateAddress(from, nonce) - glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex()) - } else { - glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex()) - } - - return signedTx.Hash().Hex(), nil -} diff --git a/common/registrar/registrar.go b/common/registrar/registrar.go deleted file mode 100644 index 90582492d77d5..0000000000000 --- a/common/registrar/registrar.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. 
- -package registrar - -import ( - "encoding/binary" - "fmt" - "math/big" - "regexp" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" -) - -/* -Registrar implements the Expanse name registrar services mapping -- arbitrary strings to expanse addresses -- hashes to hashes -- hashes to arbitrary strings -(likely will provide lookup service for all three) - -The Registrar is used by -* the roundtripper transport implementation of -url schemes to resolve domain names and services that register these names -* contract info retrieval (NatSpec). - -The Registrar uses 3 contracts on the blockchain: -* GlobalRegistrar: Name (string) -> Address (Owner) -* HashReg : Key Hash (hash of domain name or contract code) -> Content Hash -* UrlHint : Content Hash -> Url Hint - -These contracts are (currently) not included in the genesis block. -Each Set* method needs to be called once on each blockchain/network. - -Contract addresses need to be set the first time any Registrar method is called -in a client session. -This is done for Frontier by default; otherwise the caller needs to make sure -the relevant environment has initialised the desired contracts. -*/ -var ( - // these aren't the right ones for gexp - GlobalRegistrarAddr = "0x6c221ca53705f3497ec90ca7b84c59ae7382fc21" // frontier - HashRegAddr = "0x10774b55f37302de0075fdc131f63d303ae5dd9e" // frontier - UrlHintAddr = "0x8dad0847dedb1253b6dfe616c6d2841da291dc1f" // frontier - - zero = regexp.MustCompile("^(0x)?0*$") -) - -const ( - trueHex = "0000000000000000000000000000000000000000000000000000000000000001" - falseHex = "0000000000000000000000000000000000000000000000000000000000000000" -) - -func abiSignature(s string) string { - return common.ToHex(crypto.Keccak256([]byte(s))[:4]) -} - -var ( - HashRegName = "HashReg" - UrlHintName = "UrlHint" - - registerContentHashAbi = abiSignature("register(uint256,uint256)") - registerUrlAbi = abiSignature("register(uint256,uint8,uint256)") - setOwnerAbi = abiSignature("setowner()") - reserveAbi = abiSignature("reserve(bytes32)") - resolveAbi = abiSignature("addr(bytes32)") - registerAbi = abiSignature("setAddress(bytes32,address,bool)") - addressAbiPrefix = falseHex[:24] -) - -// Registrar's backend is defined as an interface (implemented by xeth, but could be remote) -type Backend interface { - StorageAt(string, string) string - Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceStr, codeStr string) (string, error) - Call(fromStr, toStr, valueStr, gasStr, gasPriceStr, codeStr string) (string, string, error) -} - -// TODO Registrar should also just implement The Resolver and Registry interfaces -// Simplify for now. 
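A note on the hex payloads used throughout this file: every registrar call or transaction is assembled by plain string concatenation as a 4-byte selector followed by 32-byte-padded arguments, which is exactly what abiSignature and the *Abi variables above provide. A hedged, standalone sketch of that layout follows (illustrative names; it assumes golang.org/x/crypto/sha3 in place of the deleted crypto package):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3" // assumed stand-in for crypto.Keccak256
)

// abiSelector mirrors abiSignature above: "0x" + first 4 bytes of Keccak256(sig).
func abiSelector(sig string) string {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(sig))
	return fmt.Sprintf("0x%x", h.Sum(nil)[:4])
}

func main() {
	// register(uint256,uint256) takes two 32-byte words: a key hash and a content hash.
	selector := abiSelector("register(uint256,uint256)")
	key := fmt.Sprintf("%064x", 0x1234)     // left-padded to 32 bytes (illustrative value)
	content := fmt.Sprintf("%064x", 0x5678) // left-padded to 32 bytes (illustrative value)

	// this is the shape of the data string handed to Backend.Transact,
	// e.g. by SetHashToHash further below.
	fmt.Println(selector + key + content)
}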
-type VersionedRegistrar interface { - Resolver(*big.Int) *Registrar - Registry() *Registrar -} - -type Registrar struct { - backend Backend -} - -func New(b Backend) (res *Registrar) { - res = &Registrar{b} - return -} - -func (self *Registrar) SetGlobalRegistrar(namereg string, addr common.Address) (txhash string, err error) { - if namereg != "" { - GlobalRegistrarAddr = namereg - return - } - if zero.MatchString(GlobalRegistrarAddr) { - if (addr == common.Address{}) { - err = fmt.Errorf("GlobalRegistrar address not found and sender for creation not given") - return - } else { - txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "800000", "", GlobalRegistrarCode) - if err != nil { - err = fmt.Errorf("GlobalRegistrar address not found and sender for creation failed: %v", err) - return - } - } - } - return -} - -func (self *Registrar) SetHashReg(hashreg string, addr common.Address) (txhash string, err error) { - if hashreg != "" { - HashRegAddr = hashreg - } else { - if !zero.MatchString(HashRegAddr) { - return - } - nameHex, extra := encodeName(HashRegName, 2) - hashRegAbi := resolveAbi + nameHex + extra - glog.V(logger.Detail).Infof("\ncall GlobalRegistrar %v with %v\n", GlobalRegistrarAddr, hashRegAbi) - var res string - res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", hashRegAbi) - if len(res) >= 40 { - HashRegAddr = "0x" + res[len(res)-40:len(res)] - } - if err != nil || zero.MatchString(HashRegAddr) { - if (addr == common.Address{}) { - err = fmt.Errorf("HashReg address not found and sender for creation not given") - return - } - - txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "", "", HashRegCode) - if err != nil { - err = fmt.Errorf("HashReg address not found and sender for creation failed: %v", err) - } - glog.V(logger.Detail).Infof("created HashRegAddr @ txhash %v\n", txhash) - } else { - glog.V(logger.Detail).Infof("HashRegAddr found @ %v\n", HashRegAddr) - return - } - } - - return -} - -func (self *Registrar) SetUrlHint(urlhint string, addr common.Address) (txhash string, err error) { - if urlhint != "" { - UrlHintAddr = urlhint - } else { - if !zero.MatchString(UrlHintAddr) { - return - } - nameHex, extra := encodeName(UrlHintName, 2) - urlHintAbi := resolveAbi + nameHex + extra - glog.V(logger.Detail).Infof("UrlHint address query data: %s to %s", urlHintAbi, GlobalRegistrarAddr) - var res string - res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", urlHintAbi) - if len(res) >= 40 { - UrlHintAddr = "0x" + res[len(res)-40:len(res)] - } - if err != nil || zero.MatchString(UrlHintAddr) { - if (addr == common.Address{}) { - err = fmt.Errorf("UrlHint address not found and sender for creation not given") - return - } - txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "210000", "", UrlHintCode) - if err != nil { - err = fmt.Errorf("UrlHint address not found and sender for creation failed: %v", err) - } - glog.V(logger.Detail).Infof("created UrlHint @ txhash %v\n", txhash) - } else { - glog.V(logger.Detail).Infof("UrlHint found @ %v\n", UrlHintAddr) - return - } - } - - return -} - -// ReserveName(from, name) reserves name for the sender address in the globalRegistrar -// the tx needs to be mined to take effect -func (self *Registrar) ReserveName(address common.Address, name string) (txh string, err error) { - if zero.MatchString(GlobalRegistrarAddr) { - return "", fmt.Errorf("GlobalRegistrar address is not set") - } - nameHex, extra := encodeName(name, 2) - abi := reserveAbi + nameHex + extra - 
glog.V(logger.Detail).Infof("Reserve data: %s", abi) - return self.backend.Transact( - address.Hex(), - GlobalRegistrarAddr, - "", "", "", "", - abi, - ) -} - -// SetAddressToName(from, name, addr) will set the Address to address for name -// in the globalRegistrar using from as the sender of the transaction -// the tx needs to be mined to take effect -func (self *Registrar) SetAddressToName(from common.Address, name string, address common.Address) (txh string, err error) { - if zero.MatchString(GlobalRegistrarAddr) { - return "", fmt.Errorf("GlobalRegistrar address is not set") - } - - nameHex, extra := encodeName(name, 6) - addrHex := encodeAddress(address) - - abi := registerAbi + nameHex + addrHex + trueHex + extra - glog.V(logger.Detail).Infof("SetAddressToName data: %s to %s ", abi, GlobalRegistrarAddr) - - return self.backend.Transact( - from.Hex(), - GlobalRegistrarAddr, - "", "", "", "", - abi, - ) -} - -// NameToAddr(from, name) queries the registrar for the address on name -func (self *Registrar) NameToAddr(from common.Address, name string) (address common.Address, err error) { - if zero.MatchString(GlobalRegistrarAddr) { - return address, fmt.Errorf("GlobalRegistrar address is not set") - } - - nameHex, extra := encodeName(name, 2) - abi := resolveAbi + nameHex + extra - glog.V(logger.Detail).Infof("NameToAddr data: %s", abi) - res, _, err := self.backend.Call( - from.Hex(), - GlobalRegistrarAddr, - "", "", "", - abi, - ) - if err != nil { - return - } - address = common.HexToAddress(res) - return -} - -// called as first step in the registration process on HashReg -func (self *Registrar) SetOwner(address common.Address) (txh string, err error) { - if zero.MatchString(HashRegAddr) { - return "", fmt.Errorf("HashReg address is not set") - } - return self.backend.Transact( - address.Hex(), - HashRegAddr, - "", "", "", "", - setOwnerAbi, - ) -} - -// registers some content hash to a key/code hash -// e.g., the contract Info combined Json Doc's ContentHash -// to CodeHash of a contract or hash of a domain -func (self *Registrar) SetHashToHash(address common.Address, codehash, dochash common.Hash) (txh string, err error) { - if zero.MatchString(HashRegAddr) { - return "", fmt.Errorf("HashReg address is not set") - } - - _, err = self.SetOwner(address) - if err != nil { - return - } - codehex := common.Bytes2Hex(codehash[:]) - dochex := common.Bytes2Hex(dochash[:]) - - data := registerContentHashAbi + codehex + dochex - glog.V(logger.Detail).Infof("SetHashToHash data: %s sent to %v\n", data, HashRegAddr) - return self.backend.Transact( - address.Hex(), - HashRegAddr, - "", "", "", "", - data, - ) -} - -// SetUrlToHash(from, hash, url) registers a url to a content hash so that the content can be fetched -// address is used as sender for the transaction and will be the owner of a new -// registry entry on first time use -// FIXME: silently doing nothing if sender is not the owner -// note that with content addressed storage, this step is no longer necessary -func (self *Registrar) SetUrlToHash(address common.Address, hash common.Hash, url string) (txh string, err error) { - if zero.MatchString(UrlHintAddr) { - return "", fmt.Errorf("UrlHint address is not set") - } - - hashHex := common.Bytes2Hex(hash[:]) - var urlHex string - urlb := []byte(url) - var cnt byte - n := len(urlb) - - for n > 0 { - if n > 32 { - n = 32 - } - urlHex = common.Bytes2Hex(urlb[:n]) - urlb = urlb[n:] - n = len(urlb) - bcnt := make([]byte, 32) - bcnt[31] = cnt - data := registerUrlAbi + - hashHex + - 
common.Bytes2Hex(bcnt) + - common.Bytes2Hex(common.Hex2BytesFixed(urlHex, 32)) - txh, err = self.backend.Transact( - address.Hex(), - UrlHintAddr, - "", "", "", "", - data, - ) - if err != nil { - return - } - cnt++ - } - return -} - -// HashToHash(key) resolves contenthash for key (a hash) using HashReg -// resolution is costless non-transactional -// implemented as direct retrieval from db -func (self *Registrar) HashToHash(khash common.Hash) (chash common.Hash, err error) { - if zero.MatchString(HashRegAddr) { - return common.Hash{}, fmt.Errorf("HashReg address is not set") - } - - // look up in hashReg - at := HashRegAddr[2:] - key := storageAddress(storageMapping(storageIdx2Addr(1), khash[:])) - hash := self.backend.StorageAt(at, key) - - if hash == "0x0" || len(hash) < 3 || (hash == common.Hash{}.Hex()) { - err = fmt.Errorf("HashToHash: content hash not found for '%v'", khash.Hex()) - return - } - copy(chash[:], common.Hex2BytesFixed(hash[2:], 32)) - return -} - -// HashToUrl(contenthash) resolves the url for contenthash using UrlHint -// resolution is costless non-transactional -// implemented as direct retrieval from db -// if we use content addressed storage, this step is no longer necessary -func (self *Registrar) HashToUrl(chash common.Hash) (uri string, err error) { - if zero.MatchString(UrlHintAddr) { - return "", fmt.Errorf("UrlHint address is not set") - } - // look up in URL reg - var str string = " " - var idx uint32 - for len(str) > 0 { - mapaddr := storageMapping(storageIdx2Addr(1), chash[:]) - key := storageAddress(storageFixedArray(mapaddr, storageIdx2Addr(idx))) - hex := self.backend.StorageAt(UrlHintAddr[2:], key) - str = string(common.Hex2Bytes(hex[2:])) - l := 0 - for (l < len(str)) && (str[l] == 0) { - l++ - } - - str = str[l:] - uri = uri + str - idx++ - } - - if len(uri) == 0 { - err = fmt.Errorf("HashToUrl: URL hint not found for '%v'", chash.Hex()) - } - return -} - -func storageIdx2Addr(varidx uint32) []byte { - data := make([]byte, 32) - binary.BigEndian.PutUint32(data[28:32], varidx) - return data -} - -func storageMapping(addr, key []byte) []byte { - data := make([]byte, 64) - copy(data[0:32], key[0:32]) - copy(data[32:64], addr[0:32]) - sha := crypto.Keccak256(data) - return sha -} - -func storageFixedArray(addr, idx []byte) []byte { - var carry byte - for i := 31; i >= 0; i-- { - var b byte = addr[i] + idx[i] + carry - if b < addr[i] { - carry = 1 - } else { - carry = 0 - } - addr[i] = b - } - return addr -} - -func storageAddress(addr []byte) string { - return common.ToHex(addr) -} - -func encodeAddress(address common.Address) string { - return addressAbiPrefix + address.Hex()[2:] -} - -func encodeName(name string, index uint8) (string, string) { - extra := common.Bytes2Hex([]byte(name)) - if len(name) > 32 { - return fmt.Sprintf("%064x", index), extra - } - return extra + falseHex[len(extra):], "" -} diff --git a/common/registrar/registrar_test.go b/common/registrar/registrar_test.go deleted file mode 100644 index 705921b9138f9..0000000000000 --- a/common/registrar/registrar_test.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package registrar - -import ( - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" -) - -type testBackend struct { - // contracts mock - contracts map[string](map[string]string) -} - -var ( - text = "test" - codehash = common.StringToHash("1234") - hash = common.BytesToHash(crypto.Keccak256([]byte(text))) - url = "bzz://bzzhash/my/path/contr.act" -) - -func NewTestBackend() *testBackend { - self := &testBackend{} - self.contracts = make(map[string](map[string]string)) - return self -} - -func (self *testBackend) initHashReg() { - self.contracts[HashRegAddr[2:]] = make(map[string]string) - key := storageAddress(storageMapping(storageIdx2Addr(1), codehash[:])) - self.contracts[HashRegAddr[2:]][key] = hash.Hex() -} - -func (self *testBackend) initUrlHint() { - self.contracts[UrlHintAddr[2:]] = make(map[string]string) - mapaddr := storageMapping(storageIdx2Addr(1), hash[:]) - - key := storageAddress(storageFixedArray(mapaddr, storageIdx2Addr(0))) - self.contracts[UrlHintAddr[2:]][key] = common.ToHex([]byte(url)) - key = storageAddress(storageFixedArray(mapaddr, storageIdx2Addr(1))) - self.contracts[UrlHintAddr[2:]][key] = "0x0" -} - -func (self *testBackend) StorageAt(ca, sa string) (res string) { - c := self.contracts[ca] - if c == nil { - return "0x0" - } - res = c[sa] - return -} - -func (self *testBackend) Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceStr, codeStr string) (string, error) { - return "", nil -} - -func (self *testBackend) Call(fromStr, toStr, valueStr, gasStr, gasPriceStr, codeStr string) (string, string, error) { - return "", "", nil -} - -func TestSetGlobalRegistrar(t *testing.T) { - b := NewTestBackend() - res := New(b) - _, err := res.SetGlobalRegistrar("addresshex", common.BigToAddress(common.Big1)) - if err != nil { - t.Errorf("unexpected error: %v'", err) - } -} - -func TestHashToHash(t *testing.T) { - b := NewTestBackend() - res := New(b) - - HashRegAddr = "0x0" - got, err := res.HashToHash(codehash) - if err == nil { - t.Errorf("expected error") - } else { - exp := "HashReg address is not set" - if err.Error() != exp { - t.Errorf("incorrect error, expected '%v', got '%v'", exp, err.Error()) - } - } - - HashRegAddr = common.BigToAddress(common.Big1).Hex() //[2:] - got, err = res.HashToHash(codehash) - if err == nil { - t.Errorf("expected error") - } else { - exp := "HashToHash: content hash not found for '" + codehash.Hex() + "'" - if err.Error() != exp { - t.Errorf("incorrect error, expected '%v', got '%v'", exp, err.Error()) - } - } - - b.initHashReg() - got, err = res.HashToHash(codehash) - if err != nil { - t.Errorf("expected no error, got %v", err) - } else { - if got != hash { - t.Errorf("incorrect result, expected '%v', got '%v'", hash.Hex(), got.Hex()) - } - } -} - -func TestHashToUrl(t *testing.T) { - b := NewTestBackend() - res := New(b) - - UrlHintAddr = "0x0" - got, err := res.HashToUrl(hash) - if err == nil { - t.Errorf("expected error") - } else { - exp := "UrlHint address is not set" - if err.Error() != exp { - t.Errorf("incorrect error, expected '%v', got '%v'", exp, err.Error()) - } - } 
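The direct-storage reads that initHashReg and initUrlHint above mock out (and that HashToHash and HashToUrl perform against live state) rely on Solidity's storage layout: the slot of mapping[key] is Keccak256 of the 32-byte key followed by the 32-byte slot index, which is what storageMapping and storageIdx2Addr compute. A standalone sketch of the same slot derivation (illustrative, assuming golang.org/x/crypto/sha3 for the Keccak256):

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3" // assumed stand-in for crypto.Keccak256
)

// mappingSlot reproduces storageMapping(storageIdx2Addr(slotIndex), key) from
// registrar.go: the storage slot of mapping[key] is Keccak256(key ++ slotIndex),
// with both operands as 32-byte words.
func mappingSlot(slotIndex uint32, key [32]byte) []byte {
	buf := make([]byte, 64)
	copy(buf[0:32], key[:])
	binary.BigEndian.PutUint32(buf[60:64], slotIndex) // slot index right-aligned in its 32-byte word
	h := sha3.NewLegacyKeccak256()
	h.Write(buf)
	return h.Sum(nil)
}

func main() {
	var codehash [32]byte
	codehash[31] = 0x42 // illustrative key
	// slot index 1 matches storageIdx2Addr(1) as used by HashToHash.
	fmt.Printf("0x%x\n", mappingSlot(1, codehash))
}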
- - UrlHintAddr = common.BigToAddress(common.Big2).Hex() //[2:] - got, err = res.HashToUrl(hash) - if err == nil { - t.Errorf("expected error") - } else { - exp := "HashToUrl: URL hint not found for '" + hash.Hex() + "'" - if err.Error() != exp { - t.Errorf("incorrect error, expected '%v', got '%v'", exp, err.Error()) - } - } - - b.initUrlHint() - got, err = res.HashToUrl(hash) - if err != nil { - t.Errorf("expected no error, got %v", err) - } else { - if got != url { - t.Errorf("incorrect result, expected '%v', got '%s'", url, got) - } - } -} diff --git a/common/rlp_test.go b/common/rlp_test.go deleted file mode 100644 index 45b5e5cff6d42..0000000000000 --- a/common/rlp_test.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package common - -import ( - "bytes" - "math/big" - "reflect" - "testing" - - "github.com/expanse-org/go-expanse/rlp" -) - -func TestNonInterfaceSlice(t *testing.T) { - vala := []string{"value1", "value2", "value3"} - valb := []interface{}{"value1", "value2", "value3"} - resa := Encode(vala) - resb := Encode(valb) - if !bytes.Equal(resa, resb) { - t.Errorf("expected []string & []interface{} to be equal") - } -} - -func TestRlpValueEncoding(t *testing.T) { - val := EmptyValue() - val.AppendList().Append(byte(1)).Append(byte(2)).Append(byte(3)) - val.Append("4").AppendList().Append(byte(5)) - - res, err := rlp.EncodeToBytes(val) - if err != nil { - t.Fatalf("encode error: %v", err) - } - exp := Encode([]interface{}{[]interface{}{1, 2, 3}, "4", []interface{}{5}}) - if bytes.Compare(res, exp) != 0 { - t.Errorf("expected %x, got %x", exp, res) - } -} - -func TestValueSlice(t *testing.T) { - val := []interface{}{ - "value1", - "valeu2", - "value3", - } - - value := NewValue(val) - splitVal := value.SliceFrom(1) - - if splitVal.Len() != 2 { - t.Error("SliceFrom: Expected len", 2, "got", splitVal.Len()) - } - - splitVal = value.SliceTo(2) - if splitVal.Len() != 2 { - t.Error("SliceTo: Expected len", 2, "got", splitVal.Len()) - } - - splitVal = value.SliceFromTo(1, 3) - if splitVal.Len() != 2 { - t.Error("SliceFromTo: Expected len", 2, "got", splitVal.Len()) - } -} - -func TestLargeData(t *testing.T) { - data := make([]byte, 100000) - enc := Encode(data) - value := NewValueFromBytes(enc) - if value.Len() != len(data) { - t.Error("Expected data to be", len(data), "got", value.Len()) - } -} - -func TestValue(t *testing.T) { - value := NewValueFromBytes([]byte("\xcd\x83dog\x83god\x83cat\x01")) - if value.Get(0).Str() != "dog" { - t.Errorf("expected '%v', got '%v'", value.Get(0).Str(), "dog") - } - - if value.Get(3).Uint() != 1 { - t.Errorf("expected '%v', got '%v'", value.Get(3).Uint(), 1) - } -} - -func TestEncode(t *testing.T) { - strRes := "\x83dog" - bytes := Encode("dog") - - str := 
string(bytes) - if str != strRes { - t.Errorf("Expected %q, got %q", strRes, str) - } - - sliceRes := "\xcc\x83dog\x83god\x83cat" - strs := []interface{}{"dog", "god", "cat"} - bytes = Encode(strs) - slice := string(bytes) - if slice != sliceRes { - t.Errorf("Expected %q, got %q", sliceRes, slice) - } - - intRes := "\x82\x04\x00" - bytes = Encode(1024) - if string(bytes) != intRes { - t.Errorf("Expected %q, got %q", intRes, bytes) - } -} - -func TestDecode(t *testing.T) { - single := []byte("\x01") - b, _ := Decode(single, 0) - - if b.(uint8) != 1 { - t.Errorf("Expected 1, got %q", b) - } - - str := []byte("\x83dog") - b, _ = Decode(str, 0) - if bytes.Compare(b.([]byte), []byte("dog")) != 0 { - t.Errorf("Expected dog, got %q", b) - } - - slice := []byte("\xcc\x83dog\x83god\x83cat") - res := []interface{}{"dog", "god", "cat"} - b, _ = Decode(slice, 0) - if !reflect.DeepEqual(b, res) { - t.Errorf("Expected %q, got %q", res, b) - } -} - -func TestEncodeDecodeBigInt(t *testing.T) { - bigInt := big.NewInt(1391787038) - encoded := Encode(bigInt) - - value := NewValueFromBytes(encoded) - if value.BigInt().Cmp(bigInt) != 0 { - t.Errorf("Expected %v, got %v", bigInt, value.BigInt()) - } -} - -func TestEncodeDecodeBytes(t *testing.T) { - bv := NewValue([]interface{}{[]byte{1, 2, 3, 4, 5}, []byte{6}}) - b, _ := rlp.EncodeToBytes(bv) - val := NewValueFromBytes(b) - if !bv.Cmp(val) { - t.Errorf("Expected %#v, got %#v", bv, val) - } -} - -func TestEncodeZero(t *testing.T) { - b, _ := rlp.EncodeToBytes(NewValue(0)) - exp := []byte{0xc0} - if bytes.Compare(b, exp) == 0 { - t.Error("Expected", exp, "got", b) - } -} - -func BenchmarkEncodeDecode(b *testing.B) { - for i := 0; i < b.N; i++ { - bytes := Encode([]interface{}{"dog", "god", "cat"}) - Decode(bytes, 0) - } -} diff --git a/common/value.go b/common/value.go deleted file mode 100644 index aacbb3f00904c..0000000000000 --- a/common/value.go +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -package common - -import ( - "bytes" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - - "github.com/expanse-org/go-expanse/rlp" -) - -// Value can hold values of certain basic types and provides ways to -// convert between types without bothering to check whether the -// conversion is actually meaningful. -// -// It currently supports the following types: -// -// - int{,8,16,32,64} -// - uint{,8,16,32,64} -// - *big.Int -// - []byte, string -// - []interface{} -// -// Value is useful whenever you feel that Go's types limit your -// ability to express yourself. In these situations, use Value and -// forget about this strong typing nonsense. 
-type Value struct{ Val interface{} } - -func (val *Value) String() string { - return fmt.Sprintf("%x", val.Val) -} - -func NewValue(val interface{}) *Value { - t := val - if v, ok := val.(*Value); ok { - t = v.Val - } - - return &Value{Val: t} -} - -func (val *Value) Type() reflect.Kind { - return reflect.TypeOf(val.Val).Kind() -} - -func (val *Value) IsNil() bool { - return val.Val == nil -} - -func (val *Value) Len() int { - if data, ok := val.Val.([]interface{}); ok { - return len(data) - } - - return len(val.Bytes()) -} - -func (val *Value) Uint() uint64 { - if Val, ok := val.Val.(uint8); ok { - return uint64(Val) - } else if Val, ok := val.Val.(uint16); ok { - return uint64(Val) - } else if Val, ok := val.Val.(uint32); ok { - return uint64(Val) - } else if Val, ok := val.Val.(uint64); ok { - return Val - } else if Val, ok := val.Val.(float32); ok { - return uint64(Val) - } else if Val, ok := val.Val.(float64); ok { - return uint64(Val) - } else if Val, ok := val.Val.(int); ok { - return uint64(Val) - } else if Val, ok := val.Val.(uint); ok { - return uint64(Val) - } else if Val, ok := val.Val.([]byte); ok { - return new(big.Int).SetBytes(Val).Uint64() - } else if Val, ok := val.Val.(*big.Int); ok { - return Val.Uint64() - } - - return 0 -} - -func (val *Value) Int() int64 { - if Val, ok := val.Val.(int8); ok { - return int64(Val) - } else if Val, ok := val.Val.(int16); ok { - return int64(Val) - } else if Val, ok := val.Val.(int32); ok { - return int64(Val) - } else if Val, ok := val.Val.(int64); ok { - return Val - } else if Val, ok := val.Val.(int); ok { - return int64(Val) - } else if Val, ok := val.Val.(float32); ok { - return int64(Val) - } else if Val, ok := val.Val.(float64); ok { - return int64(Val) - } else if Val, ok := val.Val.([]byte); ok { - return new(big.Int).SetBytes(Val).Int64() - } else if Val, ok := val.Val.(*big.Int); ok { - return Val.Int64() - } else if Val, ok := val.Val.(string); ok { - n, _ := strconv.Atoi(Val) - return int64(n) - } - - return 0 -} - -func (val *Value) Byte() byte { - if Val, ok := val.Val.(byte); ok { - return Val - } - - return 0x0 -} - -func (val *Value) BigInt() *big.Int { - if a, ok := val.Val.([]byte); ok { - b := new(big.Int).SetBytes(a) - - return b - } else if a, ok := val.Val.(*big.Int); ok { - return a - } else if a, ok := val.Val.(string); ok { - return Big(a) - } else { - return big.NewInt(int64(val.Uint())) - } - - return big.NewInt(0) -} - -func (val *Value) Str() string { - if a, ok := val.Val.([]byte); ok { - return string(a) - } else if a, ok := val.Val.(string); ok { - return a - } else if a, ok := val.Val.(byte); ok { - return string(a) - } - - return "" -} - -func (val *Value) Bytes() []byte { - if a, ok := val.Val.([]byte); ok { - return a - } else if s, ok := val.Val.(byte); ok { - return []byte{s} - } else if s, ok := val.Val.(string); ok { - return []byte(s) - } else if s, ok := val.Val.(*big.Int); ok { - return s.Bytes() - } else { - return big.NewInt(val.Int()).Bytes() - } - - return []byte{} -} - -func (val *Value) Err() error { - if err, ok := val.Val.(error); ok { - return err - } - - return nil -} - -func (val *Value) Slice() []interface{} { - if d, ok := val.Val.([]interface{}); ok { - return d - } - - return []interface{}{} -} - -func (val *Value) SliceFrom(from int) *Value { - slice := val.Slice() - - return NewValue(slice[from:]) -} - -func (val *Value) SliceTo(to int) *Value { - slice := val.Slice() - - return NewValue(slice[:to]) -} - -func (val *Value) SliceFromTo(from, to int) *Value { - slice := 
val.Slice() - - return NewValue(slice[from:to]) -} - -// TODO More type checking methods -func (val *Value) IsSlice() bool { - return val.Type() == reflect.Slice -} - -func (val *Value) IsStr() bool { - return val.Type() == reflect.String -} - -func (self *Value) IsErr() bool { - _, ok := self.Val.(error) - return ok -} - -// Special list checking function. Something is considered -// a list if it's of type []interface{}. The list is usually -// used in conjunction with rlp decoded streams. -func (val *Value) IsList() bool { - _, ok := val.Val.([]interface{}) - - return ok -} - -func (val *Value) IsEmpty() bool { - return val.Val == nil || ((val.IsSlice() || val.IsStr()) && val.Len() == 0) -} - -// Threat the value as a slice -func (val *Value) Get(idx int) *Value { - if d, ok := val.Val.([]interface{}); ok { - // Guard for oob - if len(d) <= idx { - return NewValue(nil) - } - - if idx < 0 { - return NewValue(nil) - } - - return NewValue(d[idx]) - } - - // If this wasn't a slice you probably shouldn't be using this function - return NewValue(nil) -} - -func (self *Value) Copy() *Value { - switch val := self.Val.(type) { - case *big.Int: - return NewValue(new(big.Int).Set(val)) - case []byte: - return NewValue(CopyBytes(val)) - default: - return NewValue(self.Val) - } - - return nil -} - -func (val *Value) Cmp(o *Value) bool { - return reflect.DeepEqual(val.Val, o.Val) -} - -func (self *Value) DeepCmp(o *Value) bool { - return bytes.Compare(self.Bytes(), o.Bytes()) == 0 -} - -func (self *Value) DecodeRLP(s *rlp.Stream) error { - var v interface{} - if err := s.Decode(&v); err != nil { - return err - } - self.Val = v - return nil -} - -func (self *Value) EncodeRLP(w io.Writer) error { - if self == nil { - w.Write(rlp.EmptyList) - return nil - } else { - return rlp.Encode(w, self.Val) - } -} - -// NewValueFromBytes decodes RLP data. -// The contained value will be nil if data contains invalid RLP. 
-func NewValueFromBytes(data []byte) *Value { - v := new(Value) - if len(data) != 0 { - if err := rlp.DecodeBytes(data, v); err != nil { - v.Val = nil - } - } - return v -} - -// Value setters -func NewSliceValue(s interface{}) *Value { - list := EmptyValue() - - if s != nil { - if slice, ok := s.([]interface{}); ok { - for _, val := range slice { - list.Append(val) - } - } else if slice, ok := s.([]string); ok { - for _, val := range slice { - list.Append(val) - } - } - } - - return list -} - -func EmptyValue() *Value { - return NewValue([]interface{}{}) -} - -func (val *Value) AppendList() *Value { - list := EmptyValue() - val.Val = append(val.Slice(), list) - - return list -} - -func (val *Value) Append(v interface{}) *Value { - val.Val = append(val.Slice(), v) - - return val -} - -const ( - valOpAdd = iota - valOpDiv - valOpMul - valOpPow - valOpSub -) - -// Math stuff -func (self *Value) doOp(op int, other interface{}) *Value { - left := self.BigInt() - right := NewValue(other).BigInt() - - switch op { - case valOpAdd: - self.Val = left.Add(left, right) - case valOpDiv: - self.Val = left.Div(left, right) - case valOpMul: - self.Val = left.Mul(left, right) - case valOpPow: - self.Val = left.Exp(left, right, Big0) - case valOpSub: - self.Val = left.Sub(left, right) - } - - return self -} - -func (self *Value) Add(other interface{}) *Value { - return self.doOp(valOpAdd, other) -} - -func (self *Value) Sub(other interface{}) *Value { - return self.doOp(valOpSub, other) -} - -func (self *Value) Div(other interface{}) *Value { - return self.doOp(valOpDiv, other) -} - -func (self *Value) Mul(other interface{}) *Value { - return self.doOp(valOpMul, other) -} - -func (self *Value) Pow(other interface{}) *Value { - return self.doOp(valOpPow, other) -} - -type ValueIterator struct { - value *Value - currentValue *Value - idx int -} - -func (val *Value) NewIterator() *ValueIterator { - return &ValueIterator{value: val} -} - -func (it *ValueIterator) Len() int { - return it.value.Len() -} - -func (it *ValueIterator) Next() bool { - if it.idx >= it.value.Len() { - return false - } - - it.currentValue = it.value.Get(it.idx) - it.idx++ - - return true -} - -func (it *ValueIterator) Value() *Value { - return it.currentValue -} - -func (it *ValueIterator) Idx() int { - return it.idx - 1 -} diff --git a/core/canary.go b/core/canary.go deleted file mode 100644 index d8cbdacf029d3..0000000000000 --- a/core/canary.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
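Before core/canary.go, a short usage sketch of the Value wrapper deleted above. It compiles only against the tree as it existed before this deletion, and the expected results follow from the accessors defined in common/value.go:

package main

import (
	"fmt"

	"github.com/expanse-org/go-expanse/common" // the pre-deletion package
)

func main() {
	// wrap a heterogeneous list and read it back with the coercing accessors.
	val := common.NewValue([]interface{}{"dog", uint64(1024)})

	fmt.Println(val.Len())         // 2
	fmt.Println(val.Get(0).Str())  // "dog"
	fmt.Println(val.Get(1).Uint()) // 1024

	// out-of-bounds Get returns NewValue(nil) instead of panicking.
	fmt.Println(val.Get(9).IsNil()) // true
}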
diff --git a/core/canary.go b/core/canary.go deleted file mode 100644 index d8cbdacf029d3..0000000000000 --- a/core/canary.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package core - -import ( - "math/big" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/state" -) - -var ( - jeff = common.HexToAddress("959c33de5961820567930eccce51ea715c496f85") - vitalik = common.HexToAddress("c8158da0b567a8cc898991c2c2a073af67dc03a9") - christoph = common.HexToAddress("7a19a893f91d5b6e2cdf941b6acbba2cbcf431ee") - gav = common.HexToAddress("539dd9aaf45c3feb03f9c004f4098bd3268fef6b") -) - -// Canary checks the zero storage slot of the 4 contract addresses above. -// If two or more are set to anything other than 0 the canary -// dies a horrible death. -func Canary(statedb *state.StateDB) bool { - var r int - if statedb.GetState(jeff, common.Hash{}).Big().Cmp(big.NewInt(0)) > 0 { - r++ - } - if statedb.GetState(gav, common.Hash{}).Big().Cmp(big.NewInt(0)) > 0 { - r++ - } - if statedb.GetState(christoph, common.Hash{}).Big().Cmp(big.NewInt(0)) > 0 { - r++ - } - if statedb.GetState(vitalik, common.Hash{}).Big().Cmp(big.NewInt(0)) > 0 { - r++ - } - return r > 1 -} diff --git a/core/config.go b/core/config.go deleted file mode 100644 index 14947427ba72b..0000000000000 --- a/core/config.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "errors" - "math/big" - - "github.com/expanse-org/go-expanse/core/vm" -) - -var ChainConfigNotFoundErr = errors.New("ChainConfig not found") // general config not found error - -// ChainConfig is the core config which determines the blockchain settings. -// -// ChainConfig is stored in the database on a per block basis. This means -// that any network, identified by its genesis block, can have its own -// set of configuration options. -type ChainConfig struct { - HomesteadBlock *big.Int `json:"homesteadBlock"` // Homestead switch block (nil = no fork, 0 = already homestead) - DAOForkBlock *big.Int `json:"daoForkBlock"` // TheDAO hard-fork switch block (nil = no fork) - DAOForkSupport bool `json:"daoForkSupport"` // Whether the node supports or opposes the DAO hard-fork - - VmConfig vm.Config `json:"-"` -} - -// IsHomestead returns whether num is either equal to the homestead block or greater. -func (c *ChainConfig) IsHomestead(num *big.Int) bool { - if c.HomesteadBlock == nil || num == nil { - return false - } - return num.Cmp(c.HomesteadBlock) >= 0 -}
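The nil-handling in IsHomestead is the subtle part: a nil HomesteadBlock means the fork never activates. A small self-contained sketch of gating on it follows; chainConfig here is a hypothetical stand-in, not the real core.ChainConfig, and 1150000 is the well-known mainnet Homestead block.

package main

import (
	"fmt"
	"math/big"
)

// chainConfig is a hypothetical stand-in for core.ChainConfig, reduced
// to the single field this example needs.
type chainConfig struct{ HomesteadBlock *big.Int }

func (c *chainConfig) IsHomestead(num *big.Int) bool {
	if c.HomesteadBlock == nil || num == nil {
		return false
	}
	return num.Cmp(c.HomesteadBlock) >= 0
}

func main() {
	cfg := &chainConfig{HomesteadBlock: big.NewInt(1150000)}
	for _, n := range []int64{0, 1149999, 1150000, 2000000} {
		fmt.Println(n, cfg.IsHomestead(big.NewInt(n))) // false, false, true, true
	}
	// nil switch block: the fork never activates.
	fmt.Println((&chainConfig{}).IsHomestead(big.NewInt(2000000))) // false
}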
diff --git a/core/execution.go b/core/execution.go deleted file mode 100644 index fc3d1f720cc44..0000000000000 --- a/core/execution.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package core - -import ( - "math/big" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/params" -) - -// Call executes the code of the contract at the given address with input as parameters -func Call(env vm.Environment, caller vm.ContractRef, addr common.Address, input []byte, gas, gasPrice, value *big.Int) (ret []byte, err error) { - ret, _, err = exec(env, caller, &addr, &addr, env.Db().GetCodeHash(addr), input, env.Db().GetCode(addr), gas, gasPrice, value) - return ret, err -} - -// CallCode executes the given address' code as the given contract address -func CallCode(env vm.Environment, caller vm.ContractRef, addr common.Address, input []byte, gas, gasPrice, value *big.Int) (ret []byte, err error) { - callerAddr := caller.Address() - ret, _, err = exec(env, caller, &callerAddr, &addr, env.Db().GetCodeHash(addr), input, env.Db().GetCode(addr), gas, gasPrice, value) - return ret, err -} - -// DelegateCall is equivalent to CallCode except that sender and value propagate from parent scope to child scope -func DelegateCall(env vm.Environment, caller vm.ContractRef, addr common.Address, input []byte, gas, gasPrice *big.Int) (ret []byte, err error) { - callerAddr := caller.Address() - originAddr := env.Origin() - callerValue := caller.Value() - ret, _, err = execDelegateCall(env, caller, &originAddr, &callerAddr, &addr, env.Db().GetCodeHash(addr), input, env.Db().GetCode(addr), gas, gasPrice, callerValue) - return ret, err -} - -// Create creates a new contract with the given code -func Create(env vm.Environment, caller vm.ContractRef, code []byte, gas, gasPrice, value *big.Int) (ret []byte, address common.Address, err error) { - ret, address, err = exec(env, caller, nil, nil, crypto.Keccak256Hash(code), nil, code, gas, gasPrice, value) - // Here we get an error if we run into maximum stack depth, - // See: https://github.com/expanse-org/yellowpaper/pull/131 - // and YP definitions for CREATE instruction - if err != nil { - return nil, address, err - } - return ret, address, err -} - -func exec(env vm.Environment, caller vm.ContractRef, address, codeAddr *common.Address, codeHash common.Hash, input, code []byte, gas, gasPrice, value *big.Int) (ret []byte, addr common.Address, err error) { - evm := env.Vm() - // Depth check execution. Fail if we're trying to execute above the - // limit. - if env.Depth() > int(params.CallCreateDepth.Int64()) { - caller.ReturnGas(gas, gasPrice) - - return nil, common.Address{}, vm.DepthError - } - - if !env.CanTransfer(caller.Address(), value) { - caller.ReturnGas(gas, gasPrice) - - return nil, common.Address{}, ValueTransferErr("insufficient funds to transfer value.
Req %v, has %v", value, env.Db().GetBalance(caller.Address())) - } - - var createAccount bool - if address == nil { - // Create a new account on the state - nonce := env.Db().GetNonce(caller.Address()) - env.Db().SetNonce(caller.Address(), nonce+1) - addr = crypto.CreateAddress(caller.Address(), nonce) - address = &addr - createAccount = true - } - - snapshotPreTransfer := env.SnapshotDatabase() - var ( - from = env.Db().GetAccount(caller.Address()) - to vm.Account - ) - if createAccount { - to = env.Db().CreateAccount(*address) - } else { - if !env.Db().Exist(*address) { - to = env.Db().CreateAccount(*address) - } else { - to = env.Db().GetAccount(*address) - } - } - env.Transfer(from, to, value) - - // initialise a new contract and set the code that is to be used by the - // EVM. The contract is a scoped environment for this execution context - // only. - contract := vm.NewContract(caller, to, value, gas, gasPrice) - contract.SetCallCode(codeAddr, codeHash, code) - defer contract.Finalise() - - ret, err = evm.Run(contract, input) - // if the contract creation ran successfully and no errors were returned, - // calculate the gas required to store the code. If the code could not - // be stored due to insufficient gas, set an error and let it be handled - // by the error checking condition below. - if err == nil && createAccount { - dataGas := big.NewInt(int64(len(ret))) - dataGas.Mul(dataGas, params.CreateDataGas) - if contract.UseGas(dataGas) { - env.Db().SetCode(*address, ret) - } else { - err = vm.CodeStoreOutOfGasError - } - } - - // When an error was returned by the EVM, or when setting the creation code - // above failed, we revert to the snapshot and consume any gas remaining. - // Additionally, when we're in homestead this also counts for code storage gas errors. - if err != nil && (env.RuleSet().IsHomestead(env.BlockNumber()) || err != vm.CodeStoreOutOfGasError) { - contract.UseGas(contract.Gas) - - env.RevertToSnapshot(snapshotPreTransfer) - } - - return ret, addr, err -}
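The creation branch above charges params.CreateDataGas for every byte of code the constructor returns. A standalone sketch of that arithmetic, assuming the customary value of 200 gas per byte for CreateDataGas (an assumption, not taken from this diff):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Assumed value of params.CreateDataGas: 200 gas per byte of stored code.
	createDataGas := big.NewInt(200)

	ret := make([]byte, 1024) // pretend the constructor returned 1 KiB of code
	dataGas := big.NewInt(int64(len(ret)))
	dataGas.Mul(dataGas, createDataGas)

	fmt.Println("gas to store code:", dataGas) // 204800
}

If the remaining gas cannot cover this amount, the code is simply not stored pre-homestead, while post-homestead the CodeStoreOutOfGasError path above reverts the whole creation.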
-func execDelegateCall(env vm.Environment, caller vm.ContractRef, originAddr, toAddr, codeAddr *common.Address, codeHash common.Hash, input, code []byte, gas, gasPrice, value *big.Int) (ret []byte, addr common.Address, err error) { - evm := env.Vm() - // Depth check execution. Fail if we're trying to execute above the - // limit. - if env.Depth() > int(params.CallCreateDepth.Int64()) { - caller.ReturnGas(gas, gasPrice) - return nil, common.Address{}, vm.DepthError - } - - snapshot := env.SnapshotDatabase() - - var to vm.Account - if !env.Db().Exist(*toAddr) { - to = env.Db().CreateAccount(*toAddr) - } else { - to = env.Db().GetAccount(*toAddr) - } - - // Initialise a new contract and set up the delegate values - contract := vm.NewContract(caller, to, value, gas, gasPrice).AsDelegate() - contract.SetCallCode(codeAddr, codeHash, code) - defer contract.Finalise() - - ret, err = evm.Run(contract, input) - if err != nil { - contract.UseGas(contract.Gas) - - env.RevertToSnapshot(snapshot) - } - - return ret, addr, err -} - -// generic transfer method -func Transfer(from, to vm.Account, amount *big.Int) { - from.SubBalance(amount) - to.AddBalance(amount) -} diff --git a/core/types/bloom9.go b/core/types/bloom9.go index a5189b7beb902..4485e32fdb133 100644 --- a/core/types/bloom9.go +++ b/core/types/bloom9.go @@ -19,7 +19,7 @@ package types import ( "fmt" "math/big" - + "github.com/expanse-org/go-expanse/common/hexutil" "github.com/expanse-org/go-expanse/crypto" ) diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 33f9e6c0d96cc..09704d9f0eb02 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -29,8 +29,7 @@ import ( ) // The values in those tests are from the Transaction Tests -// at github.com/expanse-org/tests. - +// at github.com/ethereum/tests. var ( emptyTx = NewTransaction( 0, diff --git a/core/vm/environment.go b/core/vm/environment.go deleted file mode 100644 index 4d18cec0cc2d2..0000000000000 --- a/core/vm/environment.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package vm - -import ( - "math/big" - - "github.com/expanse-org/go-expanse/common" -) - -// RuleSet is an interface that defines the current rule set during the -// execution of the EVM instructions (e.g. whether it's homestead) -type RuleSet interface { - IsHomestead(*big.Int) bool -} - -// Environment is an EVM requirement and helper which allows access to outside -// information such as states.
-type Environment interface { - // The current ruleset - RuleSet() RuleSet - // The state database - Db() Database - // Creates a restorable snapshot - SnapshotDatabase() int - // Set database to previous snapshot - RevertToSnapshot(int) - // Address of the original invoker (first occurrence of the VM invoker) - Origin() common.Address - // The block number this VM is invoked on - BlockNumber() *big.Int - // The n'th hash ago from this block number - GetHash(uint64) common.Hash - // The handler's address - Coinbase() common.Address - // The current time (block time) - Time() *big.Int - // Difficulty set on the current block - Difficulty() *big.Int - // The gas limit of the block - GasLimit() *big.Int - // Determines whether it's possible to transact - CanTransfer(from common.Address, balance *big.Int) bool - // Transfers amount from one account to the other - Transfer(from, to Account, amount *big.Int) - // Adds a LOG to the state - AddLog(*Log) - // Type of the VM - Vm() Vm - // Get the current calling depth - Depth() int - // Set the current calling depth - SetDepth(i int) - // Call another contract - Call(me ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) - // Take another's contract code and execute within our own context - CallCode(me ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) - // Same as CallCode except sender and value are propagated from parent to child scope - DelegateCall(me ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error) - // Create a new contract - Create(me ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) -} - -// Vm is the basic interface for an implementation of the EVM. -type Vm interface { - // Run should execute the given contract with the given input in - // and return the contract execution return bytes or an error if it - // failed. - Run(c *Contract, in []byte) ([]byte, error) -} - -// Database is an EVM database for full state querying. -type Database interface { - GetAccount(common.Address) Account - CreateAccount(common.Address) Account - - AddBalance(common.Address, *big.Int) - GetBalance(common.Address) *big.Int - - GetNonce(common.Address) uint64 - SetNonce(common.Address, uint64) - - GetCodeHash(common.Address) common.Hash - GetCodeSize(common.Address) int - GetCode(common.Address) []byte - SetCode(common.Address, []byte) - - AddRefund(*big.Int) - GetRefund() *big.Int - - GetState(common.Address, common.Hash) common.Hash - SetState(common.Address, common.Hash, common.Hash) - - Suicide(common.Address) bool - HasSuicided(common.Address) bool - - // Exist reports whether the given account exists in state. - // Notably this should also return true for suicided accounts. - Exist(common.Address) bool -} - -// Account represents a contract or basic expanse account. -type Account interface { - SubBalance(amount *big.Int) - AddBalance(amount *big.Int) - SetBalance(*big.Int) - SetNonce(uint64) - Balance() *big.Int - Address() common.Address - ReturnGas(*big.Int, *big.Int) - SetCode(common.Hash, []byte) - ForEachStorage(cb func(key, value common.Hash) bool) - Value() *big.Int -}
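Of the interfaces above, RuleSet is the one most often stubbed out on its own; a two-line sketch follows (a hypothetical helper, in the spirit of the ruleSet value used by the tests later in this diff):

package vm

import "math/big"

// fixedRuleSet is a hypothetical RuleSet whose homestead rules apply
// from block onwards; a nil block means homestead never activates.
type fixedRuleSet struct{ block *big.Int }

func (r fixedRuleSet) IsHomestead(n *big.Int) bool {
	return r.block != nil && n != nil && n.Cmp(r.block) >= 0
}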
diff --git a/core/vm/jit.go b/core/vm/jit.go deleted file mode 100644 index b1fac8f5f63d4..0000000000000 --- a/core/vm/jit.go +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package vm - -import ( - "fmt" - "math/big" - "sync/atomic" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/params" - "github.com/hashicorp/golang-lru" -) - -// progStatus is the type for the JIT program status. -type progStatus int32 - -const ( - progUnknown progStatus = iota // unknown status - progCompile // compile status - progReady // ready for use status - progError // error status (usually caused during compilation) - - defaultJitMaxCache int = 64 // maximum amount of jit cached programs -) - -var MaxProgSize int // Max cache size for JIT programs - -var programs *lru.Cache // lru cache for the JIT programs. - -func init() { - SetJITCacheSize(defaultJitMaxCache) -} - -// SetJITCacheSize recreates the program cache with the max given size. Setting -// a new cache is **not** thread safe. Use with caution. -func SetJITCacheSize(size int) { - programs, _ = lru.New(size) -} - -// GetProgram returns the program by id or nil when non-existent -func GetProgram(id common.Hash) *Program { - if p, ok := programs.Get(id); ok { - return p.(*Program) - } - - return nil -} - -// GetProgramStatus returns the status of the given program id -func GetProgramStatus(id common.Hash) progStatus { - program := GetProgram(id) - if program != nil { - return progStatus(atomic.LoadInt32(&program.status)) - } - - return progUnknown -} - -// Program is a compiled program for the JIT VM and holds everything required -// to run a compiled JIT program. -type Program struct { - Id common.Hash // Id of the program - status int32 // status should be accessed atomically - - contract *Contract - - instructions []programInstruction // instruction set - mapping map[uint64]uint64 // real PC mapping to array indices - destinations map[uint64]struct{} // cached jump destinations - - code []byte -} - -// NewProgram returns a new JIT program -func NewProgram(code []byte) *Program { - program := &Program{ - Id: crypto.Keccak256Hash(code), - mapping: make(map[uint64]uint64), - destinations: make(map[uint64]struct{}), - code: code, - } - - programs.Add(program.Id, program) - return program -} - -func (p *Program) addInstr(op OpCode, pc uint64, fn instrFn, data *big.Int) { - // PUSH and DUP are a bit special.
They all cost the same but we do want to have checking on stack push limit - // PUSH is also allowed to calculate the same price for all PUSHes - // DUP requirements are handled elsewhere (except for the stack limit check) - baseOp := op - if op >= PUSH1 && op <= PUSH32 { - baseOp = PUSH1 - } - if op >= DUP1 && op <= DUP16 { - baseOp = DUP1 - } - base := _baseCheck[baseOp] - - returns := op == RETURN || op == SUICIDE || op == STOP - instr := instruction{op, pc, fn, data, base.gas, base.stackPop, base.stackPush, returns} - - p.instructions = append(p.instructions, instr) - p.mapping[pc] = uint64(len(p.instructions) - 1) -} - -// CompileProgram compiles the given program and returns an error when it fails -func CompileProgram(program *Program) (err error) { - if progStatus(atomic.LoadInt32(&program.status)) == progCompile { - return nil - } - atomic.StoreInt32(&program.status, int32(progCompile)) - defer func() { - if err != nil { - atomic.StoreInt32(&program.status, int32(progError)) - } else { - atomic.StoreInt32(&program.status, int32(progReady)) - } - }() - if glog.V(logger.Debug) { - glog.Infof("compiling %x\n", program.Id[:4]) - tstart := time.Now() - defer func() { - glog.Infof("compiled %x instrc: %d time: %v\n", program.Id[:4], len(program.instructions), time.Since(tstart)) - }() - } - - // loop through the opcodes and "compile" them into instructions - for pc := uint64(0); pc < uint64(len(program.code)); pc++ { - switch op := OpCode(program.code[pc]); op { - case ADD: - program.addInstr(op, pc, opAdd, nil) - case SUB: - program.addInstr(op, pc, opSub, nil) - case MUL: - program.addInstr(op, pc, opMul, nil) - case DIV: - program.addInstr(op, pc, opDiv, nil) - case SDIV: - program.addInstr(op, pc, opSdiv, nil) - case MOD: - program.addInstr(op, pc, opMod, nil) - case SMOD: - program.addInstr(op, pc, opSmod, nil) - case EXP: - program.addInstr(op, pc, opExp, nil) - case SIGNEXTEND: - program.addInstr(op, pc, opSignExtend, nil) - case NOT: - program.addInstr(op, pc, opNot, nil) - case LT: - program.addInstr(op, pc, opLt, nil) - case GT: - program.addInstr(op, pc, opGt, nil) - case SLT: - program.addInstr(op, pc, opSlt, nil) - case SGT: - program.addInstr(op, pc, opSgt, nil) - case EQ: - program.addInstr(op, pc, opEq, nil) - case ISZERO: - program.addInstr(op, pc, opIszero, nil) - case AND: - program.addInstr(op, pc, opAnd, nil) - case OR: - program.addInstr(op, pc, opOr, nil) - case XOR: - program.addInstr(op, pc, opXor, nil) - case BYTE: - program.addInstr(op, pc, opByte, nil) - case ADDMOD: - program.addInstr(op, pc, opAddmod, nil) - case MULMOD: - program.addInstr(op, pc, opMulmod, nil) - case SHA3: - program.addInstr(op, pc, opSha3, nil) - case ADDRESS: - program.addInstr(op, pc, opAddress, nil) - case BALANCE: - program.addInstr(op, pc, opBalance, nil) - case ORIGIN: - program.addInstr(op, pc, opOrigin, nil) - case CALLER: - program.addInstr(op, pc, opCaller, nil) - case CALLVALUE: - program.addInstr(op, pc, opCallValue, nil) - case CALLDATALOAD: - program.addInstr(op, pc, opCalldataLoad, nil) - case CALLDATASIZE: - program.addInstr(op, pc, opCalldataSize, nil) - case CALLDATACOPY: - program.addInstr(op, pc, opCalldataCopy, nil) - case CODESIZE: - program.addInstr(op, pc, opCodeSize, nil) - case EXTCODESIZE: - program.addInstr(op, pc, opExtCodeSize, nil) - case CODECOPY: - program.addInstr(op, pc, opCodeCopy, nil) - case EXTCODECOPY: - program.addInstr(op, pc, opExtCodeCopy, nil) - case GASPRICE: - program.addInstr(op, pc, opGasprice, nil) - case BLOCKHASH: - program.addInstr(op, pc,
opBlockhash, nil) - case COINBASE: - program.addInstr(op, pc, opCoinbase, nil) - case TIMESTAMP: - program.addInstr(op, pc, opTimestamp, nil) - case NUMBER: - program.addInstr(op, pc, opNumber, nil) - case DIFFICULTY: - program.addInstr(op, pc, opDifficulty, nil) - case GASLIMIT: - program.addInstr(op, pc, opGasLimit, nil) - case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32: - size := uint64(op - PUSH1 + 1) - bytes := getData([]byte(program.code), new(big.Int).SetUint64(pc+1), new(big.Int).SetUint64(size)) - - program.addInstr(op, pc, opPush, common.Bytes2Big(bytes)) - - pc += size - - case POP: - program.addInstr(op, pc, opPop, nil) - case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16: - program.addInstr(op, pc, opDup, big.NewInt(int64(op-DUP1+1))) - case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16: - program.addInstr(op, pc, opSwap, big.NewInt(int64(op-SWAP1+2))) - case LOG0, LOG1, LOG2, LOG3, LOG4: - program.addInstr(op, pc, opLog, big.NewInt(int64(op-LOG0))) - case MLOAD: - program.addInstr(op, pc, opMload, nil) - case MSTORE: - program.addInstr(op, pc, opMstore, nil) - case MSTORE8: - program.addInstr(op, pc, opMstore8, nil) - case SLOAD: - program.addInstr(op, pc, opSload, nil) - case SSTORE: - program.addInstr(op, pc, opSstore, nil) - case JUMP: - program.addInstr(op, pc, opJump, nil) - case JUMPI: - program.addInstr(op, pc, opJumpi, nil) - case JUMPDEST: - program.addInstr(op, pc, opJumpdest, nil) - program.destinations[pc] = struct{}{} - case PC: - program.addInstr(op, pc, opPc, big.NewInt(int64(pc))) - case MSIZE: - program.addInstr(op, pc, opMsize, nil) - case GAS: - program.addInstr(op, pc, opGas, nil) - case CREATE: - program.addInstr(op, pc, opCreate, nil) - case DELEGATECALL: - // Instruction added regardless of homestead phase. - // Homestead (and execution of the opcode) is checked during - // runtime. - program.addInstr(op, pc, opDelegateCall, nil) - case CALL: - program.addInstr(op, pc, opCall, nil) - case CALLCODE: - program.addInstr(op, pc, opCallCode, nil) - case RETURN: - program.addInstr(op, pc, opReturn, nil) - case SUICIDE: - program.addInstr(op, pc, opSuicide, nil) - case STOP: // Stop the contract - program.addInstr(op, pc, opStop, nil) - default: - program.addInstr(op, pc, nil, nil) - } - } - - optimiseProgram(program) - - return nil -} - -// RunProgram runs the program given the environment and contract and returns an -// error if the execution failed (non-consensus) -func RunProgram(program *Program, env Environment, contract *Contract, input []byte) ([]byte, error) { - return runProgram(program, 0, NewMemory(), newstack(), env, contract, input) -} - -func runProgram(program *Program, pcstart uint64, mem *Memory, stack *stack, env Environment, contract *Contract, input []byte) ([]byte, error) { - contract.Input = input - - var ( - pc uint64 = program.mapping[pcstart] - instrCount = 0 - ) - - if glog.V(logger.Debug) { - glog.Infof("running JIT program %x\n", program.Id[:4]) - tstart := time.Now() - defer func() { - glog.Infof("JIT program %x done. 
time: %v instrc: %v\n", program.Id[:4], time.Since(tstart), instrCount) - }() - } - - homestead := env.RuleSet().IsHomestead(env.BlockNumber()) - for pc < uint64(len(program.instructions)) { - instrCount++ - - instr := program.instructions[pc] - if instr.Op() == DELEGATECALL && !homestead { - return nil, fmt.Errorf("Invalid opcode 0x%x", instr.Op()) - } - - ret, err := instr.do(program, &pc, env, contract, mem, stack) - if err != nil { - return nil, err - } - - if instr.halts() { - return ret, nil - } - } - - contract.Input = nil - - return nil, nil -} - -// validDest checks if the given destination is a valid one given the -// destination table of the program -func validDest(dests map[uint64]struct{}, dest *big.Int) bool { - // PC cannot go beyond len(code) and certainly can't be bigger than 64bits. - // Don't bother checking for JUMPDEST in that case. - if dest.Cmp(bigMaxUint64) > 0 { - return false - } - _, ok := dests[dest.Uint64()] - return ok -} - -// jitCalculateGasAndSize calculates the required gas for the given opcode and -// stack items, and derives the new memory size for the operation. It does not -// reduce gas or resize the memory. -func jitCalculateGasAndSize(env Environment, contract *Contract, instr instruction, statedb Database, mem *Memory, stack *stack) (*big.Int, *big.Int, error) { - var ( - gas = new(big.Int) - newMemSize *big.Int = new(big.Int) - ) - err := jitBaseCheck(instr, stack, gas) - if err != nil { - return nil, nil, err - } - - // stack check, memory resize & gas phase - switch op := instr.op; op { - case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16: - n := int(op - SWAP1 + 2) - err := stack.require(n) - if err != nil { - return nil, nil, err - } - gas.Set(GasFastestStep) - case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16: - n := int(op - DUP1 + 1) - err := stack.require(n) - if err != nil { - return nil, nil, err - } - gas.Set(GasFastestStep) - case LOG0, LOG1, LOG2, LOG3, LOG4: - n := int(op - LOG0) - err := stack.require(n + 2) - if err != nil { - return nil, nil, err - } - - mSize, mStart := stack.data[stack.len()-2], stack.data[stack.len()-1] - - add := new(big.Int) - gas.Add(gas, params.LogGas) - gas.Add(gas, add.Mul(big.NewInt(int64(n)), params.LogTopicGas)) - gas.Add(gas, add.Mul(mSize, params.LogDataGas)) - - newMemSize = calcMemSize(mStart, mSize) - case EXP: - gas.Add(gas, new(big.Int).Mul(big.NewInt(int64(len(stack.data[stack.len()-2].Bytes()))), params.ExpByteGas)) - case SSTORE: - err := stack.require(2) - if err != nil { - return nil, nil, err - } - - var g *big.Int - y, x := stack.data[stack.len()-2], stack.data[stack.len()-1] - val := statedb.GetState(contract.Address(), common.BigToHash(x)) - - // This checks for 3 scenarios and calculates gas accordingly - // 1. From a zero-value address to a non-zero value (NEW VALUE) - // 2. From a non-zero value address to a zero-value address (DELETE) - // 3.
From a non-zero to a non-zero (CHANGE) - if common.EmptyHash(val) && !common.EmptyHash(common.BigToHash(y)) { - g = params.SstoreSetGas - } else if !common.EmptyHash(val) && common.EmptyHash(common.BigToHash(y)) { - statedb.AddRefund(params.SstoreRefundGas) - - g = params.SstoreClearGas - } else { - g = params.SstoreClearGas - } - gas.Set(g) - case SUICIDE: - if !statedb.HasSuicided(contract.Address()) { - statedb.AddRefund(params.SuicideRefundGas) - } - case MLOAD: - newMemSize = calcMemSize(stack.peek(), u256(32)) - case MSTORE8: - newMemSize = calcMemSize(stack.peek(), u256(1)) - case MSTORE: - newMemSize = calcMemSize(stack.peek(), u256(32)) - case RETURN: - newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-2]) - case SHA3: - newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-2]) - - words := toWordSize(stack.data[stack.len()-2]) - gas.Add(gas, words.Mul(words, params.Sha3WordGas)) - case CALLDATACOPY: - newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-3]) - - words := toWordSize(stack.data[stack.len()-3]) - gas.Add(gas, words.Mul(words, params.CopyGas)) - case CODECOPY: - newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-3]) - - words := toWordSize(stack.data[stack.len()-3]) - gas.Add(gas, words.Mul(words, params.CopyGas)) - case EXTCODECOPY: - newMemSize = calcMemSize(stack.data[stack.len()-2], stack.data[stack.len()-4]) - - words := toWordSize(stack.data[stack.len()-4]) - gas.Add(gas, words.Mul(words, params.CopyGas)) - - case CREATE: - newMemSize = calcMemSize(stack.data[stack.len()-2], stack.data[stack.len()-3]) - case CALL, CALLCODE: - gas.Add(gas, stack.data[stack.len()-1]) - - if op == CALL { - if !env.Db().Exist(common.BigToAddress(stack.data[stack.len()-2])) { - gas.Add(gas, params.CallNewAccountGas) - } - } - - if len(stack.data[stack.len()-3].Bytes()) > 0 { - gas.Add(gas, params.CallValueTransferGas) - } - - x := calcMemSize(stack.data[stack.len()-6], stack.data[stack.len()-7]) - y := calcMemSize(stack.data[stack.len()-4], stack.data[stack.len()-5]) - - newMemSize = common.BigMax(x, y) - case DELEGATECALL: - gas.Add(gas, stack.data[stack.len()-1]) - - x := calcMemSize(stack.data[stack.len()-5], stack.data[stack.len()-6]) - y := calcMemSize(stack.data[stack.len()-3], stack.data[stack.len()-4]) - - newMemSize = common.BigMax(x, y) - } - quadMemGas(mem, newMemSize, gas) - - return newMemSize, gas, nil -} - -// jitBaseCheck is the same as baseCheck except it doesn't do the lookup in the -// gas table. This is done during compilation instead. -func jitBaseCheck(instr instruction, stack *stack, gas *big.Int) error { - err := stack.require(instr.spop) - if err != nil { - return err - } - - if instr.spush > 0 && stack.len()-instr.spop+instr.spush > int(params.StackLimit.Int64()) { - return fmt.Errorf("stack limit reached %d (%d)", stack.len(), params.StackLimit.Int64()) - } - - // nil on gas means no base calculation - if instr.gas == nil { - return nil - } - - gas.Add(gas, instr.gas) - - return nil -}
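Both jitCalculateGasAndSize above and calculateGasAndSize in vm.go below hand the computed newMemSize to quadMemGas, which applies the Yellow Paper's quadratic memory fee. A standalone sketch of that fee follows, assuming the customary constants (params.MemoryGas = 3, params.QuadCoeffDiv = 512; both are assumptions, not read from this diff); the real quadMemGas charges only the difference between the cost at the new and the current size.

package main

import "fmt"

// memCost mirrors the Yellow Paper memory fee Cmem(a) = 3*a + a*a/512,
// where a is the memory size in 32-byte words. The constants 3 and 512
// stand in for params.MemoryGas and params.QuadCoeffDiv.
func memCost(sizeBytes uint64) uint64 {
	words := (sizeBytes + 31) / 32
	return 3*words + words*words/512
}

func main() {
	for _, size := range []uint64{32, 1024, 65536} {
		// An interpreter would charge memCost(new) - memCost(old) on expansion.
		fmt.Printf("memory of %6d bytes costs %d gas in total\n", size, memCost(size))
	}
}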
diff --git a/core/vm/jit_optimiser.go b/core/vm/jit_optimiser.go deleted file mode 100644 index 4070a9a2abeaa..0000000000000 --- a/core/vm/jit_optimiser.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package vm - -import ( - "math/big" - "time" - - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" -) - -// optimiseProgram optimises a JIT program creating segments out of program -// instructions. Currently covered are multi-pushes and static jumps -func optimiseProgram(program *Program) { - var load []instruction - - var ( - statsJump = 0 - statsPush = 0 - ) - - if glog.V(logger.Debug) { - glog.Infof("optimising %x\n", program.Id[:4]) - tstart := time.Now() - defer func() { - glog.Infof("optimised %x done in %v with JMP: %d PSH: %d\n", program.Id[:4], time.Since(tstart), statsJump, statsPush) - }() - } - - /* - code := Parse(program.code) - for _, test := range [][]OpCode{ - []OpCode{PUSH, PUSH, ADD}, - []OpCode{PUSH, PUSH, SUB}, - []OpCode{PUSH, PUSH, MUL}, - []OpCode{PUSH, PUSH, DIV}, - } { - matchCount := 0 - MatchFn(code, test, func(i int) bool { - matchCount++ - return true - }) - fmt.Printf("found %d match count on: %v\n", matchCount, test) - } - */ - - for i := 0; i < len(program.instructions); i++ { - instr := program.instructions[i].(instruction) - - switch { - case instr.op.IsPush(): - load = append(load, instr) - case instr.op.IsStaticJump(): - if len(load) == 0 { - continue - } - // if the push load is greater than 1, finalise that - // segment first - if len(load) > 2 { - seg, size := makePushSeg(load[:len(load)-1]) - program.instructions[i-size-1] = seg - statsPush++ - } - // create a segment consisting of a predetermined - // jump, destination and validity. - seg := makeStaticJumpSeg(load[len(load)-1].data, program) - program.instructions[i-1] = seg - statsJump++ - - load = nil - default: - // create a new N pushes segment - if len(load) > 1 { - seg, size := makePushSeg(load) - program.instructions[i-size] = seg - statsPush++ - } - load = nil - } - } -} - -// makePushSeg creates a new push segment from N amount of push instructions -func makePushSeg(instrs []instruction) (pushSeg, int) { - var ( - data []*big.Int - gas = new(big.Int) - ) - - for _, instr := range instrs { - data = append(data, instr.data) - gas.Add(gas, instr.gas) - } - - return pushSeg{data, gas}, len(instrs) -}
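To make the segmenting concrete: a run such as PUSH1 0x01, PUSH1 0x02, ADD compiles to two push instructions followed by an arithmetic op, and makePushSeg folds the two pushes into a single segment paying their combined gas up front. A minimal sketch of that folding over plain ints (push here is a hypothetical stand-in for the package's instruction type):

package main

import "fmt"

// push is a hypothetical stand-in for a compiled PUSH instruction,
// carrying its immediate value and its gas cost.
type push struct {
	data int
	gas  uint64
}

// foldPushes mirrors makePushSeg: it merges consecutive pushes into one
// segment with the ordered immediates and the summed gas.
func foldPushes(instrs []push) (data []int, gas uint64) {
	for _, p := range instrs {
		data = append(data, p.data)
		gas += p.gas
	}
	return data, gas
}

func main() {
	data, gas := foldPushes([]push{{1, 3}, {2, 3}})
	fmt.Println(data, gas) // [1 2] 6
}

The interpreter can then materialise all immediates in one step instead of dispatching once per opcode, which is the whole point of the segment.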
-// makeStaticJumpSeg creates a new static jump segment from a predefined -// destination (PUSH, JUMP). -func makeStaticJumpSeg(to *big.Int, program *Program) jumpSeg { - gas := new(big.Int) - gas.Add(gas, _baseCheck[PUSH1].gas) - gas.Add(gas, _baseCheck[JUMP].gas) - - contract := &Contract{Code: program.code} - pos, err := jump(program.mapping, program.destinations, contract, to) - return jumpSeg{pos, err, gas} -} diff --git a/core/vm/jit_test.go b/core/vm/jit_test.go deleted file mode 100644 index a92ca6b39c5f2..0000000000000 --- a/core/vm/jit_test.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . -package vm - -import ( - "math/big" - "testing" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" -) - -const maxRun = 1000 - -func TestSegmenting(t *testing.T) { - prog := NewProgram([]byte{byte(PUSH1), 0x1, byte(PUSH1), 0x1, 0x0}) - err := CompileProgram(prog) - if err != nil { - t.Fatal(err) - } - - if instr, ok := prog.instructions[0].(pushSeg); ok { - if len(instr.data) != 2 { - t.Error("expected 2 element width pushSegment, got", len(instr.data)) - } - } else { - t.Errorf("expected instr[0] to be a pushSeg, got %T", prog.instructions[0]) - } - - prog = NewProgram([]byte{byte(PUSH1), 0x1, byte(PUSH1), 0x1, byte(JUMP)}) - err = CompileProgram(prog) - if err != nil { - t.Fatal(err) - } - if _, ok := prog.instructions[1].(jumpSeg); !ok { - t.Errorf("expected instr[1] to be jumpSeg, got %T", prog.instructions[1]) - } - - prog = NewProgram([]byte{byte(PUSH1), 0x1, byte(PUSH1), 0x1, byte(PUSH1), 0x1, byte(JUMP)}) - err = CompileProgram(prog) - if err != nil { - t.Fatal(err) - } - if instr, ok := prog.instructions[0].(pushSeg); ok { - if len(instr.data) != 2 { - t.Error("expected 2 element width pushSegment, got", len(instr.data)) - } - } else { - t.Errorf("expected instr[0] to be a pushSeg, got %T", prog.instructions[0]) - } - if _, ok := prog.instructions[2].(jumpSeg); !ok { - t.Errorf("expected instr[2] to be jumpSeg, got %T", prog.instructions[2]) - } -} - -func TestCompiling(t *testing.T) { - prog := NewProgram([]byte{0x60, 0x10}) - err := CompileProgram(prog) - if err != nil { - t.Error("didn't expect compile error") - } - - if len(prog.instructions) != 1 { - t.Error("expected 1 compiled instruction, got", len(prog.instructions)) - } -} - -func TestResetInput(t *testing.T) { - var sender account - - env := NewEnv(false, true) - contract := NewContract(sender, sender, big.NewInt(100), big.NewInt(10000), big.NewInt(0)) - contract.CodeAddr = &common.Address{} - - program := NewProgram([]byte{}) - RunProgram(program, env, contract, []byte{0xbe, 0xef}) - if contract.Input != nil { - t.Errorf("expected input to be nil, got %x", contract.Input) - } -} - -func TestPcMappingToInstruction(t *testing.T) { - program := NewProgram([]byte{byte(PUSH2),
0xbe, 0xef, byte(ADD)}) - CompileProgram(program) - if program.mapping[3] != 1 { - t.Error("expected PC 3 to map to instr no. 1, got", program.mapping[3]) - } -} - -var benchmarks = map[string]vmBench{ - "pushes": vmBench{ - false, false, false, - common.Hex2Bytes("600a600a01600a600a01600a600a01600a600a01600a600a01600a600a01600a600a01600a600a01600a600a01600a600a01"), nil, - }, -} - -func BenchmarkPushes(b *testing.B) { - runVmBench(benchmarks["pushes"], b) -} - -type vmBench struct { - precompile bool // compile prior to executing - nojit bool // ignore jit (sets DisableJit = true) - forcejit bool // forces the jit, precompile is ignored - - code []byte - input []byte -} - -type account struct{} - -func (account) SubBalance(amount *big.Int) {} -func (account) AddBalance(amount *big.Int) {} -func (account) SetAddress(common.Address) {} -func (account) Value() *big.Int { return nil } -func (account) SetBalance(*big.Int) {} -func (account) SetNonce(uint64) {} -func (account) Balance() *big.Int { return nil } -func (account) Address() common.Address { return common.Address{} } -func (account) ReturnGas(*big.Int, *big.Int) {} -func (account) SetCode(common.Hash, []byte) {} -func (account) ForEachStorage(cb func(key, value common.Hash) bool) {} - -func runVmBench(test vmBench, b *testing.B) { - var sender account - - if test.precompile && !test.forcejit { - NewProgram(test.code) - } - env := NewEnv(test.nojit, test.forcejit) - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - context := NewContract(sender, sender, big.NewInt(100), big.NewInt(10000), big.NewInt(0)) - context.Code = test.code - context.CodeAddr = &common.Address{} - _, err := env.Vm().Run(context, test.input) - if err != nil { - b.Error(err) - b.FailNow() - } - } -} - -type Env struct { - gasLimit *big.Int - depth int - evm *EVM -} - -func NewEnv(noJit, forceJit bool) *Env { - env := &Env{gasLimit: big.NewInt(10000), depth: 0} - env.evm = New(env, Config{ - EnableJit: !noJit, - ForceJit: forceJit, - }) - return env -} - -func (self *Env) RuleSet() RuleSet { return ruleSet{new(big.Int)} } -func (self *Env) Vm() Vm { return self.evm } -func (self *Env) Origin() common.Address { return common.Address{} } -func (self *Env) BlockNumber() *big.Int { return big.NewInt(0) } -func (self *Env) AddStructLog(log StructLog) { -} -func (self *Env) StructLogs() []StructLog { - return nil -} - -//func (self *Env) PrevHash() []byte { return self.parent } -func (self *Env) Coinbase() common.Address { return common.Address{} } -func (self *Env) SnapshotDatabase() int { return 0 } -func (self *Env) RevertToSnapshot(int) {} -func (self *Env) Time() *big.Int { return big.NewInt(time.Now().Unix()) } -func (self *Env) Difficulty() *big.Int { return big.NewInt(0) } -func (self *Env) Db() Database { return nil } -func (self *Env) GasLimit() *big.Int { return self.gasLimit } -func (self *Env) VmType() Type { return StdVmTy } -func (self *Env) GetHash(n uint64) common.Hash { - return common.BytesToHash(crypto.Keccak256([]byte(big.NewInt(int64(n)).String()))) -} -func (self *Env) AddLog(log *Log) { -} -func (self *Env) Depth() int { return self.depth } -func (self *Env) SetDepth(i int) { self.depth = i } -func (self *Env) CanTransfer(from common.Address, balance *big.Int) bool { - return true -} -func (self *Env) Transfer(from, to Account, amount *big.Int) {} -func (self *Env) Call(caller ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) { - return nil, nil -} -func (self *Env) CallCode(caller ContractRef, addr
common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) { - return nil, nil -} -func (self *Env) Create(caller ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) { - return nil, common.Address{}, nil -} -func (self *Env) DelegateCall(me ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error) { - return nil, nil -} diff --git a/core/vm/log.go b/core/vm/log.go deleted file mode 100644 index e9eeaba0a57ef..0000000000000 --- a/core/vm/log.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package vm - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/rlp" -) - -type Log struct { - // Consensus fields - Address common.Address - Topics []common.Hash - Data []byte - - // Derived fields (don't reorder!) - BlockNumber uint64 - TxHash common.Hash - TxIndex uint - BlockHash common.Hash - Index uint -} - -func NewLog(address common.Address, topics []common.Hash, data []byte, number uint64) *Log { - return &Log{Address: address, Topics: topics, Data: data, BlockNumber: number} -} - -func (l *Log) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, []interface{}{l.Address, l.Topics, l.Data}) -} - -func (l *Log) DecodeRLP(s *rlp.Stream) error { - var log struct { - Address common.Address - Topics []common.Hash - Data []byte - } - if err := s.Decode(&log); err != nil { - return err - } - l.Address, l.Topics, l.Data = log.Address, log.Topics, log.Data - return nil -} - -func (l *Log) String() string { - return fmt.Sprintf(`log: %x %x %x %x %d %x %d`, l.Address, l.Topics, l.Data, l.TxHash, l.TxIndex, l.BlockHash, l.Index) -} - -func (l *Log) MarshalJSON() ([]byte, error) { - fields := map[string]interface{}{ - "address": l.Address, - "data": fmt.Sprintf("%#x", l.Data), - "blockNumber": fmt.Sprintf("%#x", l.BlockNumber), - "logIndex": fmt.Sprintf("%#x", l.Index), - "blockHash": l.BlockHash, - "transactionHash": l.TxHash, - "transactionIndex": fmt.Sprintf("%#x", l.TxIndex), - "topics": l.Topics, - } - - return json.Marshal(fields) -} - -type Logs []*Log - -// LogForStorage is a wrapper around a Log that flattens and parses the entire -// content of a log, as opposed to only the consensus fields (it does so by -// hiding the rlp interface methods). -type LogForStorage Log
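EncodeRLP above deliberately serialises only the three consensus fields, so the derived fields (block number, tx and log indices) never enter the hash chain and must be re-derived on load. A small sketch of the resulting round trip, written against the pre-removal core/vm and rlp packages of this repository:

package main

import (
	"bytes"
	"fmt"

	"github.com/expanse-org/go-expanse/common"
	"github.com/expanse-org/go-expanse/core/vm"
	"github.com/expanse-org/go-expanse/rlp"
)

func main() {
	l := vm.NewLog(common.Address{1}, []common.Hash{{2}}, []byte{3}, 42)

	var buf bytes.Buffer
	if err := rlp.Encode(&buf, l); err != nil {
		panic(err)
	}

	var out vm.Log
	if err := rlp.DecodeBytes(buf.Bytes(), &out); err != nil {
		panic(err)
	}
	// Address, Topics and Data survive; BlockNumber (42) does not, since
	// EncodeRLP intentionally drops the derived fields.
	fmt.Println(out.Address == l.Address, out.BlockNumber)
}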
diff --git a/core/vm/vm.go b/core/vm/vm.go deleted file mode 100644 index 37d9ce789a95c..0000000000000 --- a/core/vm/vm.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package vm - -import ( - "fmt" - "math/big" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/params" -) - -// Config contains the configuration options for the EVM -type Config struct { - Debug bool - EnableJit bool - ForceJit bool - Logger LogConfig -} - -// EVM is used to run Expanse based contracts and will utilise the -// passed environment to query external sources for state information. -// The EVM will run the byte code VM or JIT VM based on the passed -// configuration. -type EVM struct { - env Environment - jumpTable vmJumpTable - cfg Config - - logger *Logger -} - -// New returns a new instance of the EVM. -func New(env Environment, cfg Config) *EVM { - var logger *Logger - if cfg.Debug { - logger = newLogger(cfg.Logger, env) - } - - return &EVM{ - env: env, - jumpTable: newJumpTable(env.RuleSet(), env.BlockNumber()), - cfg: cfg, - logger: logger, - } -} - -// Run loops and evaluates the contract's code with the given input data -func (evm *EVM) Run(contract *Contract, input []byte) (ret []byte, err error) { - evm.env.SetDepth(evm.env.Depth() + 1) - defer evm.env.SetDepth(evm.env.Depth() - 1) - if contract.CodeAddr != nil { - if p := Precompiled[contract.CodeAddr.Str()]; p != nil { - return evm.RunPrecompiled(p, input, contract) - } - } - - // Don't bother with the execution if there's no code. - if len(contract.Code) == 0 { - return nil, nil - } - - codehash := contract.CodeHash // codehash is used when doing jump dest caching - if codehash == (common.Hash{}) { - codehash = crypto.Keccak256Hash(contract.Code) - } - var program *Program - if evm.cfg.EnableJit { - // If the JIT is enabled check the status of the JIT program, - // if it doesn't exist compile a new program in a separate - // goroutine or wait for compilation to finish if the JIT is - // forced. - switch GetProgramStatus(codehash) { - case progReady: - return RunProgram(GetProgram(codehash), evm.env, contract, input) - case progUnknown: - if evm.cfg.ForceJit { - // Create and compile program - program = NewProgram(contract.Code) - perr := CompileProgram(program) - if perr == nil { - return RunProgram(program, evm.env, contract, input) - } - glog.V(logger.Info).Infoln("error compiling program", perr) - } else { - // create and compile the program.
Compilation - // is done in a separate goroutine - program = NewProgram(contract.Code) - go func() { - err := CompileProgram(program) - if err != nil { - glog.V(logger.Info).Infoln("error compiling program", err) - return - } - }() - } - } - } - - var ( - caller = contract.caller - code = contract.Code - instrCount = 0 - - op OpCode // current opcode - mem = NewMemory() // bound memory - stack = newstack() // local stack - statedb = evm.env.Db() // current state - // For optimisation reasons we're using uint64 as the program counter. - // The YP defines the PC to be uint256; exceeding 2^64 is theoretically - // possible but practically infeasible. - pc = uint64(0) // program counter - - // jump evaluates and checks whether the given jump destination is a valid one; - // if valid, move the `pc`, otherwise return an error. - jump = func(from uint64, to *big.Int) error { - if !contract.jumpdests.has(codehash, code, to) { - nop := contract.GetOp(to.Uint64()) - return fmt.Errorf("invalid jump destination (%v) %v", nop, to) - } - - pc = to.Uint64() - - return nil - } - - newMemSize *big.Int - cost *big.Int - ) - contract.Input = input - - // Defer a check for an error: when one is set and debugging is enabled, - // capture the final state in the logger before returning. - defer func() { - if err != nil && evm.cfg.Debug { - evm.logger.captureState(pc, op, contract.Gas, cost, mem, stack, contract, evm.env.Depth(), err) - } - }() - - if glog.V(logger.Debug) { - glog.Infof("running byte VM %x\n", codehash[:4]) - tstart := time.Now() - defer func() { - glog.Infof("byte VM %x done. time: %v instrc: %v\n", codehash[:4], time.Since(tstart), instrCount) - }() - } - - for ; ; instrCount++ { - /* - if EnableJit && it%100 == 0 { - if program != nil && progStatus(atomic.LoadInt32(&program.status)) == progReady { - // move execution - fmt.Println("moved", it) - glog.V(logger.Info).Infoln("Moved execution to JIT") - return runProgram(program, pc, mem, stack, evm.env, contract, input) - } - } - */ - - // Get the opcode at the current program counter - op = contract.GetOp(pc) - // calculate the new memory size and gas price for the current executing opcode - newMemSize, cost, err = calculateGasAndSize(evm.env, contract, caller, op, statedb, mem, stack) - if err != nil { - return nil, err - } - - // Use the calculated gas.
When insufficient gas is present, use all gas and return an - // Out Of Gas error - if !contract.UseGas(cost) { - return nil, OutOfGasError - } - - // Resize the memory calculated previously - mem.Resize(newMemSize.Uint64()) - // Add a log message - if evm.cfg.Debug { - evm.logger.captureState(pc, op, contract.Gas, cost, mem, stack, contract, evm.env.Depth(), nil) - } - - if opPtr := evm.jumpTable[op]; opPtr.valid { - if opPtr.fn != nil { - opPtr.fn(instruction{}, &pc, evm.env, contract, mem, stack) - } else { - switch op { - case PC: - opPc(instruction{data: new(big.Int).SetUint64(pc)}, &pc, evm.env, contract, mem, stack) - case JUMP: - if err := jump(pc, stack.pop()); err != nil { - return nil, err - } - - continue - case JUMPI: - pos, cond := stack.pop(), stack.pop() - - if cond.Cmp(common.BigTrue) >= 0 { - if err := jump(pc, pos); err != nil { - return nil, err - } - - continue - } - case RETURN: - offset, size := stack.pop(), stack.pop() - ret := mem.GetPtr(offset.Int64(), size.Int64()) - - return ret, nil - case SUICIDE: - opSuicide(instruction{}, nil, evm.env, contract, mem, stack) - - fallthrough - case STOP: // Stop the contract - return nil, nil - } - } - } else { - return nil, fmt.Errorf("Invalid opcode %x", op) - } - - pc++ - - } -} - -// calculateGasAndSize calculates the required gas for the given opcode and -// stack items, and derives the new memory size for the operation. It does not -// reduce gas or resize the memory. -func calculateGasAndSize(env Environment, contract *Contract, caller ContractRef, op OpCode, statedb Database, mem *Memory, stack *stack) (*big.Int, *big.Int, error) { - var ( - gas = new(big.Int) - newMemSize *big.Int = new(big.Int) - ) - err := baseCheck(op, stack, gas) - if err != nil { - return nil, nil, err - } - - // stack check, memory resize & gas phase - switch op { - case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16: - n := int(op - SWAP1 + 2) - err := stack.require(n) - if err != nil { - return nil, nil, err - } - gas.Set(GasFastestStep) - case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16: - n := int(op - DUP1 + 1) - err := stack.require(n) - if err != nil { - return nil, nil, err - } - gas.Set(GasFastestStep) - case LOG0, LOG1, LOG2, LOG3, LOG4: - n := int(op - LOG0) - err := stack.require(n + 2) - if err != nil { - return nil, nil, err - } - - mSize, mStart := stack.data[stack.len()-2], stack.data[stack.len()-1] - - gas.Add(gas, params.LogGas) - gas.Add(gas, new(big.Int).Mul(big.NewInt(int64(n)), params.LogTopicGas)) - gas.Add(gas, new(big.Int).Mul(mSize, params.LogDataGas)) - - newMemSize = calcMemSize(mStart, mSize) - case EXP: - gas.Add(gas, new(big.Int).Mul(big.NewInt(int64(len(stack.data[stack.len()-2].Bytes()))), params.ExpByteGas)) - case SSTORE: - err := stack.require(2) - if err != nil { - return nil, nil, err - } - - var g *big.Int - y, x := stack.data[stack.len()-2], stack.data[stack.len()-1] - val := statedb.GetState(contract.Address(), common.BigToHash(x)) - - // This checks for 3 scenarios and calculates gas accordingly - // 1. From a zero-value address to a non-zero value (NEW VALUE) - // 2. From a non-zero value address to a zero-value address (DELETE) - // 3.
From a non-zero to a non-zero (CHANGE) - if common.EmptyHash(val) && !common.EmptyHash(common.BigToHash(y)) { - // 0 => non 0 - g = params.SstoreSetGas - } else if !common.EmptyHash(val) && common.EmptyHash(common.BigToHash(y)) { - statedb.AddRefund(params.SstoreRefundGas) - - g = params.SstoreClearGas - } else { - // non 0 => non 0 (or 0 => 0) - g = params.SstoreClearGas - } - gas.Set(g) - case SUICIDE: - if !statedb.HasSuicided(contract.Address()) { - statedb.AddRefund(params.SuicideRefundGas) - } - case MLOAD: - newMemSize = calcMemSize(stack.peek(), u256(32)) - case MSTORE8: - newMemSize = calcMemSize(stack.peek(), u256(1)) - case MSTORE: - newMemSize = calcMemSize(stack.peek(), u256(32)) - case RETURN: - newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-2]) - case SHA3: - newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-2]) - - words := toWordSize(stack.data[stack.len()-2]) - gas.Add(gas, words.Mul(words, params.Sha3WordGas)) - case CALLDATACOPY: - newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-3]) - - words := toWordSize(stack.data[stack.len()-3]) - gas.Add(gas, words.Mul(words, params.CopyGas)) - case CODECOPY: - newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-3]) - - words := toWordSize(stack.data[stack.len()-3]) - gas.Add(gas, words.Mul(words, params.CopyGas)) - case EXTCODECOPY: - newMemSize = calcMemSize(stack.data[stack.len()-2], stack.data[stack.len()-4]) - - words := toWordSize(stack.data[stack.len()-4]) - gas.Add(gas, words.Mul(words, params.CopyGas)) - - case CREATE: - newMemSize = calcMemSize(stack.data[stack.len()-2], stack.data[stack.len()-3]) - case CALL, CALLCODE: - gas.Add(gas, stack.data[stack.len()-1]) - - if op == CALL { - if !env.Db().Exist(common.BigToAddress(stack.data[stack.len()-2])) { - gas.Add(gas, params.CallNewAccountGas) - } - } - - if len(stack.data[stack.len()-3].Bytes()) > 0 { - gas.Add(gas, params.CallValueTransferGas) - } - - x := calcMemSize(stack.data[stack.len()-6], stack.data[stack.len()-7]) - y := calcMemSize(stack.data[stack.len()-4], stack.data[stack.len()-5]) - - newMemSize = common.BigMax(x, y) - case DELEGATECALL: - gas.Add(gas, stack.data[stack.len()-1]) - - x := calcMemSize(stack.data[stack.len()-5], stack.data[stack.len()-6]) - y := calcMemSize(stack.data[stack.len()-3], stack.data[stack.len()-4]) - - newMemSize = common.BigMax(x, y) - } - quadMemGas(mem, newMemSize, gas) - - return newMemSize, gas, nil -} - -// RunPrecompiled runs and evaluates the output of a precompiled contract defined in contracts.go -func (evm *EVM) RunPrecompiled(p *PrecompiledAccount, input []byte, contract *Contract) (ret []byte, err error) { - gas := p.Gas(len(input)) - if contract.UseGas(gas) { - ret = p.Call(input) - - return ret, nil - } else { - return nil, OutOfGasError - } -} diff --git a/core/vm/vm_jit.go b/core/vm/vm_jit.go index fc64df42ab559..32f6078d8664f 100644 --- a/core/vm/vm_jit.go +++ b/core/vm/vm_jit.go @@ -25,7 +25,7 @@ int evmjit_run(void* _jit, void* _data, void* _env); void evmjit_destroy(void* _jit); // Shared library evmjit (e.g.
libevmjit.so) is expected to be installed in /usr/local/lib -// More: https://github.com/expanse-org/evmjit +// More: https://github.com/ethereum/evmjit #cgo LDFLAGS: -levmjit */ import "C" diff --git a/core/vm_env.go b/core/vm_env.go deleted file mode 100644 index de266945bd7d0..0000000000000 --- a/core/vm_env.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -package core - -import ( - "math/big" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" -) - -// GetHashFn returns a function through which the VM env can query block hashes, -// up to the limit defined by the Yellow Paper, using the given block chain -// to look up the information. -func GetHashFn(ref common.Hash, chain *BlockChain) func(n uint64) common.Hash { - return func(n uint64) common.Hash { - for block := chain.GetBlock(ref); block != nil; block = chain.GetBlock(block.ParentHash()) { - if block.NumberU64() == n { - return block.Hash() - } - } - - return common.Hash{} - } -} - -type VMEnv struct { - chainConfig *ChainConfig // Chain configuration - state *state.StateDB // State to use for executing - evm *vm.EVM // The Expanse Virtual Machine - depth int // Current execution depth - msg Message // Message being applied - - header *types.Header // Header information - chain *BlockChain // Blockchain handle - logs []vm.StructLog // Logs for the custom structured logger - getHashFn func(uint64) common.Hash // getHashFn callback is used to retrieve block hashes -} - -func NewEnv(state *state.StateDB, chainConfig *ChainConfig, chain *BlockChain, msg Message, header *types.Header, cfg vm.Config) *VMEnv { - env := &VMEnv{ - chainConfig: chainConfig, - chain: chain, - state: state, - header: header, - msg: msg, - getHashFn: GetHashFn(header.ParentHash, chain), - } - - // if no log collector is present, set self as the collector - if cfg.Logger.Collector == nil { - cfg.Logger.Collector = env - } - - env.evm = vm.New(env, cfg) - return env -} - -func (self *VMEnv) RuleSet() vm.RuleSet { return self.chainConfig } -func (self *VMEnv) Vm() vm.Vm { return self.evm } -func (self *VMEnv) Origin() common.Address { f, _ := self.msg.From(); return f } -func (self *VMEnv) BlockNumber() *big.Int { return self.header.Number } -func (self *VMEnv) Coinbase() common.Address { return self.header.Coinbase } -func (self *VMEnv) Time() *big.Int { return self.header.Time } -func (self *VMEnv) Difficulty() *big.Int { return self.header.Difficulty } -func (self *VMEnv) GasLimit() *big.Int { return self.header.GasLimit } -func (self *VMEnv) Value() *big.Int { return self.msg.Value() } -func (self *VMEnv) Db() vm.Database { return self.state
} -func (self *VMEnv) Depth() int { return self.depth } -func (self *VMEnv) SetDepth(i int) { self.depth = i } -func (self *VMEnv) GetHash(n uint64) common.Hash { - return self.getHashFn(n) -} - -func (self *VMEnv) AddLog(log *vm.Log) { - self.state.AddLog(log) -} -func (self *VMEnv) CanTransfer(from common.Address, balance *big.Int) bool { - return self.state.GetBalance(from).Cmp(balance) >= 0 -} - -func (self *VMEnv) SnapshotDatabase() int { - return self.state.Snapshot() -} - -func (self *VMEnv) RevertToSnapshot(snapshot int) { - self.state.RevertToSnapshot(snapshot) -} - -func (self *VMEnv) Transfer(from, to vm.Account, amount *big.Int) { - Transfer(from, to, amount) -} - -func (self *VMEnv) Call(me vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) { - return Call(self, me, addr, data, gas, price, value) -} -func (self *VMEnv) CallCode(me vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) { - return CallCode(self, me, addr, data, gas, price, value) -} - -func (self *VMEnv) DelegateCall(me vm.ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error) { - return DelegateCall(self, me, addr, data, gas, price) -} - -func (self *VMEnv) Create(me vm.ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) { - return Create(self, me, data, gas, price, value) -} - -func (self *VMEnv) StructLogs() []vm.StructLog { - return self.logs -} - -func (self *VMEnv) AddStructLog(log vm.StructLog) { - self.logs = append(self.logs, log) -} diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go index 846f4d2174621..959068cc9adc5 100644 --- a/crypto/ecies/ecies_test.go +++ b/crypto/ecies/ecies_test.go @@ -42,6 +42,7 @@ import ( "testing" "github.com/expanse-org/go-expanse/crypto" + ) var dumpEnc bool diff --git a/crypto/keypair.go b/crypto/keypair.go deleted file mode 100644 index c3988960ab2fd..0000000000000 --- a/crypto/keypair.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
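// Note: crypto/keypair.go, removed below, derives an account address as Sha3(PublicKey[1:])[12:],
// i.e. the last 20 bytes of the Keccak-256 hash of the uncompressed public key without its 0x04
// prefix. A minimal standalone sketch of that derivation (an illustration only, not part of the
// patch; assumes golang.org/x/crypto/sha3 for the legacy Keccak-256):
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// addressFromPubKey mirrors KeyPair.Address: hash the 64-byte public key and keep the tail.
func addressFromPubKey(pubkey []byte) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(pubkey[1:])    // drop the 0x04 uncompressed-point prefix
	return h.Sum(nil)[12:] // bytes 12..31 of the 32-byte digest form the address
}

func main() {
	pub := make([]byte, 65) // placeholder key; a real one comes from secp256k1.GenerateKeyPair
	pub[0] = 0x04
	fmt.Printf("address: %x\n", addressFromPubKey(pub))
}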
- -package crypto - -import ( - "strings" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto/secp256k1" -) - -type KeyPair struct { - PrivateKey []byte - PublicKey []byte - address []byte - mnemonic string - // The associated account - // account *StateObject -} - -func GenerateNewKeyPair() *KeyPair { - _, prv := secp256k1.GenerateKeyPair() - keyPair, _ := NewKeyPairFromSec(prv) // swallow error, this one cannot err - return keyPair -} - -func NewKeyPairFromSec(seckey []byte) (*KeyPair, error) { - pubkey, err := secp256k1.GeneratePubKey(seckey) - if err != nil { - return nil, err - } - - return &KeyPair{PrivateKey: seckey, PublicKey: pubkey}, nil -} - -func (k *KeyPair) Address() []byte { - if k.address == nil { - k.address = Sha3(k.PublicKey[1:])[12:] - } - return k.address -} - -func (k *KeyPair) Mnemonic() string { - if k.mnemonic == "" { - k.mnemonic = strings.Join(MnemonicEncode(common.Bytes2Hex(k.PrivateKey)), " ") - } - return k.mnemonic -} - -func (k *KeyPair) AsStrings() (string, string, string, string) { - return k.Mnemonic(), common.Bytes2Hex(k.Address()), common.Bytes2Hex(k.PrivateKey), common.Bytes2Hex(k.PublicKey) -} diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 9f4b72e7d7500..0000000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM ubuntu:wily -MAINTAINER chrisfranko - - -ENV DEBIAN_FRONTEND noninteractive - -# Usual update / upgrade -RUN apt-get update -RUN apt-get upgrade -q -y -RUN apt-get dist-upgrade -q -y - -# Let our containers upgrade themselves -RUN apt-get install -q -y unattended-upgrades - -# Install Expanse -RUN apt-get install -q -y curl git mercurial binutils bison gcc make libgmp3-dev build-essential - -# Install Go -RUN \ - mkdir -p /goroot && \ - curl https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz | tar xvzf - -C /goroot --strip-components=1 - -# Set environment variables. -ENV GOROOT /goroot -ENV GOPATH /gopath -ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH - -RUN git clone http://www.github.com/expanse-org/go-expanse.git -RUN cd go-expanse && make gexp - -RUN cp build/bin/gexp /usr/bin/gexp - -EXPOSE 9656 -EXPOSE 42786 - -ENTRYPOINT ["/usr/bin/gexp"] diff --git a/errs/errors.go b/errs/errors.go deleted file mode 100644 index 4c363e280044b..0000000000000 --- a/errs/errors.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package errs - -import ( - "fmt" - - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" -) - -/* -Errors implements an error handler providing standardised errors for a package. 
-Fields: - - Errors: - a map from error codes to description - - Package: - name of the package/component - - Level: - a function mapping error code to logger.LogLevel (severity) - if not given, errors default to logger.InfoLevel -*/ -type Errors struct { - Errors map[int]string - Package string - Level func(code int) logger.LogLevel -} - -/* -Error implements the standard go error interface. - - errors.New(code, format, params ...interface{}) - -Prints as: - - [package] description: details - -where details is fmt.Sprintf(self.format, self.params...) -*/ -type Error struct { - Code int - Name string - Package string - level logger.LogLevel - message string - format string - params []interface{} -} - -func (self *Errors) New(code int, format string, params ...interface{}) *Error { - name, ok := self.Errors[code] - if !ok { - panic("invalid error code") - } - level := logger.InfoLevel - if self.Level != nil { - level = self.Level(code) - } - return &Error{ - Code: code, - Name: name, - Package: self.Package, - level: level, - format: format, - params: params, - } -} - -func (self Error) Error() (message string) { - if len(message) == 0 { - self.message = fmt.Sprintf("[%s] ERROR: %s", self.Package, self.Name) - if self.format != "" { - self.message += ": " + fmt.Sprintf(self.format, self.params...) - } - } - return self.message -} - -func (self Error) Log(v glog.Verbose) { - if v { - v.Infoln(self) - } -} - -/* -err.Fatal() is true if err's severity level is 0 or 1 (logger.ErrorLevel or logger.Silence) -*/ -func (self *Error) Fatal() (fatal bool) { - if self.level < logger.WarnLevel { - fatal = true - } - return -} diff --git a/errs/errors_test.go b/errs/errors_test.go deleted file mode 100644 index 5d64fc47d4222..0000000000000 --- a/errs/errors_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package errs - -import ( - "fmt" - "testing" - - "github.com/expanse-org/go-expanse/logger" -) - -func testErrors() *Errors { - return &Errors{ - Package: "TEST", - Errors: map[int]string{ - 0: "zero", - 1: "one", - }, - Level: func(i int) (l logger.LogLevel) { - if i == 0 { - l = logger.ErrorLevel - } else { - l = logger.WarnLevel - } - return - }, - } -} - -func TestErrorMessage(t *testing.T) { - err := testErrors().New(0, "zero detail %v", "available") - message := fmt.Sprintf("%v", err) - exp := "[TEST] ERROR: zero: zero detail available" - if message != exp { - t.Errorf("error message incorrect. 
expected %v, got %v", exp, message) - } -} - -func TestErrorSeverity(t *testing.T) { - err0 := testErrors().New(0, "zero detail") - if !err0.Fatal() { - t.Errorf("error should be fatal") - } - err1 := testErrors().New(1, "one detail") - if err1.Fatal() { - t.Errorf("error should not be fatal") - } -} diff --git a/ethdb/README.md b/ethdb/README.md deleted file mode 100644 index 3e293dc662aa7..0000000000000 --- a/ethdb/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# ethdb - -The ethdb package contains the expanse database interfaces - -# Installation - -`go get github.com/expanse-org/ethdb-go` - -# Usage - -Todo :-) diff --git a/exp/api.go b/exp/api.go deleted file mode 100644 index ef323c8ebeb6c..0000000000000 --- a/exp/api.go +++ /dev/null @@ -1,1968 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package exp - -import ( - "bytes" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math/big" - "os" - "runtime" - "strings" - "sync" - "time" - - "github.com/expanse-org/ethash" - "github.com/expanse-org/go-expanse/accounts" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/common/compiler" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/miner" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/rlp" - "github.com/expanse-org/go-expanse/rpc" - "github.com/syndtr/goleveldb/leveldb" - "golang.org/x/net/context" -) - -const defaultGas = uint64(90000) - -// blockByNumber is a commonly used helper function which retrieves and returns -// the block for the given block number, capable of handling two special blocks: -// rpc.LatestBlockNumber and rpc.PendingBlockNumber. It returns nil when no block -// could be found. -func blockByNumber(m *miner.Miner, bc *core.BlockChain, blockNr rpc.BlockNumber) *types.Block { - // Pending block is only known by the miner - if blockNr == rpc.PendingBlockNumber { - block, _ := m.Pending() - return block - } - // Otherwise resolve and return the block - if blockNr == rpc.LatestBlockNumber { - return bc.CurrentBlock() - } - return bc.GetBlockByNumber(uint64(blockNr)) -} - -// stateAndBlockByNumber is a commonly used helper function which retrieves and -// returns the state and containing block for the given block number, capable of -// handling two special states: rpc.LatestBlockNumber and rpc.PendingBlockNumber. 
-// It returns nil when no block or state could be found. -func stateAndBlockByNumber(m *miner.Miner, bc *core.BlockChain, blockNr rpc.BlockNumber, chainDb ethdb.Database) (*state.StateDB, *types.Block, error) { - // Pending state is only known by the miner - if blockNr == rpc.PendingBlockNumber { - block, state := m.Pending() - return state, block, nil - } - // Otherwise resolve the block number and return its state - block := blockByNumber(m, bc, blockNr) - if block == nil { - return nil, nil, nil - } - stateDb, err := state.New(block.Root(), chainDb) - return stateDb, block, err -} - -// PublicEthereumAPI provides an API to access Expanse related information. -// It offers only methods that operate on public data that is freely available to anyone. -type PublicEthereumAPI struct { - e *Expanse - gpo *GasPriceOracle -} - -// NewPublicEthereumAPI creates a new Expanse protocol API. -func NewPublicEthereumAPI(e *Expanse) *PublicEthereumAPI { - return &PublicEthereumAPI{ - e: e, - gpo: e.gpo, - } -} - -// GasPrice returns a suggestion for a gas price. -func (s *PublicEthereumAPI) GasPrice() *big.Int { - return s.gpo.SuggestPrice() -} - -// GetCompilers returns the collection of available smart contract compilers -func (s *PublicEthereumAPI) GetCompilers() ([]string, error) { - solc, err := s.e.Solc() - if err == nil && solc != nil { - return []string{"Solidity"}, nil - } - - return []string{}, nil -} - -// CompileSolidity compiles the given solidity source -func (s *PublicEthereumAPI) CompileSolidity(source string) (map[string]*compiler.Contract, error) { - solc, err := s.e.Solc() - if err != nil { - return nil, err - } - - if solc == nil { - return nil, errors.New("solc (solidity compiler) not found") - } - - return solc.Compile(source) -} - -// Etherbase is the address that mining rewards will be sent to -func (s *PublicEthereumAPI) Etherbase() (common.Address, error) { - return s.e.Etherbase() -} - -// Coinbase is the address that mining rewards will be sent to (alias for Etherbase) -func (s *PublicEthereumAPI) Coinbase() (common.Address, error) { - return s.Etherbase() -} - -// ProtocolVersion returns the current Expanse protocol version this node supports -func (s *PublicEthereumAPI) ProtocolVersion() *rpc.HexNumber { - return rpc.NewHexNumber(s.e.EthVersion()) -} - -// Hashrate returns the POW hashrate -func (s *PublicEthereumAPI) Hashrate() *rpc.HexNumber { - return rpc.NewHexNumber(s.e.Miner().HashRate()) -} - -// Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not - // yet received the latest block headers from its peers.
In case it is synchronizing: - // - startingBlock: block number this node started to synchronise from - // - currentBlock: block number this node is currently importing - // - highestBlock: block number of the highest block header this node has received from peers - // - pulledStates: number of state entries processed until now - // - knownStates: number of known state entries that still need to be pulled -func (s *PublicEthereumAPI) Syncing() (interface{}, error) { - origin, current, height, pulled, known := s.e.Downloader().Progress() - - // Return not syncing if the synchronisation already completed - if current >= height { - return false, nil - } - // Otherwise gather the block sync stats - return map[string]interface{}{ - "startingBlock": rpc.NewHexNumber(origin), - "currentBlock": rpc.NewHexNumber(current), - "highestBlock": rpc.NewHexNumber(height), - "pulledStates": rpc.NewHexNumber(pulled), - "knownStates": rpc.NewHexNumber(known), - }, nil -} - -// PublicMinerAPI provides an API to control the miner. -// It offers only methods that operate on data that pose no security risk when it is publicly accessible. -type PublicMinerAPI struct { - e *Expanse - agent *miner.RemoteAgent -} - -// NewPublicMinerAPI creates a new PublicMinerAPI instance. -func NewPublicMinerAPI(e *Expanse) *PublicMinerAPI { - agent := miner.NewRemoteAgent() - e.Miner().Register(agent) - - return &PublicMinerAPI{e, agent} -} - -// Mining returns an indication if this node is currently mining. -func (s *PublicMinerAPI) Mining() bool { - return s.e.IsMining() -} - -// SubmitWork can be used by external miners to submit their POW solutions. It returns an indication if the work was - // accepted. Note, this is not an indication if the provided work was valid! -func (s *PublicMinerAPI) SubmitWork(nonce rpc.HexNumber, solution, digest common.Hash) bool { - return s.agent.SubmitWork(nonce.Uint64(), digest, solution) -} - -// GetWork returns a work package for an external miner. The work package consists of 3 strings - // result[0], 32 bytes hex encoded current block header pow-hash - // result[1], 32 bytes hex encoded seed hash used for DAG - // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -func (s *PublicMinerAPI) GetWork() (work [3]string, err error) { - if !s.e.IsMining() { - if err := s.e.StartMining(0, ""); err != nil { - return work, err - } - } - if work, err = s.agent.GetWork(); err == nil { - return - } - glog.V(logger.Debug).Infof("%v", err) - return work, fmt.Errorf("mining not ready") -} - -// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined - // hash rate of all miners which submit work through this node. It accepts the miner hash rate and an identifier which - // must be unique between nodes. -func (s *PublicMinerAPI) SubmitHashrate(hashrate rpc.HexNumber, id common.Hash) bool { - s.agent.SubmitHashrate(id, hashrate.Uint64()) - return true -} - -// PrivateMinerAPI provides private RPC methods to control the miner. -// These methods can be abused by external users and must be considered insecure for use by untrusted users. -type PrivateMinerAPI struct { - e *Expanse -} - -// NewPrivateMinerAPI creates a new RPC service which controls the miner of this node. -func NewPrivateMinerAPI(e *Expanse) *PrivateMinerAPI { - return &PrivateMinerAPI{e: e} -} - -// Start the miner with the given number of threads. If threads is nil the number of -// workers started is equal to the number of logical CPUs that are usable by this process.
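// Note: GetWork/SubmitWork above implement the conventional external-miner protocol: fetch a
// 3-string work package, grind on it, and post the solution back. A sketch of a client
// round-trip over JSON-RPC (an illustration only; it assumes the node serves HTTP RPC on the
// 9656 port exposed by the Dockerfile and registers the usual eth_getWork method name):
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func call(url, method string, params []interface{}, result interface{}) error {
	payload, _ := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0", "id": 1, "method": method, "params": params,
	})
	resp, err := http.Post(url, "application/json", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var rpcResp struct {
		Result json.RawMessage `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&rpcResp); err != nil {
		return err
	}
	return json.Unmarshal(rpcResp.Result, result)
}

func main() {
	var work [3]string // [header pow-hash, DAG seed hash, boundary target]
	if err := call("http://localhost:9656", "eth_getWork", []interface{}{}, &work); err != nil {
		fmt.Println("getWork failed:", err)
		return
	}
	fmt.Println("header pow-hash:", work[0])
}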
-func (s *PrivateMinerAPI) Start(threads *rpc.HexNumber) (bool, error) { - s.e.StartAutoDAG() - - if threads == nil { - threads = rpc.NewHexNumber(runtime.NumCPU()) - } - - err := s.e.StartMining(threads.Int(), "") - if err == nil { - return true, nil - } - return false, err -} - -// Stop the miner -func (s *PrivateMinerAPI) Stop() bool { - s.e.StopMining() - return true -} - -// SetExtra sets the extra data string that is included when this miner mines a block. -func (s *PrivateMinerAPI) SetExtra(extra string) (bool, error) { - if err := s.e.Miner().SetExtra([]byte(extra)); err != nil { - return false, err - } - return true, nil -} - -// SetGasPrice sets the minimum accepted gas price for the miner. -func (s *PrivateMinerAPI) SetGasPrice(gasPrice rpc.HexNumber) bool { - s.e.Miner().SetGasPrice(gasPrice.BigInt()) - return true -} - -// SetEtherbase sets the etherbase of the miner -func (s *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool { - s.e.SetEtherbase(etherbase) - return true -} - -// StartAutoDAG starts auto DAG generation. This prevents the DAG from being generated on an epoch change, - // which would cause the node to stop mining during the generation process. -func (s *PrivateMinerAPI) StartAutoDAG() bool { - s.e.StartAutoDAG() - return true -} - -// StopAutoDAG stops auto DAG generation -func (s *PrivateMinerAPI) StopAutoDAG() bool { - s.e.StopAutoDAG() - return true -} - -// MakeDAG creates the new DAG for the given block number -func (s *PrivateMinerAPI) MakeDAG(blockNr rpc.BlockNumber) (bool, error) { - if err := ethash.MakeDAG(uint64(blockNr.Int64()), ""); err != nil { - return false, err - } - return true, nil -} - -// PublicTxPoolAPI offers an API for the transaction pool. It only operates on data that is non-confidential. -type PublicTxPoolAPI struct { - e *Expanse -} - -// NewPublicTxPoolAPI creates a new tx pool service that gives information about the transaction pool. -func NewPublicTxPoolAPI(e *Expanse) *PublicTxPoolAPI { - return &PublicTxPoolAPI{e} -} - -// Content returns the transactions contained within the transaction pool. -func (s *PublicTxPoolAPI) Content() map[string]map[string]map[string][]*RPCTransaction { - content := map[string]map[string]map[string][]*RPCTransaction{ - "pending": make(map[string]map[string][]*RPCTransaction), - "queued": make(map[string]map[string][]*RPCTransaction), - } - pending, queue := s.e.TxPool().Content() - - // Flatten the pending transactions - for account, batches := range pending { - dump := make(map[string][]*RPCTransaction) - for _, tx := range batches { - nonce := fmt.Sprintf("%d", tx.Nonce()) - dump[nonce] = []*RPCTransaction{newRPCPendingTransaction(tx)} - } - content["pending"][account.Hex()] = dump - } - // Flatten the queued transactions - for account, batches := range queue { - dump := make(map[string][]*RPCTransaction) - for _, tx := range batches { - nonce := fmt.Sprintf("%d", tx.Nonce()) - dump[nonce] = []*RPCTransaction{newRPCPendingTransaction(tx)} - } - content["queued"][account.Hex()] = dump - } - return content -} - -// Status returns the number of pending and queued transactions in the pool. -func (s *PublicTxPoolAPI) Status() map[string]*rpc.HexNumber { - pending, queue := s.e.TxPool().Stats() - return map[string]*rpc.HexNumber{ - "pending": rpc.NewHexNumber(pending), - "queued": rpc.NewHexNumber(queue), - } -} - -// Inspect retrieves the content of the transaction pool and flattens it into an -// easily inspectable list.
-func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string][]string { - content := map[string]map[string]map[string][]string{ - "pending": make(map[string]map[string][]string), - "queued": make(map[string]map[string][]string), - } - pending, queue := s.e.TxPool().Content() - - // Define a formatter to flatten a transaction into a string - var format = func(tx *types.Transaction) string { - if to := tx.To(); to != nil { - return fmt.Sprintf("%s: %v wei + %v × %v gas", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice()) - } - return fmt.Sprintf("contract creation: %v wei + %v × %v gas", tx.Value(), tx.Gas(), tx.GasPrice()) - } - // Flatten the pending transactions - for account, batches := range pending { - dump := make(map[string][]string) - for _, tx := range batches { - nonce := fmt.Sprintf("%d", tx.Nonce()) - dump[nonce] = []string{format(tx)} - } - content["pending"][account.Hex()] = dump - } - // Flatten the queued transactions - for account, batches := range queue { - dump := make(map[string][]string) - for _, tx := range batches { - nonce := fmt.Sprintf("%d", tx.Nonce()) - dump[nonce] = []string{format(tx)} - } - content["queued"][account.Hex()] = dump - } - return content -} - -// PublicAccountAPI provides an API to access accounts managed by this node. -// It offers only methods that can retrieve accounts. -type PublicAccountAPI struct { - am *accounts.Manager -} - -// NewPublicAccountAPI creates a new PublicAccountAPI. -func NewPublicAccountAPI(am *accounts.Manager) *PublicAccountAPI { - return &PublicAccountAPI{am: am} -} - -// Accounts returns the collection of accounts this node manages -func (s *PublicAccountAPI) Accounts() []accounts.Account { - return s.am.Accounts() -} - -// PrivateAccountAPI provides an API to access accounts managed by this node. -// It offers methods to create, (un)lock and list accounts. Some methods accept - // passwords and are therefore considered private by default. -type PrivateAccountAPI struct { - am *accounts.Manager - txPool *core.TxPool - txMu *sync.Mutex - gpo *GasPriceOracle -} - -// NewPrivateAccountAPI creates a new PrivateAccountAPI. -func NewPrivateAccountAPI(e *Expanse) *PrivateAccountAPI { - return &PrivateAccountAPI{ - am: e.accountManager, - txPool: e.txPool, - txMu: &e.txMu, - gpo: e.gpo, - } -} - -// ListAccounts will return a list of addresses for accounts this node manages. -func (s *PrivateAccountAPI) ListAccounts() []common.Address { - accounts := s.am.Accounts() - addresses := make([]common.Address, len(accounts)) - for i, acc := range accounts { - addresses[i] = acc.Address - } - return addresses -} - -// NewAccount will create a new account and return the address of the new account. -func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error) { - acc, err := s.am.NewAccount(password) - if err == nil { - return acc.Address, nil - } - return common.Address{}, err -} - -// ImportRawKey stores the given hex encoded ECDSA key into the key directory, -// encrypting it with the passphrase. -func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) { - hexkey, err := hex.DecodeString(privkey) - if err != nil { - return common.Address{}, err - } - - acc, err := s.am.ImportECDSA(crypto.ToECDSA(hexkey), password) - return acc.Address, err -} - -// UnlockAccount will unlock the account associated with the given address with -// the given password for duration seconds. If duration is nil it will use a -// default of 300 seconds.
It returns an indication if the account was unlocked. -func (s *PrivateAccountAPI) UnlockAccount(addr common.Address, password string, duration *rpc.HexNumber) (bool, error) { - if duration == nil { - duration = rpc.NewHexNumber(300) - } - a := accounts.Account{Address: addr} - d := time.Duration(duration.Int64()) * time.Second - if err := s.am.TimedUnlock(a, password, d); err != nil { - return false, err - } - return true, nil -} - -// LockAccount will lock the account associated with the given address when it's unlocked. -func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool { - return s.am.Lock(addr) == nil -} - -// SignAndSendTransaction will create a transaction from the given arguments and - tries to sign it with the key associated with args.From. If the given passwd isn't - able to decrypt the key it fails. -func (s *PrivateAccountAPI) SignAndSendTransaction(args SendTxArgs, passwd string) (common.Hash, error) { - args = prepareSendTxArgs(args, s.gpo) - - s.txMu.Lock() - defer s.txMu.Unlock() - - if args.Nonce == nil { - args.Nonce = rpc.NewHexNumber(s.txPool.State().GetNonce(args.From)) - } - - var tx *types.Transaction - if args.To == nil { - tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data)) - } else { - tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data)) - } - - signature, err := s.am.SignWithPassphrase(args.From, passwd, tx.SigHash().Bytes()) - if err != nil { - return common.Hash{}, err - } - - return submitTransaction(s.txPool, tx, signature) -} - -// PublicBlockChainAPI provides an API to access the Ethereum blockchain. -// It offers only methods that operate on public data that is freely available to anyone. -type PublicBlockChainAPI struct { - config *core.ChainConfig - bc *core.BlockChain - chainDb ethdb.Database - eventMux *event.TypeMux - muNewBlockSubscriptions sync.Mutex // protects newBlockSubscriptions - newBlockSubscriptions map[string]func(core.ChainEvent) error // callbacks for new block subscriptions - am *accounts.Manager - miner *miner.Miner - gpo *GasPriceOracle -} - -// NewPublicBlockChainAPI creates a new Ethereum blockchain API. -func NewPublicBlockChainAPI(config *core.ChainConfig, bc *core.BlockChain, m *miner.Miner, chainDb ethdb.Database, gpo *GasPriceOracle, eventMux *event.TypeMux, am *accounts.Manager) *PublicBlockChainAPI { - api := &PublicBlockChainAPI{ - config: config, - bc: bc, - miner: m, - chainDb: chainDb, - eventMux: eventMux, - am: am, - newBlockSubscriptions: make(map[string]func(core.ChainEvent) error), - gpo: gpo, - } - - go api.subscriptionLoop() - - return api -} - -// subscriptionLoop reads events from the global event mux and creates notifications for the matched subscriptions. -func (s *PublicBlockChainAPI) subscriptionLoop() { - sub := s.eventMux.Subscribe(core.ChainEvent{}) - for event := range sub.Chan() { - if chainEvent, ok := event.Data.(core.ChainEvent); ok { - s.muNewBlockSubscriptions.Lock() - for id, notifyOf := range s.newBlockSubscriptions { - if notifyOf(chainEvent) == rpc.ErrNotificationNotFound { - delete(s.newBlockSubscriptions, id) - } - } - s.muNewBlockSubscriptions.Unlock() - } - } -} - -// BlockNumber returns the block number of the chain head.
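// Note: subscriptionLoop above fans every core.ChainEvent out to the registered callbacks and
// prunes any subscription whose notify function reports rpc.ErrNotificationNotFound. The same
// prune-on-failure bookkeeping in a self-contained form (illustrative sketch only):
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errGone = errors.New("notification not found") // stand-in for rpc.ErrNotificationNotFound

type hub struct {
	mu   sync.Mutex
	subs map[string]func(event string) error
}

func (h *hub) publish(event string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for id, notify := range h.subs {
		if notify(event) == errGone {
			delete(h.subs, id) // deleting while ranging over a map is safe in Go
		}
	}
}

func main() {
	h := &hub{subs: map[string]func(string) error{
		"a": func(e string) error { fmt.Println("a got", e); return nil },
		"b": func(e string) error { return errGone }, // simulates a disconnected client
	}}
	h.publish("block #1")
	h.publish("block #2") // "b" has been pruned by now
}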
-func (s *PublicBlockChainAPI) BlockNumber() *big.Int { - return s.bc.CurrentHeader().Number -} - -// GetBalance returns the amount of wei for the given address in the state of the -// given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta -// block numbers are also allowed. -func (s *PublicBlockChainAPI) GetBalance(address common.Address, blockNr rpc.BlockNumber) (*big.Int, error) { - state, _, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb) - if state == nil || err != nil { - return nil, err - } - return state.GetBalance(address), nil -} - -// GetBlockByNumber returns the requested block. When blockNr is -1 the chain head is returned. When fullTx is true all -// transactions in the block are returned in full detail, otherwise only the transaction hash is returned. -func (s *PublicBlockChainAPI) GetBlockByNumber(blockNr rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { - if block := blockByNumber(s.miner, s.bc, blockNr); block != nil { - response, err := s.rpcOutputBlock(block, true, fullTx) - if err == nil && blockNr == rpc.PendingBlockNumber { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "logsBloom", "miner"} { - response[field] = nil - } - } - return response, err - } - return nil, nil -} - -// GetBlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full -// detail, otherwise only the transaction hash is returned. -func (s *PublicBlockChainAPI) GetBlockByHash(blockHash common.Hash, fullTx bool) (map[string]interface{}, error) { - if block := s.bc.GetBlock(blockHash); block != nil { - return s.rpcOutputBlock(block, true, fullTx) - } - return nil, nil -} - -// GetUncleByBlockNumberAndIndex returns the uncle block for the given block hash and index. When fullTx is true -// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned. -func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(blockNr rpc.BlockNumber, index rpc.HexNumber) (map[string]interface{}, error) { - if block := blockByNumber(s.miner, s.bc, blockNr); block != nil { - uncles := block.Uncles() - if index.Int() < 0 || index.Int() >= len(uncles) { - glog.V(logger.Debug).Infof("uncle block on index %d not found for block #%d", index.Int(), blockNr) - return nil, nil - } - block = types.NewBlockWithHeader(uncles[index.Int()]) - return s.rpcOutputBlock(block, false, false) - } - return nil, nil -} - -// GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index. When fullTx is true -// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned. 
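// Note: GetBlockByNumber above blanks out the fields that are meaningless for a pending block,
// since its hash, nonce, logs bloom and miner are unknown until it is actually mined. The same
// nulling step in isolation (illustrative sketch only):
package main

import "fmt"

func main() {
	response := map[string]interface{}{
		"number": 123, "hash": "0xabc", "nonce": "0x1", "logsBloom": "0x0", "miner": "0xdead",
	}
	for _, field := range []string{"hash", "nonce", "logsBloom", "miner"} {
		response[field] = nil // unknown until the block is mined
	}
	fmt.Println(response)
}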
-func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(blockHash common.Hash, index rpc.HexNumber) (map[string]interface{}, error) { - if block := s.bc.GetBlock(blockHash); block != nil { - uncles := block.Uncles() - if index.Int() < 0 || index.Int() >= len(uncles) { - glog.V(logger.Debug).Infof("uncle block on index %d not found for block %s", index.Int(), blockHash.Hex()) - return nil, nil - } - block = types.NewBlockWithHeader(uncles[index.Int()]) - return s.rpcOutputBlock(block, false, false) - } - return nil, nil -} - -// GetUncleCountByBlockNumber returns number of uncles in the block for the given block number -func (s *PublicBlockChainAPI) GetUncleCountByBlockNumber(blockNr rpc.BlockNumber) *rpc.HexNumber { - if block := blockByNumber(s.miner, s.bc, blockNr); block != nil { - return rpc.NewHexNumber(len(block.Uncles())) - } - return nil -} - -// GetUncleCountByBlockHash returns number of uncles in the block for the given block hash -func (s *PublicBlockChainAPI) GetUncleCountByBlockHash(blockHash common.Hash) *rpc.HexNumber { - if block := s.bc.GetBlock(blockHash); block != nil { - return rpc.NewHexNumber(len(block.Uncles())) - } - return nil -} - -// NewBlocksArgs allows the user to specify if the returned block should include transactions and in which format. -type NewBlocksArgs struct { - IncludeTransactions bool `json:"includeTransactions"` - TransactionDetails bool `json:"transactionDetails"` -} - -// NewBlocks triggers a new block event each time a block is appended to the chain. It accepts an argument which allows -// the caller to specify whether the output should contain transactions and in what format. -func (s *PublicBlockChainAPI) NewBlocks(ctx context.Context, args NewBlocksArgs) (rpc.Subscription, error) { - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return nil, rpc.ErrNotificationsUnsupported - } - - // create a subscription that will remove itself when unsubscribed/cancelled - subscription, err := notifier.NewSubscription(func(subId string) { - s.muNewBlockSubscriptions.Lock() - delete(s.newBlockSubscriptions, subId) - s.muNewBlockSubscriptions.Unlock() - }) - - if err != nil { - return nil, err - } - - // add a callback that is called on chain events which will format the block and notify the client - s.muNewBlockSubscriptions.Lock() - s.newBlockSubscriptions[subscription.ID()] = func(e core.ChainEvent) error { - notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails) - if err == nil { - return subscription.Notify(notification) - } - glog.V(logger.Warn).Info("unable to format block %v\n", err) - return nil - } - s.muNewBlockSubscriptions.Unlock() - return subscription, nil -} - -// GetCode returns the code stored at the given address in the state for the given block number. -func (s *PublicBlockChainAPI) GetCode(address common.Address, blockNr rpc.BlockNumber) (string, error) { - state, _, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb) - if state == nil || err != nil { - return "", err - } - res := state.GetCode(address) - if len(res) == 0 { // backwards compatibility - return "0x", nil - } - return common.ToHex(res), nil -} - -// GetStorageAt returns the storage from the state at the given address, key and -// block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block -// numbers are also allowed. 
-func (s *PublicBlockChainAPI) GetStorageAt(address common.Address, key string, blockNr rpc.BlockNumber) (string, error) { - state, _, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb) - if state == nil || err != nil { - return "0x", err - } - return state.GetState(address, common.HexToHash(key)).Hex(), nil -} - -// callmsg is the message type used for call transactions. -type callmsg struct { - from *state.StateObject - to *common.Address - gas, gasPrice *big.Int - value *big.Int - data []byte -} - -// accessor boilerplate to implement core.Message -func (m callmsg) From() (common.Address, error) { return m.from.Address(), nil } -func (m callmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil } -func (m callmsg) Nonce() uint64 { return m.from.Nonce() } -func (m callmsg) To() *common.Address { return m.to } -func (m callmsg) GasPrice() *big.Int { return m.gasPrice } -func (m callmsg) Gas() *big.Int { return m.gas } -func (m callmsg) Value() *big.Int { return m.value } -func (m callmsg) Data() []byte { return m.data } - -// CallArgs represents the arguments for a call. -type CallArgs struct { - From common.Address `json:"from"` - To *common.Address `json:"to"` - Gas *rpc.HexNumber `json:"gas"` - GasPrice *rpc.HexNumber `json:"gasPrice"` - Value rpc.HexNumber `json:"value"` - Data string `json:"data"` -} - -func (s *PublicBlockChainAPI) doCall(args CallArgs, blockNr rpc.BlockNumber) (string, *big.Int, error) { - // Fetch the state associated with the block number - stateDb, block, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb) - if stateDb == nil || err != nil { - return "0x", nil, err - } - stateDb = stateDb.Copy() - - // Retrieve the account state object to interact with - var from *state.StateObject - if args.From == (common.Address{}) { - accounts := s.am.Accounts() - if len(accounts) == 0 { - from = stateDb.GetOrNewStateObject(common.Address{}) - } else { - from = stateDb.GetOrNewStateObject(accounts[0].Address) - } - } else { - from = stateDb.GetOrNewStateObject(args.From) - } - from.SetBalance(common.MaxBig) - - // Assemble the CALL invocation - msg := callmsg{ - from: from, - to: args.To, - gas: args.Gas.BigInt(), - gasPrice: args.GasPrice.BigInt(), - value: args.Value.BigInt(), - data: common.FromHex(args.Data), - } - if msg.gas == nil { - msg.gas = big.NewInt(50000000) - } - if msg.gasPrice == nil { - msg.gasPrice = s.gpo.SuggestPrice() - } - - // Execute the call and return - vmenv := core.NewEnv(stateDb, s.config, s.bc, msg, block.Header(), s.config.VmConfig) - gp := new(core.GasPool).AddGas(common.MaxBig) - - res, requiredGas, _, err := core.NewStateTransition(vmenv, msg, gp).TransitionDb() - if len(res) == 0 { // backwards compatibility - return "0x", requiredGas, err - } - return common.ToHex(res), requiredGas, err -} - -// Call executes the given transaction on the state for the given block number. -// It doesn't make any changes in the state/blockchain and is useful to execute and retrieve values. -func (s *PublicBlockChainAPI) Call(args CallArgs, blockNr rpc.BlockNumber) (string, error) { - result, _, err := s.doCall(args, blockNr) - return result, err -} - -// EstimateGas returns an estimate of the amount of gas needed to execute the given transaction. -func (s *PublicBlockChainAPI) EstimateGas(args CallArgs) (*rpc.HexNumber, error) { - _, gas, err := s.doCall(args, rpc.PendingBlockNumber) - return rpc.NewHexNumber(gas), err -} - -// rpcOutputBlock converts the given block to the RPC output which depends on fullTx.
If inclTx is true transactions are -// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain -// transaction hashes. -func (s *PublicBlockChainAPI) rpcOutputBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { - fields := map[string]interface{}{ - "number": rpc.NewHexNumber(b.Number()), - "hash": b.Hash(), - "parentHash": b.ParentHash(), - "nonce": b.Header().Nonce, - "sha3Uncles": b.UncleHash(), - "logsBloom": b.Bloom(), - "stateRoot": b.Root(), - "miner": b.Coinbase(), - "difficulty": rpc.NewHexNumber(b.Difficulty()), - "totalDifficulty": rpc.NewHexNumber(s.bc.GetTd(b.Hash())), - "extraData": fmt.Sprintf("0x%x", b.Extra()), - "size": rpc.NewHexNumber(b.Size().Int64()), - "gasLimit": rpc.NewHexNumber(b.GasLimit()), - "gasUsed": rpc.NewHexNumber(b.GasUsed()), - "timestamp": rpc.NewHexNumber(b.Time()), - "transactionsRoot": b.TxHash(), - "receiptRoot": b.ReceiptHash(), - } - - if inclTx { - formatTx := func(tx *types.Transaction) (interface{}, error) { - return tx.Hash(), nil - } - - if fullTx { - formatTx = func(tx *types.Transaction) (interface{}, error) { - return newRPCTransaction(b, tx.Hash()) - } - } - - txs := b.Transactions() - transactions := make([]interface{}, len(txs)) - var err error - for i, tx := range b.Transactions() { - if transactions[i], err = formatTx(tx); err != nil { - return nil, err - } - } - fields["transactions"] = transactions - } - - uncles := b.Uncles() - uncleHashes := make([]common.Hash, len(uncles)) - for i, uncle := range uncles { - uncleHashes[i] = uncle.Hash() - } - fields["uncles"] = uncleHashes - - return fields, nil -} - -// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction -type RPCTransaction struct { - BlockHash common.Hash `json:"blockHash"` - BlockNumber *rpc.HexNumber `json:"blockNumber"` - From common.Address `json:"from"` - Gas *rpc.HexNumber `json:"gas"` - GasPrice *rpc.HexNumber `json:"gasPrice"` - Hash common.Hash `json:"hash"` - Input string `json:"input"` - Nonce *rpc.HexNumber `json:"nonce"` - To *common.Address `json:"to"` - TransactionIndex *rpc.HexNumber `json:"transactionIndex"` - Value *rpc.HexNumber `json:"value"` -} - -// newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation -func newRPCPendingTransaction(tx *types.Transaction) *RPCTransaction { - from, _ := tx.FromFrontier() - - return &RPCTransaction{ - From: from, - Gas: rpc.NewHexNumber(tx.Gas()), - GasPrice: rpc.NewHexNumber(tx.GasPrice()), - Hash: tx.Hash(), - Input: fmt.Sprintf("0x%x", tx.Data()), - Nonce: rpc.NewHexNumber(tx.Nonce()), - To: tx.To(), - Value: rpc.NewHexNumber(tx.Value()), - } -} - -// newRPCTransaction returns a transaction that will serialize to the RPC representation. 
-func newRPCTransactionFromBlockIndex(b *types.Block, txIndex int) (*RPCTransaction, error) { - if txIndex >= 0 && txIndex < len(b.Transactions()) { - tx := b.Transactions()[txIndex] - from, err := tx.FromFrontier() - if err != nil { - return nil, err - } - - return &RPCTransaction{ - BlockHash: b.Hash(), - BlockNumber: rpc.NewHexNumber(b.Number()), - From: from, - Gas: rpc.NewHexNumber(tx.Gas()), - GasPrice: rpc.NewHexNumber(tx.GasPrice()), - Hash: tx.Hash(), - Input: fmt.Sprintf("0x%x", tx.Data()), - Nonce: rpc.NewHexNumber(tx.Nonce()), - To: tx.To(), - TransactionIndex: rpc.NewHexNumber(txIndex), - Value: rpc.NewHexNumber(tx.Value()), - }, nil - } - - return nil, nil -} - -// newRPCTransaction returns a transaction that will serialize to the RPC representation. -func newRPCTransaction(b *types.Block, txHash common.Hash) (*RPCTransaction, error) { - for idx, tx := range b.Transactions() { - if tx.Hash() == txHash { - return newRPCTransactionFromBlockIndex(b, idx) - } - } - - return nil, nil -} - -// PublicTransactionPoolAPI exposes methods for the RPC interface -type PublicTransactionPoolAPI struct { - eventMux *event.TypeMux - chainDb ethdb.Database - gpo *GasPriceOracle - bc *core.BlockChain - miner *miner.Miner - am *accounts.Manager - txPool *core.TxPool - txMu *sync.Mutex - muPendingTxSubs sync.Mutex - pendingTxSubs map[string]rpc.Subscription -} - -// NewPublicTransactionPoolAPI creates a new RPC service with methods specific for the transaction pool. -func NewPublicTransactionPoolAPI(e *Expanse) *PublicTransactionPoolAPI { - api := &PublicTransactionPoolAPI{ - eventMux: e.eventMux, - gpo: e.gpo, - chainDb: e.chainDb, - bc: e.blockchain, - am: e.accountManager, - txPool: e.txPool, - txMu: &e.txMu, - miner: e.miner, - pendingTxSubs: make(map[string]rpc.Subscription), - } - go api.subscriptionLoop() - - return api -} - -// subscriptionLoop listens for events on the global event mux and creates notifications for subscriptions. -func (s *PublicTransactionPoolAPI) subscriptionLoop() { - sub := s.eventMux.Subscribe(core.TxPreEvent{}) - for event := range sub.Chan() { - tx := event.Data.(core.TxPreEvent) - if from, err := tx.Tx.FromFrontier(); err == nil { - if s.am.HasAddress(from) { - s.muPendingTxSubs.Lock() - for id, sub := range s.pendingTxSubs { - if sub.Notify(tx.Tx.Hash()) == rpc.ErrNotificationNotFound { - delete(s.pendingTxSubs, id) - } - } - s.muPendingTxSubs.Unlock() - } - } - } -} - -func getTransaction(chainDb ethdb.Database, txPool *core.TxPool, txHash common.Hash) (*types.Transaction, bool, error) { - txData, err := chainDb.Get(txHash.Bytes()) - isPending := false - tx := new(types.Transaction) - - if err == nil && len(txData) > 0 { - if err := rlp.DecodeBytes(txData, tx); err != nil { - return nil, isPending, err - } - } else { - // pending transaction? - tx = txPool.Get(txHash) - isPending = true - } - - return tx, isPending, nil -} - -// GetBlockTransactionCountByNumber returns the number of transactions in the block with the given block number. -func (s *PublicTransactionPoolAPI) GetBlockTransactionCountByNumber(blockNr rpc.BlockNumber) *rpc.HexNumber { - if block := blockByNumber(s.miner, s.bc, blockNr); block != nil { - return rpc.NewHexNumber(len(block.Transactions())) - } - return nil -} - -// GetBlockTransactionCountByHash returns the number of transactions in the block with the given hash. 
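// Note: getTransaction above resolves a hash in two steps: mined transactions live in the chain
// database, and anything not found there may still sit in the pending pool. The same lookup
// order against plain maps (illustrative sketch; unlike the original, it reports pending only
// when the pool actually holds the hash):
package main

import "fmt"

type store struct {
	mined   map[string]string // hash -> encoded tx, standing in for the chain db
	pending map[string]string // hash -> tx still in the pool
}

func (s *store) lookup(hash string) (tx string, isPending bool) {
	if tx, ok := s.mined[hash]; ok {
		return tx, false
	}
	tx = s.pending[hash] // "" when unknown, mirroring txPool.Get returning nil
	return tx, tx != ""
}

func main() {
	s := &store{
		mined:   map[string]string{"0x01": "tx-one"},
		pending: map[string]string{"0x02": "tx-two"},
	}
	fmt.Println(s.lookup("0x01")) // tx-one false
	fmt.Println(s.lookup("0x02")) // tx-two true
	fmt.Println(s.lookup("0x03")) // (empty) false
}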
-func (s *PublicTransactionPoolAPI) GetBlockTransactionCountByHash(blockHash common.Hash) *rpc.HexNumber { - if block := s.bc.GetBlock(blockHash); block != nil { - return rpc.NewHexNumber(len(block.Transactions())) - } - return nil -} - -// GetTransactionByBlockNumberAndIndex returns the transaction for the given block number and index. -func (s *PublicTransactionPoolAPI) GetTransactionByBlockNumberAndIndex(blockNr rpc.BlockNumber, index rpc.HexNumber) (*RPCTransaction, error) { - if block := blockByNumber(s.miner, s.bc, blockNr); block != nil { - return newRPCTransactionFromBlockIndex(block, index.Int()) - } - return nil, nil -} - -// GetTransactionByBlockHashAndIndex returns the transaction for the given block hash and index. -func (s *PublicTransactionPoolAPI) GetTransactionByBlockHashAndIndex(blockHash common.Hash, index rpc.HexNumber) (*RPCTransaction, error) { - if block := s.bc.GetBlock(blockHash); block != nil { - return newRPCTransactionFromBlockIndex(block, index.Int()) - } - return nil, nil -} - -// GetTransactionCount returns the number of transactions the given address has sent for the given block number -func (s *PublicTransactionPoolAPI) GetTransactionCount(address common.Address, blockNr rpc.BlockNumber) (*rpc.HexNumber, error) { - state, _, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb) - if state == nil || err != nil { - return nil, err - } - return rpc.NewHexNumber(state.GetNonce(address)), nil -} - -// getTransactionBlockData fetches the meta data for the given transaction from the chain database. This is useful to -// retrieve block information for a hash. It returns the block hash, block index and transaction index. -func getTransactionBlockData(chainDb ethdb.Database, txHash common.Hash) (common.Hash, uint64, uint64, error) { - var txBlock struct { - BlockHash common.Hash - BlockIndex uint64 - Index uint64 - } - - blockData, err := chainDb.Get(append(txHash.Bytes(), 0x0001)) - if err != nil { - return common.Hash{}, uint64(0), uint64(0), err - } - - reader := bytes.NewReader(blockData) - if err = rlp.Decode(reader, &txBlock); err != nil { - return common.Hash{}, uint64(0), uint64(0), err - } - - return txBlock.BlockHash, txBlock.BlockIndex, txBlock.Index, nil -} - -// GetTransactionByHash returns the transaction for the given hash -func (s *PublicTransactionPoolAPI) GetTransactionByHash(txHash common.Hash) (*RPCTransaction, error) { - var tx *types.Transaction - var isPending bool - var err error - - if tx, isPending, err = getTransaction(s.chainDb, s.txPool, txHash); err != nil { - glog.V(logger.Debug).Infof("%v\n", err) - return nil, nil - } else if tx == nil { - return nil, nil - } - - if isPending { - return newRPCPendingTransaction(tx), nil - } - - blockHash, _, _, err := getTransactionBlockData(s.chainDb, txHash) - if err != nil { - glog.V(logger.Debug).Infof("%v\n", err) - return nil, nil - } - - if block := s.bc.GetBlock(blockHash); block != nil { - return newRPCTransaction(block, txHash) - } - - return nil, nil -} - -// GetTransactionReceipt returns the transaction receipt for the given transaction hash. 
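// Note: getTransactionBlockData above RLP-decodes a three-field struct that maps a transaction
// hash to its block. A round-trip of such a struct with the same rlp package (illustrative
// sketch; assumes the go-expanse rlp and common packages used elsewhere in this file):
package main

import (
	"fmt"

	"github.com/expanse-org/go-expanse/common"
	"github.com/expanse-org/go-expanse/rlp"
)

type txBlock struct {
	BlockHash  common.Hash
	BlockIndex uint64
	Index      uint64
}

func main() {
	in := txBlock{BlockHash: common.HexToHash("0xbeef"), BlockIndex: 42, Index: 7}
	enc, err := rlp.EncodeToBytes(in)
	if err != nil {
		panic(err)
	}
	var out txBlock
	if err := rlp.DecodeBytes(enc, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%x -> block %d, tx index %d\n", out.BlockHash, out.BlockIndex, out.Index)
}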
-func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) { - receipt := core.GetReceipt(s.chainDb, txHash) - if receipt == nil { - glog.V(logger.Debug).Infof("receipt not found for transaction %s", txHash.Hex()) - return nil, nil - } - - tx, _, err := getTransaction(s.chainDb, s.txPool, txHash) - if err != nil { - glog.V(logger.Debug).Infof("%v\n", err) - return nil, nil - } - - txBlock, blockIndex, index, err := getTransactionBlockData(s.chainDb, txHash) - if err != nil { - glog.V(logger.Debug).Infof("%v\n", err) - return nil, nil - } - - from, err := tx.FromFrontier() - if err != nil { - glog.V(logger.Debug).Infof("%v\n", err) - return nil, nil - } - - fields := map[string]interface{}{ - "root": common.Bytes2Hex(receipt.PostState), - "blockHash": txBlock, - "blockNumber": rpc.NewHexNumber(blockIndex), - "transactionHash": txHash, - "transactionIndex": rpc.NewHexNumber(index), - "from": from, - "to": tx.To(), - "gasUsed": rpc.NewHexNumber(receipt.GasUsed), - "cumulativeGasUsed": rpc.NewHexNumber(receipt.CumulativeGasUsed), - "contractAddress": nil, - "logs": receipt.Logs, - } - - if receipt.Logs == nil { - fields["logs"] = []vm.Logs{} - } - - // If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation - if bytes.Compare(receipt.ContractAddress.Bytes(), bytes.Repeat([]byte{0}, 20)) != 0 { - fields["contractAddress"] = receipt.ContractAddress - } - - return fields, nil -} - -// sign is a helper function that signs a transaction with the private key of the given address. -func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { - signature, err := s.am.Sign(addr, tx.SigHash().Bytes()) - if err != nil { - return nil, err - } - return tx.WithSignature(signature) -} - -// SendTxArgs represents the arguments to submit a new transaction into the transaction pool. -type SendTxArgs struct { - From common.Address `json:"from"` - To *common.Address `json:"to"` - Gas *rpc.HexNumber `json:"gas"` - GasPrice *rpc.HexNumber `json:"gasPrice"` - Value *rpc.HexNumber `json:"value"` - Data string `json:"data"` - Nonce *rpc.HexNumber `json:"nonce"` -} - -// prepareSendTxArgs is a helper function that fills in default values for unspecified tx fields. -func prepareSendTxArgs(args SendTxArgs, gpo *GasPriceOracle) SendTxArgs { - if args.Gas == nil { - args.Gas = rpc.NewHexNumber(defaultGas) - } - if args.GasPrice == nil { - args.GasPrice = rpc.NewHexNumber(gpo.SuggestPrice()) - } - if args.Value == nil { - args.Value = rpc.NewHexNumber(0) - } - return args -} - -// submitTransaction is a helper function that submits tx to txPool and creates a log entry. -func submitTransaction(txPool *core.TxPool, tx *types.Transaction, signature []byte) (common.Hash, error) { - signedTx, err := tx.WithSignature(signature) - if err != nil { - return common.Hash{}, err - } - - txPool.SetLocal(signedTx) - if err := txPool.Add(signedTx); err != nil { - return common.Hash{}, err - } - - if signedTx.To() == nil { - from, _ := signedTx.From() - addr := crypto.CreateAddress(from, signedTx.Nonce()) - glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex()) - } else { - glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex()) - } - - return signedTx.Hash(), nil -} - -// SendTransaction creates a transaction for the given argument, signs it and submits it to the -// transaction pool.
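// Note: when submitTransaction above logs a contract creation, the new contract's address is
// already fixed by (sender, nonce): crypto.CreateAddress takes the last 20 bytes of
// Keccak256(RLP([sender, nonce])). A sketch of that rule (illustrative only; assumes
// golang.org/x/crypto/sha3 alongside the go-expanse rlp and common packages):
package main

import (
	"fmt"

	"github.com/expanse-org/go-expanse/common"
	"github.com/expanse-org/go-expanse/rlp"
	"golang.org/x/crypto/sha3"
)

func createAddress(sender common.Address, nonce uint64) common.Address {
	enc, _ := rlp.EncodeToBytes([]interface{}{sender, nonce})
	h := sha3.NewLegacyKeccak256()
	h.Write(enc)
	return common.BytesToAddress(h.Sum(nil)[12:])
}

func main() {
	from := common.HexToAddress("0x0000000000000000000000000000000000000001")
	fmt.Printf("contract at nonce 0: %x\n", createAddress(from, 0))
}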
-func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash, error) { - args = prepareSendTxArgs(args, s.gpo) - - s.txMu.Lock() - defer s.txMu.Unlock() - - if args.Nonce == nil { - args.Nonce = rpc.NewHexNumber(s.txPool.State().GetNonce(args.From)) - } - - var tx *types.Transaction - if args.To == nil { - tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data)) - } else { - tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data)) - } - - signature, err := s.am.Sign(args.From, tx.SigHash().Bytes()) - if err != nil { - return common.Hash{}, err - } - - return submitTransaction(s.txPool, tx, signature) -} - -// SendRawTransaction will add the signed transaction to the transaction pool. -// The sender is responsible for signing the transaction and using the correct nonce. -func (s *PublicTransactionPoolAPI) SendRawTransaction(encodedTx string) (string, error) { - tx := new(types.Transaction) - if err := rlp.DecodeBytes(common.FromHex(encodedTx), tx); err != nil { - return "", err - } - - s.txPool.SetLocal(tx) - if err := s.txPool.Add(tx); err != nil { - return "", err - } - - if tx.To() == nil { - from, err := tx.FromFrontier() - if err != nil { - return "", err - } - addr := crypto.CreateAddress(from, tx.Nonce()) - glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr) - } else { - glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To()) - } - - return tx.Hash().Hex(), nil -} - -// Sign signs the given hash using the key that matches the address. The key must be -// unlocked in order to sign the hash. -func (s *PublicTransactionPoolAPI) Sign(addr common.Address, hash common.Hash) (string, error) { - signature, error := s.am.Sign(addr, hash[:]) - return common.ToHex(signature), error -} - -// SignTransactionArgs represents the arguments to sign a transaction. -type SignTransactionArgs struct { - From common.Address - To *common.Address - Nonce *rpc.HexNumber - Value *rpc.HexNumber - Gas *rpc.HexNumber - GasPrice *rpc.HexNumber - Data string - - BlockNumber int64 -} - -// Tx is a helper object for argument and return values -type Tx struct { - tx *types.Transaction - - To *common.Address `json:"to"` - From common.Address `json:"from"` - Nonce *rpc.HexNumber `json:"nonce"` - Value *rpc.HexNumber `json:"value"` - Data string `json:"data"` - GasLimit *rpc.HexNumber `json:"gas"` - GasPrice *rpc.HexNumber `json:"gasPrice"` - Hash common.Hash `json:"hash"` -} - -// UnmarshalJSON parses JSON data into tx. 
-func (tx *Tx) UnmarshalJSON(b []byte) (err error) { - req := struct { - To *common.Address `json:"to"` - From common.Address `json:"from"` - Nonce *rpc.HexNumber `json:"nonce"` - Value *rpc.HexNumber `json:"value"` - Data string `json:"data"` - GasLimit *rpc.HexNumber `json:"gas"` - GasPrice *rpc.HexNumber `json:"gasPrice"` - Hash common.Hash `json:"hash"` - }{} - - if err := json.Unmarshal(b, &req); err != nil { - return err - } - - tx.To = req.To - tx.From = req.From - tx.Nonce = req.Nonce - tx.Value = req.Value - tx.Data = req.Data - tx.GasLimit = req.GasLimit - tx.GasPrice = req.GasPrice - tx.Hash = req.Hash - - data := common.Hex2Bytes(tx.Data) - - if tx.Nonce == nil { - return fmt.Errorf("need nonce") - } - if tx.Value == nil { - tx.Value = rpc.NewHexNumber(0) - } - if tx.GasLimit == nil { - tx.GasLimit = rpc.NewHexNumber(0) - } - if tx.GasPrice == nil { - tx.GasPrice = rpc.NewHexNumber(int64(50000000000)) - } - - if req.To == nil { - tx.tx = types.NewContractCreation(tx.Nonce.Uint64(), tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data) - } else { - tx.tx = types.NewTransaction(tx.Nonce.Uint64(), *tx.To, tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data) - } - - return nil -} - -// SignTransactionResult represents a RLP encoded signed transaction. -type SignTransactionResult struct { - Raw string `json:"raw"` - Tx *Tx `json:"tx"` -} - -func newTx(t *types.Transaction) *Tx { - from, _ := t.FromFrontier() - return &Tx{ - tx: t, - To: t.To(), - From: from, - Value: rpc.NewHexNumber(t.Value()), - Nonce: rpc.NewHexNumber(t.Nonce()), - Data: "0x" + common.Bytes2Hex(t.Data()), - GasLimit: rpc.NewHexNumber(t.Gas()), - GasPrice: rpc.NewHexNumber(t.GasPrice()), - Hash: t.Hash(), - } -} - -// SignTransaction will sign the given transaction with the from account. -// The node needs to have the private key of the account corresponding with -// the given from address and it needs to be unlocked. -func (s *PublicTransactionPoolAPI) SignTransaction(args SignTransactionArgs) (*SignTransactionResult, error) { - if args.Gas == nil { - args.Gas = rpc.NewHexNumber(defaultGas) - } - if args.GasPrice == nil { - args.GasPrice = rpc.NewHexNumber(s.gpo.SuggestPrice()) - } - if args.Value == nil { - args.Value = rpc.NewHexNumber(0) - } - - s.txMu.Lock() - defer s.txMu.Unlock() - - if args.Nonce == nil { - args.Nonce = rpc.NewHexNumber(s.txPool.State().GetNonce(args.From)) - } - - var tx *types.Transaction - if args.To == nil { - tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data)) - } else { - tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data)) - } - - signedTx, err := s.sign(args.From, tx) - if err != nil { - return nil, err - } - - data, err := rlp.EncodeToBytes(signedTx) - if err != nil { - return nil, err - } - - return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(signedTx)}, nil -} - -// PendingTransactions returns the transactions that are in the transaction pool and have a from address that is one of -// the accounts this node manages. 
-func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
-	pending := s.txPool.Pending()
-	transactions := make([]*RPCTransaction, 0, len(pending))
-	for addr, txs := range pending {
-		if s.am.HasAddress(addr) {
-			for _, tx := range txs {
-				transactions = append(transactions, newRPCPendingTransaction(tx))
-			}
-		}
-	}
-	return transactions
-}
-
-// NewPendingTransactions creates a subscription that is triggered each time a transaction enters the transaction pool
-// and is sent from one of the accounts this node manages.
-func (s *PublicTransactionPoolAPI) NewPendingTransactions(ctx context.Context) (rpc.Subscription, error) {
-	notifier, supported := rpc.NotifierFromContext(ctx)
-	if !supported {
-		return nil, rpc.ErrNotificationsUnsupported
-	}
-
-	subscription, err := notifier.NewSubscription(func(id string) {
-		s.muPendingTxSubs.Lock()
-		delete(s.pendingTxSubs, id)
-		s.muPendingTxSubs.Unlock()
-	})
-
-	if err != nil {
-		return nil, err
-	}
-
-	s.muPendingTxSubs.Lock()
-	s.pendingTxSubs[subscription.ID()] = subscription
-	s.muPendingTxSubs.Unlock()
-
-	return subscription, nil
-}
-
-// Resend accepts an existing transaction and a new gas price and limit. It will remove the given transaction from the
-// pool and reinsert it with the new gas price and limit.
-func (s *PublicTransactionPoolAPI) Resend(tx Tx, gasPrice, gasLimit *rpc.HexNumber) (common.Hash, error) {
-	pending := s.txPool.Pending()
-	for addr, txs := range pending {
-		for _, p := range txs {
-			if addr == tx.From && p.SigHash() == tx.tx.SigHash() {
-				if gasPrice == nil {
-					gasPrice = rpc.NewHexNumber(tx.tx.GasPrice())
-				}
-				if gasLimit == nil {
-					gasLimit = rpc.NewHexNumber(tx.tx.Gas())
-				}
-
-				// Note: the constructors expect (nonce, [to,] amount, gasLimit, gasPrice, data),
-				// matching the other call sites in this file; gas limit comes before gas price.
-				var newTx *types.Transaction
-				if tx.tx.To() == nil {
-					newTx = types.NewContractCreation(tx.tx.Nonce(), tx.tx.Value(), gasLimit.BigInt(), gasPrice.BigInt(), tx.tx.Data())
-				} else {
-					newTx = types.NewTransaction(tx.tx.Nonce(), *tx.tx.To(), tx.tx.Value(), gasLimit.BigInt(), gasPrice.BigInt(), tx.tx.Data())
-				}
-
-				signedTx, err := s.sign(tx.From, newTx)
-				if err != nil {
-					return common.Hash{}, err
-				}
-
-				s.txPool.Remove(tx.Hash)
-				if err = s.txPool.Add(signedTx); err != nil {
-					return common.Hash{}, err
-				}
-
-				return signedTx.Hash(), nil
-			}
-		}
-	}
-
-	return common.Hash{}, fmt.Errorf("Transaction %#x not found", tx.Hash)
-}
-
-// PrivateAdminAPI is the collection of Ethereum APIs exposed over the private
-// admin endpoint.
-type PrivateAdminAPI struct {
-	exp *Expanse
-}
-
-// NewPrivateAdminAPI creates a new API definition for the private admin methods
-// of the Expanse service.
-func NewPrivateAdminAPI(exp *Expanse) *PrivateAdminAPI {
-	return &PrivateAdminAPI{exp: exp}
-}
-
-// SetSolc sets the Solidity compiler path to be used by the node.
-func (api *PrivateAdminAPI) SetSolc(path string) (string, error) {
-	solc, err := api.exp.SetSolc(path)
-	if err != nil {
-		return "", err
-	}
-	return solc.Info(), nil
-}
-
-// ExportChain exports the current blockchain into a local file.
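-// An illustrative console call (file path hypothetical; the method is served
-// from the private "admin" namespace registered in APIs()):
-//
-//	admin.exportChain("/tmp/expanse-chain.rlp")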
-func (api *PrivateAdminAPI) ExportChain(file string) (bool, error) {
-	// Make sure we can create the file to export into
-	out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
-	if err != nil {
-		return false, err
-	}
-	defer out.Close()
-
-	// Export the blockchain
-	if err := api.exp.BlockChain().Export(out); err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
-	for _, b := range bs {
-		if !chain.HasBlock(b.Hash()) {
-			return false
-		}
-	}
-
-	return true
-}
-
-// ImportChain imports a blockchain from a local file.
-func (api *PrivateAdminAPI) ImportChain(file string) (bool, error) {
-	// Make sure we can access the file to import
-	in, err := os.Open(file)
-	if err != nil {
-		return false, err
-	}
-	defer in.Close()
-
-	// Run the actual import in pre-configured batches
-	stream := rlp.NewStream(in, 0)
-
-	blocks, index := make([]*types.Block, 0, 2500), 0
-	for batch := 0; ; batch++ {
-		// Load a batch of blocks from the input file
-		for len(blocks) < cap(blocks) {
-			block := new(types.Block)
-			if err := stream.Decode(block); err == io.EOF {
-				break
-			} else if err != nil {
-				return false, fmt.Errorf("block %d: failed to parse: %v", index, err)
-			}
-			blocks = append(blocks, block)
-			index++
-		}
-		if len(blocks) == 0 {
-			break
-		}
-
-		if hasAllBlocks(api.exp.BlockChain(), blocks) {
-			blocks = blocks[:0]
-			continue
-		}
-		// Import the batch and reset the buffer
-		if _, err := api.exp.BlockChain().InsertChain(blocks); err != nil {
-			return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err)
-		}
-		blocks = blocks[:0]
-	}
-	return true, nil
-}
-
-// PublicDebugAPI is the collection of Ethereum APIs exposed over the public
-// debugging endpoint.
-type PublicDebugAPI struct {
-	exp *Expanse
-}
-
-// NewPublicDebugAPI creates a new API definition for the public debug methods
-// of the Expanse service.
-func NewPublicDebugAPI(exp *Expanse) *PublicDebugAPI {
-	return &PublicDebugAPI{exp: exp}
-}
-
-// DumpBlock retrieves the entire state of the database at a given block.
-func (api *PublicDebugAPI) DumpBlock(number uint64) (state.Dump, error) {
-	block := api.exp.BlockChain().GetBlockByNumber(number)
-	if block == nil {
-		return state.Dump{}, fmt.Errorf("block #%d not found", number)
-	}
-	stateDb, err := api.exp.BlockChain().StateAt(block.Root())
-	if err != nil {
-		return state.Dump{}, err
-	}
-	return stateDb.RawDump(), nil
-}
-
-// GetBlockRlp retrieves the RLP-encoded form of a single block.
-func (api *PublicDebugAPI) GetBlockRlp(number uint64) (string, error) {
-	block := api.exp.BlockChain().GetBlockByNumber(number)
-	if block == nil {
-		return "", fmt.Errorf("block #%d not found", number)
-	}
-	encoded, err := rlp.EncodeToBytes(block)
-	if err != nil {
-		return "", err
-	}
-	return fmt.Sprintf("%x", encoded), nil
-}
-
-// PrintBlock retrieves a block and returns its pretty-printed form.
-func (api *PublicDebugAPI) PrintBlock(number uint64) (string, error) {
-	block := api.exp.BlockChain().GetBlockByNumber(number)
-	if block == nil {
-		return "", fmt.Errorf("block #%d not found", number)
-	}
-	return fmt.Sprintf("%s", block), nil
-}
-
-// SeedHash retrieves the seed hash of a block.
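-// The seed hash identifies the ethash DAG epoch the block belongs to. An
-// illustrative console call (arbitrary block number; the method is served
-// from the public "debug" namespace registered in APIs()):
-//
-//	debug.seedHash(30000)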
-func (api *PublicDebugAPI) SeedHash(number uint64) (string, error) {
-	block := api.exp.BlockChain().GetBlockByNumber(number)
-	if block == nil {
-		return "", fmt.Errorf("block #%d not found", number)
-	}
-	hash, err := ethash.GetSeedHash(number)
-	if err != nil {
-		return "", err
-	}
-	return fmt.Sprintf("0x%x", hash), nil
-}
-
-// PrivateDebugAPI is the collection of Ethereum APIs exposed over the private
-// debugging endpoint.
-type PrivateDebugAPI struct {
-	config *core.ChainConfig
-	exp    *Expanse
-}
-
-// NewPrivateDebugAPI creates a new API definition for the private debug methods
-// of the Expanse service.
-func NewPrivateDebugAPI(config *core.ChainConfig, exp *Expanse) *PrivateDebugAPI {
-	return &PrivateDebugAPI{config: config, exp: exp}
-}
-
-// ChaindbProperty returns leveldb properties of the chain database.
-func (api *PrivateDebugAPI) ChaindbProperty(property string) (string, error) {
-	ldb, ok := api.exp.chainDb.(interface {
-		LDB() *leveldb.DB
-	})
-	if !ok {
-		return "", fmt.Errorf("chaindbProperty does not work for memory databases")
-	}
-	if property == "" {
-		property = "leveldb.stats"
-	} else if !strings.HasPrefix(property, "leveldb.") {
-		property = "leveldb." + property
-	}
-	return ldb.LDB().GetProperty(property)
-}
-
-// BlockTraceResult is the returned value when replaying a block to check for
-// consensus results and full VM trace logs for all included transactions.
-type BlockTraceResult struct {
-	Validated  bool           `json:"validated"`
-	StructLogs []structLogRes `json:"structLogs"`
-	Error      string         `json:"error"`
-}
-
-// TraceBlock processes the given block's RLP but does not import the block into
-// the chain.
-func (api *PrivateDebugAPI) TraceBlock(blockRlp []byte, config *vm.Config) BlockTraceResult {
-	var block types.Block
-	err := rlp.Decode(bytes.NewReader(blockRlp), &block)
-	if err != nil {
-		return BlockTraceResult{Error: fmt.Sprintf("could not decode block: %v", err)}
-	}
-
-	validated, logs, err := api.traceBlock(&block, config)
-	return BlockTraceResult{
-		Validated:  validated,
-		StructLogs: formatLogs(logs),
-		Error:      formatError(err),
-	}
-}
-
-// TraceBlockFromFile loads the block's RLP from the given file name and attempts to
-// process it but does not import the block into the chain.
-func (api *PrivateDebugAPI) TraceBlockFromFile(file string, config *vm.Config) BlockTraceResult {
-	blockRlp, err := ioutil.ReadFile(file)
-	if err != nil {
-		return BlockTraceResult{Error: fmt.Sprintf("could not read file: %v", err)}
-	}
-	return api.TraceBlock(blockRlp, config)
-}
-
-// TraceBlockByNumber processes the block by canonical block number.
-func (api *PrivateDebugAPI) TraceBlockByNumber(number uint64, config *vm.Config) BlockTraceResult {
-	// Fetch the block that we aim to reprocess
-	block := api.exp.BlockChain().GetBlockByNumber(number)
-	if block == nil {
-		return BlockTraceResult{Error: fmt.Sprintf("block #%d not found", number)}
-	}
-
-	validated, logs, err := api.traceBlock(block, config)
-	return BlockTraceResult{
-		Validated:  validated,
-		StructLogs: formatLogs(logs),
-		Error:      formatError(err),
-	}
-}
-
-// TraceBlockByHash processes the block by hash.
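-// An illustrative console invocation (hash made up; served from the private
-// "debug" namespace), passing null to trace with the default vm.Config:
-//
-//	debug.traceBlockByHash("0x9b2d...", null)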
-func (api *PrivateDebugAPI) TraceBlockByHash(hash common.Hash, config *vm.Config) BlockTraceResult {
-	// Fetch the block that we aim to reprocess
-	block := api.exp.BlockChain().GetBlock(hash)
-	if block == nil {
-		return BlockTraceResult{Error: fmt.Sprintf("block #%x not found", hash)}
-	}
-
-	validated, logs, err := api.traceBlock(block, config)
-	return BlockTraceResult{
-		Validated:  validated,
-		StructLogs: formatLogs(logs),
-		Error:      formatError(err),
-	}
-}
-
-// TraceCollector collects EVM structured logs.
-//
-// TraceCollector implements vm.Collector
-type TraceCollector struct {
-	traces []vm.StructLog
-}
-
-// AddStructLog adds a structured log.
-func (t *TraceCollector) AddStructLog(slog vm.StructLog) {
-	t.traces = append(t.traces, slog)
-}
-
-// traceBlock processes the given block but does not save the state.
-func (api *PrivateDebugAPI) traceBlock(block *types.Block, config *vm.Config) (bool, []vm.StructLog, error) {
-	// Validate and reprocess the block
-	var (
-		blockchain = api.exp.BlockChain()
-		validator  = blockchain.Validator()
-		processor  = blockchain.Processor()
-		collector  = &TraceCollector{}
-	)
-	if config == nil {
-		config = new(vm.Config)
-	}
-	config.Debug = true // make sure debug is set.
-	config.Logger.Collector = collector
-
-	if err := core.ValidateHeader(api.config, blockchain.AuxValidator(), block.Header(), blockchain.GetHeader(block.ParentHash()), true, false); err != nil {
-		return false, collector.traces, err
-	}
-	statedb, err := blockchain.StateAt(blockchain.GetBlock(block.ParentHash()).Root())
-	if err != nil {
-		return false, collector.traces, err
-	}
-
-	receipts, _, usedGas, err := processor.Process(block, statedb, *config)
-	if err != nil {
-		return false, collector.traces, err
-	}
-	if err := validator.ValidateState(block, blockchain.GetBlock(block.ParentHash()), statedb, receipts, usedGas); err != nil {
-		return false, collector.traces, err
-	}
-	return true, collector.traces, nil
-}
-
-// SetHead rewinds the head of the blockchain to a previous block.
-func (api *PrivateDebugAPI) SetHead(number uint64) {
-	api.exp.BlockChain().SetHead(number)
-}
-
-// ExecutionResult groups all structured logs emitted by the EVM
-// while replaying a transaction in debug mode as well as the amount of
-// gas used and the return value
-type ExecutionResult struct {
-	Gas         *big.Int       `json:"gas"`
-	ReturnValue string         `json:"returnValue"`
-	StructLogs  []structLogRes `json:"structLogs"`
-}
-
-// structLogRes stores a structured log emitted by the EVM while replaying a
-// transaction in debug mode
-type structLogRes struct {
-	Pc      uint64            `json:"pc"`
-	Op      string            `json:"op"`
-	Gas     *big.Int          `json:"gas"`
-	GasCost *big.Int          `json:"gasCost"`
-	Depth   int               `json:"depth"`
-	Error   string            `json:"error"`
-	Stack   []string          `json:"stack"`
-	Memory  []string          `json:"memory"`
-	Storage map[string]string `json:"storage"`
-}
-
-// formatLogs formats EVM returned structured logs for JSON output
-func formatLogs(structLogs []vm.StructLog) []structLogRes {
-	formattedStructLogs := make([]structLogRes, len(structLogs))
-	for index, trace := range structLogs {
-		formattedStructLogs[index] = structLogRes{
-			Pc:      trace.Pc,
-			Op:      trace.Op.String(),
-			Gas:     trace.Gas,
-			GasCost: trace.GasCost,
-			Depth:   trace.Depth,
-			Error:   formatError(trace.Err),
-			Stack:   make([]string, len(trace.Stack)),
-			Storage: make(map[string]string),
-		}
-
-		for i, stackValue := range trace.Stack {
-			formattedStructLogs[index].Stack[i] = fmt.Sprintf("%x", common.LeftPadBytes(stackValue.Bytes(), 32))
-		}
-
-		for i := 0; i+32 <= len(trace.Memory); i += 32 {
-			formattedStructLogs[index].Memory = append(formattedStructLogs[index].Memory, fmt.Sprintf("%x", trace.Memory[i:i+32]))
-		}
-
-		for i, storageValue := range trace.Storage {
-			formattedStructLogs[index].Storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue)
-		}
-	}
-	return formattedStructLogs
-}
-
-// formatError formats a Go error into either an empty string or the data content
-// of the error itself.
-func formatError(err error) string {
-	if err == nil {
-		return ""
-	}
-	return err.Error()
-}
-
-// TraceTransaction traces the execution of a transaction in the EVM and
-// returns its structured logs as a JSON object.
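-// Each element of StructLogs serializes per structLogRes above; a single
-// made-up entry could render as:
-//
-//	{"pc": 0, "op": "PUSH1", "gas": 978856, "gasCost": 3, "depth": 1,
-//	 "error": "", "stack": [], "memory": null, "storage": {}}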
-func (api *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ExecutionResult, error) { - if logger == nil { - logger = new(vm.LogConfig) - } - // Retrieve the tx from the chain and the containing block - tx, blockHash, _, txIndex := core.GetTransaction(api.exp.ChainDb(), txHash) - if tx == nil { - return nil, fmt.Errorf("transaction %x not found", txHash) - } - block := api.exp.BlockChain().GetBlock(blockHash) - if block == nil { - return nil, fmt.Errorf("block %x not found", blockHash) - } - // Create the state database to mutate and eventually trace - parent := api.exp.BlockChain().GetBlock(block.ParentHash()) - if parent == nil { - return nil, fmt.Errorf("block parent %x not found", block.ParentHash()) - } - stateDb, err := api.exp.BlockChain().StateAt(parent.Root()) - if err != nil { - return nil, err - } - // Mutate the state and trace the selected transaction - for idx, tx := range block.Transactions() { - // Assemble the transaction call message - from, err := tx.FromFrontier() - if err != nil { - return nil, fmt.Errorf("sender retrieval failed: %v", err) - } - msg := callmsg{ - from: stateDb.GetOrNewStateObject(from), - to: tx.To(), - gas: tx.Gas(), - gasPrice: tx.GasPrice(), - value: tx.Value(), - data: tx.Data(), - } - // Mutate the state if we haven't reached the tracing transaction yet - if uint64(idx) < txIndex { - vmenv := core.NewEnv(stateDb, api.config, api.exp.BlockChain(), msg, block.Header(), vm.Config{}) - _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())) - if err != nil { - return nil, fmt.Errorf("mutation failed: %v", err) - } - stateDb.DeleteSuicides() - continue - } - // Otherwise trace the transaction and return - vmenv := core.NewEnv(stateDb, api.config, api.exp.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger}) - ret, gas, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())) - if err != nil { - return nil, fmt.Errorf("tracing failed: %v", err) - } - return &ExecutionResult{ - Gas: gas, - ReturnValue: fmt.Sprintf("%x", ret), - StructLogs: formatLogs(vmenv.StructLogs()), - }, nil - } - return nil, errors.New("database inconsistency") -} - -// TraceCall executes a call and returns the amount of gas, created logs and optionally returned values. 
-func (s *PublicBlockChainAPI) TraceCall(args CallArgs, blockNr rpc.BlockNumber) (*ExecutionResult, error) { - // Fetch the state associated with the block number - stateDb, block, err := stateAndBlockByNumber(s.miner, s.bc, blockNr, s.chainDb) - if stateDb == nil || err != nil { - return nil, err - } - stateDb = stateDb.Copy() - - // Retrieve the account state object to interact with - var from *state.StateObject - if args.From == (common.Address{}) { - accounts := s.am.Accounts() - if len(accounts) == 0 { - from = stateDb.GetOrNewStateObject(common.Address{}) - } else { - from = stateDb.GetOrNewStateObject(accounts[0].Address) - } - } else { - from = stateDb.GetOrNewStateObject(args.From) - } - from.SetBalance(common.MaxBig) - - // Assemble the CALL invocation - msg := callmsg{ - from: from, - to: args.To, - gas: args.Gas.BigInt(), - gasPrice: args.GasPrice.BigInt(), - value: args.Value.BigInt(), - data: common.FromHex(args.Data), - } - if msg.gas.Cmp(common.Big0) == 0 { - msg.gas = big.NewInt(50000000) - } - if msg.gasPrice.Cmp(common.Big0) == 0 { - msg.gasPrice = new(big.Int).Mul(big.NewInt(50), common.Shannon) - } - - // Execute the call and return - vmenv := core.NewEnv(stateDb, s.config, s.bc, msg, block.Header(), vm.Config{ - Debug: true, - }) - gp := new(core.GasPool).AddGas(common.MaxBig) - - ret, gas, err := core.ApplyMessage(vmenv, msg, gp) - return &ExecutionResult{ - Gas: gas, - ReturnValue: fmt.Sprintf("%x", ret), - StructLogs: formatLogs(vmenv.StructLogs()), - }, nil -} - -// PublicNetAPI offers network related RPC methods -type PublicNetAPI struct { - net *p2p.Server - networkVersion int -} - -// NewPublicNetAPI creates a new net API instance. -func NewPublicNetAPI(net *p2p.Server, networkVersion int) *PublicNetAPI { - return &PublicNetAPI{net, networkVersion} -} - -// Listening returns an indication if the node is listening for network connections. -func (s *PublicNetAPI) Listening() bool { - return true // always listening -} - -// PeerCount returns the number of connected peers -func (s *PublicNetAPI) PeerCount() *rpc.HexNumber { - return rpc.NewHexNumber(s.net.PeerCount()) -} - -// Version returns the current ethereum protocol version. -func (s *PublicNetAPI) Version() string { - return fmt.Sprintf("%d", s.networkVersion) -} diff --git a/exp/backend.go b/exp/backend.go deleted file mode 100644 index 7038da3798c6f..0000000000000 --- a/exp/backend.go +++ /dev/null @@ -1,667 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -// Package exp implements the Expanse protocol. 
-package exp - -import ( - "bytes" - "errors" - "fmt" - "math/big" - "os" - "path/filepath" - "regexp" - "strings" - "sync" - "time" - - "github.com/expanse-org/ethash" - "github.com/expanse-org/go-expanse/accounts" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/common/compiler" - "github.com/expanse-org/go-expanse/common/httpclient" - "github.com/expanse-org/go-expanse/common/registrar/ethreg" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/exp/downloader" - "github.com/expanse-org/go-expanse/exp/filters" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/miner" - "github.com/expanse-org/go-expanse/node" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/rlp" - "github.com/expanse-org/go-expanse/rpc" -) - -const ( - epochLength = 30000 - ethashRevision = 23 - - autoDAGcheckInterval = 10 * time.Hour - autoDAGepochHeight = epochLength / 2 -) - -var ( - datadirInUseErrnos = map[uint]bool{11: true, 32: true, 35: true} - portInUseErrRE = regexp.MustCompile("address already in use") -) - -type Config struct { - ChainConfig *core.ChainConfig // chain configuration - - NetworkId int // Network ID to use for selecting peers to connect to - Genesis string // Genesis JSON to seed the chain database with - FastSync bool // Enables the state download based fast synchronisation algorithm - - BlockChainVersion int - SkipBcVersionCheck bool // e.g. blockchain export - DatabaseCache int - DatabaseHandles int - - NatSpec bool - DocRoot string - AutoDAG bool - PowTest bool - PowShared bool - ExtraData []byte - - AccountManager *accounts.Manager - Etherbase common.Address - GasPrice *big.Int - MinerThreads int - SolcPath string - - GpoMinGasPrice *big.Int - GpoMaxGasPrice *big.Int - GpoFullBlockRatio int - GpobaseStepDown int - GpobaseStepUp int - GpobaseCorrectionFactor int - - EnableJit bool - ForceJit bool - - TestGenesisBlock *types.Block // Genesis block to seed the chain database with (testing only!) - TestGenesisState ethdb.Database // Genesis state to seed the database with (testing only!) 
-} - -type Expanse struct { - chainConfig *core.ChainConfig - // Channel for shutting down the expanse - shutdownChan chan bool - - // DB interfaces - chainDb ethdb.Database // Block chain database - dappDb ethdb.Database // Dapp database - - // Handlers - txPool *core.TxPool - txMu sync.Mutex - blockchain *core.BlockChain - accountManager *accounts.Manager - pow *ethash.Ethash - protocolManager *ProtocolManager - SolcPath string - solc *compiler.Solidity - gpo *GasPriceOracle - - GpoMinGasPrice *big.Int - GpoMaxGasPrice *big.Int - GpoFullBlockRatio int - GpobaseStepDown int - GpobaseStepUp int - GpobaseCorrectionFactor int - - httpclient *httpclient.HTTPClient - - eventMux *event.TypeMux - miner *miner.Miner - - Mining bool - MinerThreads int - NatSpec bool - AutoDAG bool - PowTest bool - autodagquit chan bool - etherbase common.Address - netVersionId int - netRPCService *PublicNetAPI -} - -func New(ctx *node.ServiceContext, config *Config) (*Expanse, error) { - // Open the chain database and perform any upgrades needed - chainDb, err := ctx.OpenDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles) - if err != nil { - return nil, err - } - if db, ok := chainDb.(*ethdb.LDBDatabase); ok { - db.Meter("exp/db/chaindata/") - } - if err := upgradeChainDatabase(chainDb); err != nil { - return nil, err - } - if err := addMipmapBloomBins(chainDb); err != nil { - return nil, err - } - - dappDb, err := ctx.OpenDatabase("dapp", config.DatabaseCache, config.DatabaseHandles) - if err != nil { - return nil, err - } - if db, ok := dappDb.(*ethdb.LDBDatabase); ok { - db.Meter("exp/db/dapp/") - } - glog.V(logger.Info).Infof("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId) - - // Load up any custom genesis block if requested - if len(config.Genesis) > 0 { - block, err := core.WriteGenesisBlock(chainDb, strings.NewReader(config.Genesis)) - if err != nil { - return nil, err - } - glog.V(logger.Info).Infof("Successfully wrote custom genesis block: %x", block.Hash()) - } - - // Load up a test setup if directly injected - if config.TestGenesisState != nil { - chainDb = config.TestGenesisState - } - if config.TestGenesisBlock != nil { - core.WriteTd(chainDb, config.TestGenesisBlock.Hash(), config.TestGenesisBlock.Difficulty()) - core.WriteBlock(chainDb, config.TestGenesisBlock) - core.WriteCanonicalHash(chainDb, config.TestGenesisBlock.Hash(), config.TestGenesisBlock.NumberU64()) - core.WriteHeadBlockHash(chainDb, config.TestGenesisBlock.Hash()) - } - - if !config.SkipBcVersionCheck { - bcVersion := core.GetBlockChainVersion(chainDb) - if bcVersion != config.BlockChainVersion && bcVersion != 0 { - return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d). 
Run gexp upgradedb.\n", bcVersion, config.BlockChainVersion)
-		}
-		core.WriteBlockChainVersion(chainDb, config.BlockChainVersion)
-	}
-	glog.V(logger.Info).Infof("Blockchain DB Version: %d", config.BlockChainVersion)
-
-	exp := &Expanse{
-		shutdownChan:            make(chan bool),
-		chainDb:                 chainDb,
-		dappDb:                  dappDb,
-		eventMux:                ctx.EventMux,
-		accountManager:          config.AccountManager,
-		etherbase:               config.Etherbase,
-		netVersionId:            config.NetworkId,
-		NatSpec:                 config.NatSpec,
-		MinerThreads:            config.MinerThreads,
-		SolcPath:                config.SolcPath,
-		AutoDAG:                 config.AutoDAG,
-		PowTest:                 config.PowTest,
-		GpoMinGasPrice:          config.GpoMinGasPrice,
-		GpoMaxGasPrice:          config.GpoMaxGasPrice,
-		GpoFullBlockRatio:       config.GpoFullBlockRatio,
-		GpobaseStepDown:         config.GpobaseStepDown,
-		GpobaseStepUp:           config.GpobaseStepUp,
-		GpobaseCorrectionFactor: config.GpobaseCorrectionFactor,
-		httpclient:              httpclient.New(config.DocRoot),
-	}
-	switch {
-	case config.PowTest:
-		glog.V(logger.Info).Infof("ethash used in test mode")
-		exp.pow, err = ethash.NewForTesting()
-		if err != nil {
-			return nil, err
-		}
-	case config.PowShared:
-		glog.V(logger.Info).Infof("ethash used in shared mode")
-		exp.pow = ethash.NewShared()
-
-	default:
-		exp.pow = ethash.New()
-	}
-
-	// load the genesis block or write a new one if no genesis
-	// block is present in the database.
-	genesis := core.GetBlock(chainDb, core.GetCanonicalHash(chainDb, 0))
-	if genesis == nil {
-		genesis, err = core.WriteDefaultGenesisBlock(chainDb)
-		if err != nil {
-			return nil, err
-		}
-		glog.V(logger.Info).Infoln("WARNING: Wrote default expanse genesis block")
-	}
-
-	if config.ChainConfig == nil {
-		return nil, errors.New("missing chain config")
-	}
-	core.WriteChainConfig(chainDb, genesis.Hash(), config.ChainConfig)
-
-	exp.chainConfig = config.ChainConfig
-	exp.chainConfig.VmConfig = vm.Config{
-		EnableJit: config.EnableJit,
-		ForceJit:  config.ForceJit,
-	}
-
-	exp.blockchain, err = core.NewBlockChain(chainDb, exp.chainConfig, exp.pow, exp.EventMux())
-	if err != nil {
-		if err == core.ErrNoGenesis {
-			return nil, fmt.Errorf(`No chain found. Please initialise a new chain using the "init" subcommand.`)
-		}
-		return nil, err
-	}
-	exp.gpo = NewGasPriceOracle(exp)
-
-	newPool := core.NewTxPool(exp.chainConfig, exp.EventMux(), exp.blockchain.State, exp.blockchain.GasLimit)
-	exp.txPool = newPool
-
-	if exp.protocolManager, err = NewProtocolManager(exp.chainConfig, config.FastSync, config.NetworkId, exp.eventMux, exp.txPool, exp.pow, exp.blockchain, chainDb); err != nil {
-		return nil, err
-	}
-	exp.miner = miner.New(exp, exp.chainConfig, exp.EventMux(), exp.pow)
-	exp.miner.SetGasPrice(config.GasPrice)
-	exp.miner.SetExtra(config.ExtraData)
-
-	return exp, nil
-}
-
-// APIs returns the collection of RPC services the expanse package offers.
-// NOTE, some of these services probably need to be moved to somewhere else.
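-// Each entry exposes the exported methods of Service over RPC under
-// namespace_method names; for example (illustrative mapping, following the
-// rpc package conventions) PublicBlockChainAPI.GetCode registered under
-// "exp" is served as "exp_getCode". The duplicate "eth" registrations below
-// keep Ethereum-compatible method names working against the same services.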
-func (s *Expanse) APIs() []rpc.API { - return []rpc.API{ - { - Namespace: "exp", - Version: "1.0", - Service: NewPublicEthereumAPI(s), - Public: true, - }, { - Namespace: "exp", - Version: "1.0", - Service: NewPublicAccountAPI(s.accountManager), - Public: true, - }, { - Namespace: "personal", - Version: "1.0", - Service: NewPrivateAccountAPI(s), - Public: false, - }, { - Namespace: "exp", - Version: "1.0", - Service: NewPublicBlockChainAPI(s.chainConfig, s.blockchain, s.miner, s.chainDb, s.gpo, s.eventMux, s.accountManager), - Public: true, - }, { - Namespace: "exp", - Version: "1.0", - Service: NewPublicTransactionPoolAPI(s), - Public: true, - }, { - Namespace: "exp", - Version: "1.0", - Service: NewPublicMinerAPI(s), - Public: true, - }, { - Namespace: "exp", - Version: "1.0", - Service: downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux), - Public: true, - }, { - Namespace: "miner", - Version: "1.0", - Service: NewPrivateMinerAPI(s), - Public: false, - }, { - Namespace: "txpool", - Version: "1.0", - Service: NewPublicTxPoolAPI(s), - Public: true, - }, { - Namespace: "exp", - Version: "1.0", - Service: filters.NewPublicFilterAPI(s.chainDb, s.eventMux), - Public: true, - }, { - Namespace: "admin", - Version: "1.0", - Service: NewPrivateAdminAPI(s), - }, { - Namespace: "debug", - Version: "1.0", - Service: NewPublicDebugAPI(s), - Public: true, - }, { - Namespace: "debug", - Version: "1.0", - Service: NewPrivateDebugAPI(s.chainConfig, s), - }, { - Namespace: "net", - Version: "1.0", - Service: s.netRPCService, - Public: true, - }, { - Namespace: "admin", - Version: "1.0", - Service: ethreg.NewPrivateRegistarAPI(s.chainConfig, s.blockchain, s.chainDb, s.txPool, s.accountManager), - }, { - Namespace: "eth", - Version: "1.0", - Service: NewPublicEthereumAPI(s), - Public: true, - }, { - Namespace: "eth", - Version: "1.0", - Service: NewPublicAccountAPI(s.accountManager), - Public: true, - }, { - Namespace: "eth", - Version: "1.0", - Service: NewPublicBlockChainAPI(s.chainConfig, s.blockchain, s.miner, s.chainDb, s.gpo, s.eventMux, s.accountManager), - Public: true, - }, { - Namespace: "eth", - Version: "1.0", - Service: NewPublicTransactionPoolAPI(s), - Public: true, - }, { - Namespace: "eth", - Version: "1.0", - Service: NewPublicMinerAPI(s), - Public: true, - }, { - Namespace: "eth", - Version: "1.0", - Service: downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux), - Public: true, - }, { - Namespace: "eth", - Version: "1.0", - Service: filters.NewPublicFilterAPI(s.chainDb, s.eventMux), - Public: true, - }, - } -} - -func (s *Expanse) ResetWithGenesisBlock(gb *types.Block) { - s.blockchain.ResetWithGenesisBlock(gb) -} - -func (s *Expanse) Etherbase() (eb common.Address, err error) { - eb = s.etherbase - if (eb == common.Address{}) { - firstAccount, err := s.AccountManager().AccountByIndex(0) - eb = firstAccount.Address - if err != nil { - return eb, fmt.Errorf("etherbase address must be explicitly specified") - } - } - return eb, nil -} - -// set in js console via admin interface or wrapper from cli flags -func (self *Expanse) SetEtherbase(etherbase common.Address) { - self.etherbase = etherbase - self.miner.SetEtherbase(etherbase) -} - -func (s *Expanse) StopMining() { s.miner.Stop() } -func (s *Expanse) IsMining() bool { return s.miner.Mining() } -func (s *Expanse) Miner() *miner.Miner { return s.miner } - -func (s *Expanse) AccountManager() *accounts.Manager { return s.accountManager } -func (s *Expanse) BlockChain() *core.BlockChain { return 
s.blockchain }
-func (s *Expanse) TxPool() *core.TxPool { return s.txPool }
-func (s *Expanse) EventMux() *event.TypeMux { return s.eventMux }
-func (s *Expanse) ChainDb() ethdb.Database { return s.chainDb }
-func (s *Expanse) DappDb() ethdb.Database { return s.dappDb }
-func (s *Expanse) IsListening() bool { return true } // Always listening
-func (s *Expanse) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
-func (s *Expanse) NetVersion() int { return s.netVersionId }
-func (s *Expanse) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
-
-// Protocols implements node.Service, returning all the currently configured
-// network protocols to start.
-func (s *Expanse) Protocols() []p2p.Protocol {
-	return s.protocolManager.SubProtocols
-}
-
-// Start implements node.Service, starting all internal goroutines needed by the
-// Expanse protocol implementation.
-func (s *Expanse) Start(srvr *p2p.Server) error {
-	if s.AutoDAG {
-		s.StartAutoDAG()
-	}
-	s.protocolManager.Start()
-	s.netRPCService = NewPublicNetAPI(srvr, s.NetVersion())
-	return nil
-}
-
-// Stop implements node.Service, terminating all internal goroutines used by the
-// Expanse protocol.
-func (s *Expanse) Stop() error {
-	s.blockchain.Stop()
-	s.protocolManager.Stop()
-	s.txPool.Stop()
-	s.miner.Stop()
-	s.eventMux.Stop()
-
-	s.StopAutoDAG()
-
-	s.chainDb.Close()
-	s.dappDb.Close()
-	close(s.shutdownChan)
-
-	return nil
-}
-
-// WaitForShutdown blocks until a shutdown occurs, then resumes main thread execution
-func (s *Expanse) WaitForShutdown() {
-	<-s.shutdownChan
-}
-
-// StartAutoDAG spawns a go routine that checks the DAG every autoDAGcheckInterval,
-// by default 10 times per epoch. In epoch n, once we are past autoDAGepochHeight
-// within-epoch blocks, it calls ethash.MakeDAG to pregenerate the DAG for the next
-// epoch n+1 if it does not exist yet, and removes the DAG for epoch n-1. The loop
-// quits if the autodagquit channel is closed; it can safely be restarted and
-// stopped any number of times.
-// For any more sophisticated pattern of DAG generation, use CLI subcommand
-// makedag
-func (self *Expanse) StartAutoDAG() {
-	if self.autodagquit != nil {
-		return // already started
-	}
-	go func() {
-		glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir)
-		var nextEpoch uint64
-		timer := time.After(0)
-		self.autodagquit = make(chan bool)
-		for {
-			select {
-			case <-timer:
-				glog.V(logger.Info).Infof("checking DAG (ethash dir: %s)", ethash.DefaultDir)
-				currentBlock := self.BlockChain().CurrentBlock().NumberU64()
-				thisEpoch := currentBlock / epochLength
-				if nextEpoch <= thisEpoch {
-					if currentBlock%epochLength > autoDAGepochHeight {
-						if thisEpoch > 0 {
-							previousDag, previousDagFull := dagFiles(thisEpoch - 1)
-							os.Remove(filepath.Join(ethash.DefaultDir, previousDag))
-							os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull))
-							glog.V(logger.Info).Infof("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag)
-						}
-						nextEpoch = thisEpoch + 1
-						dag, _ := dagFiles(nextEpoch)
-						if _, err := os.Stat(dag); os.IsNotExist(err) {
-							glog.V(logger.Info).Infof("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag)
-							err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir
-							if err != nil {
-								glog.V(logger.Error).Infof("Error generating DAG for epoch %d (%s)", nextEpoch, dag)
-								return
-							}
-						} else {
-							glog.V(logger.Error).Infof("DAG for epoch %d (%s)", nextEpoch, dag)
-						}
-					}
-				}
-				timer = time.After(autoDAGcheckInterval)
-			case <-self.autodagquit:
-				return
-			}
-		}
-	}()
-}
-
-// StopAutoDAG stops automatic DAG pregeneration by quitting the loop
-func (self *Expanse) StopAutoDAG() {
-	if self.autodagquit != nil {
-		close(self.autodagquit)
-		self.autodagquit = nil
-	}
-	glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir)
-}
-
-// HTTPClient returns the light http client used for fetching offchain docs
-// (natspec, source for verification)
-func (self *Expanse) HTTPClient() *httpclient.HTTPClient {
-	return self.httpclient
-}
-
-func (self *Expanse) Solc() (*compiler.Solidity, error) {
-	var err error
-	if self.solc == nil {
-		self.solc, err = compiler.New(self.SolcPath)
-	}
-	return self.solc, err
-}
-
-// set in js console via admin interface or wrapper from cli flags
-func (self *Expanse) SetSolc(solcPath string) (*compiler.Solidity, error) {
-	self.SolcPath = solcPath
-	self.solc = nil
-	return self.Solc()
-}
-
-// dagFiles(epoch) returns the two alternative DAG filenames (not a path)
-// 1) <revision>-<hash> 2) full-R<revision>-<hash>
-func dagFiles(epoch uint64) (string, string) {
-	seedHash, _ := ethash.GetSeedHash(epoch * epochLength)
-	dag := fmt.Sprintf("full-R%d-%x", ethashRevision, seedHash[:8])
-	return dag, "full-R" + dag
-}
-
-// upgradeChainDatabase ensures that the chain database stores block split into
-// separate header and body entries.
-func upgradeChainDatabase(db ethdb.Database) error {
-	// Short circuit if the head block is stored already as separate header and body
-	data, err := db.Get([]byte("LastBlock"))
-	if err != nil {
-		return nil
-	}
-	head := common.BytesToHash(data)
-
-	if block := core.GetBlockByHashOld(db, head); block == nil {
-		return nil
-	}
-	// At least some of the database is still the old format, upgrade (skip the head block!)
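-	// Each legacy "block-hash-" keyed entry is split into separate header, body
-	// and td entries; the head block is converted last so that an interrupted
-	// upgrade is detected and re-run on the next startup.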
- glog.V(logger.Info).Info("Old database detected, upgrading...") - - if db, ok := db.(*ethdb.LDBDatabase); ok { - blockPrefix := []byte("block-hash-") - for it := db.NewIterator(); it.Next(); { - // Skip anything other than a combined block - if !bytes.HasPrefix(it.Key(), blockPrefix) { - continue - } - // Skip the head block (merge last to signal upgrade completion) - if bytes.HasSuffix(it.Key(), head.Bytes()) { - continue - } - // Load the block, split and serialize (order!) - block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix))) - - if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil { - return err - } - if err := core.WriteBody(db, block.Hash(), block.Body()); err != nil { - return err - } - if err := core.WriteHeader(db, block.Header()); err != nil { - return err - } - if err := db.Delete(it.Key()); err != nil { - return err - } - } - // Lastly, upgrade the head block, disabling the upgrade mechanism - current := core.GetBlockByHashOld(db, head) - - if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil { - return err - } - if err := core.WriteBody(db, current.Hash(), current.Body()); err != nil { - return err - } - if err := core.WriteHeader(db, current.Header()); err != nil { - return err - } - } - return nil -} - -func addMipmapBloomBins(db ethdb.Database) (err error) { - const mipmapVersion uint = 2 - - // check if the version is set. We ignore data for now since there's - // only one version so we can easily ignore it for now - var data []byte - data, _ = db.Get([]byte("setting-mipmap-version")) - if len(data) > 0 { - var version uint - if err := rlp.DecodeBytes(data, &version); err == nil && version == mipmapVersion { - return nil - } - } - - defer func() { - if err == nil { - var val []byte - val, err = rlp.EncodeToBytes(mipmapVersion) - if err == nil { - err = db.Put([]byte("setting-mipmap-version"), val) - } - return - } - }() - latestBlock := core.GetBlock(db, core.GetHeadBlockHash(db)) - if latestBlock == nil { // clean database - return - } - - tstart := time.Now() - glog.V(logger.Info).Infoln("upgrading db log bloom bins") - for i := uint64(0); i <= latestBlock.NumberU64(); i++ { - hash := core.GetCanonicalHash(db, i) - if (hash == common.Hash{}) { - return fmt.Errorf("chain db corrupted. Could not find block %d.", i) - } - core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash)) - } - glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart)) - return nil -} diff --git a/exp/backend_test.go b/exp/backend_test.go deleted file mode 100644 index 3b93b4ac1bd90..0000000000000 --- a/exp/backend_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// Copyright 2016 The go-expanse Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package exp - -import ( - "math/big" - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/ethdb" -) - -func TestMipmapUpgrade(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - addr := common.BytesToAddress([]byte("jeff")) - genesis := core.WriteGenesisBlockForTesting(db) - - chain, receipts := core.GenerateChain(nil, genesis, db, 10, func(i int, gen *core.BlockGen) { - var receipts types.Receipts - switch i { - case 1: - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{&vm.Log{Address: addr}} - gen.AddUncheckedReceipt(receipt) - receipts = types.Receipts{receipt} - case 2: - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{&vm.Log{Address: addr}} - gen.AddUncheckedReceipt(receipt) - receipts = types.Receipts{receipt} - } - - // store the receipts - err := core.WriteReceipts(db, receipts) - if err != nil { - t.Fatal(err) - } - }) - for i, block := range chain { - core.WriteBlock(db, block) - if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { - t.Fatalf("failed to insert block number: %v", err) - } - if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { - t.Fatalf("failed to insert block number: %v", err) - } - if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil { - t.Fatal("error writing block receipts:", err) - } - } - - err := addMipmapBloomBins(db) - if err != nil { - t.Fatal(err) - } - - bloom := core.GetMipmapBloom(db, 1, core.MIPMapLevels[0]) - if (bloom == types.Bloom{}) { - t.Error("got empty bloom filter") - } - - data, _ := db.Get([]byte("setting-mipmap-version")) - if len(data) == 0 { - t.Error("setting-mipmap-version not written to database") - } -} diff --git a/exp/bad_block.go b/exp/bad_block.go deleted file mode 100644 index 4bf82ba475cfe..0000000000000 --- a/exp/bad_block.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package exp - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/rlp" -) - -const ( - // The Expanse main network genesis block. 
-	defaultGenesisHash = "0x2fe75cf9ba10cb1105e1750d872911e75365ba24fdd5db7f099445c901fea895"
-	badBlocksURL       = "https://badblocks.expanse.tech"
-)
-
-var EnableBadBlockReporting = false
-
-func sendBadBlockReport(block *types.Block, err error) {
-	if !EnableBadBlockReporting {
-		return
-	}
-
-	var (
-		blockRLP, _ = rlp.EncodeToBytes(block)
-		params      = map[string]interface{}{
-			"block":     common.Bytes2Hex(blockRLP),
-			"blockHash": block.Hash().Hex(),
-			"errortype": err.Error(),
-			"client":    "go",
-		}
-	)
-	if !block.ReceivedAt.IsZero() {
-		params["receivedAt"] = block.ReceivedAt.UTC().String()
-	}
-	if p, ok := block.ReceivedFrom.(*peer); ok {
-		params["receivedFrom"] = map[string]interface{}{
-			"enode":           fmt.Sprintf("enode://%x@%v", p.ID(), p.RemoteAddr()),
-			"name":            p.Name(),
-			"protocolVersion": p.version,
-		}
-	}
-	jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "id": "1", "jsonrpc": "2.0", "params": []interface{}{params}})
-	client := http.Client{Timeout: 8 * time.Second}
-	resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
-	if err != nil {
-		glog.V(logger.Debug).Infoln(err)
-		return
-	}
-	glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode)
-	resp.Body.Close()
-}
diff --git a/exp/bind.go b/exp/bind.go
deleted file mode 100644
index 98a273541d5b3..0000000000000
--- a/exp/bind.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package exp
-
-import (
-	"math/big"
-
-	"github.com/expanse-org/go-expanse/common"
-	"github.com/expanse-org/go-expanse/core/types"
-	"github.com/expanse-org/go-expanse/rlp"
-	"github.com/expanse-org/go-expanse/rpc"
-)
-
-// ContractBackend implements bind.ContractBackend with direct calls to Ethereum
-// internals to support operating on contracts within subprotocols like eth and
-// swarm.
-//
-// Internally this backend uses the already exposed API endpoints of the Ethereum
-// object. These should be rewritten to internal Go method calls when the Go API
-// is refactored to support a clean library use.
-type ContractBackend struct {
-	eapi  *PublicEthereumAPI        // Wrapper around the Expanse object to access metadata
-	bcapi *PublicBlockChainAPI      // Wrapper around the blockchain to access chain data
-	txapi *PublicTransactionPoolAPI // Wrapper around the transaction pool to access transaction data
-}
-
-// NewContractBackend creates a new native contract backend using an existing
-// Ethereum object.
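-// A sketch of how this backend would be handed to abigen-generated bindings
-// (the NewMyToken binding and the address are hypothetical):
-//
-//	backend := NewContractBackend(expService)
-//	token, err := NewMyToken(common.HexToAddress("0x1234"), backend)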
-func NewContractBackend(exp *Expanse) *ContractBackend {
-	return &ContractBackend{
-		eapi:  NewPublicEthereumAPI(exp),
-		bcapi: NewPublicBlockChainAPI(exp.chainConfig, exp.blockchain, exp.miner, exp.chainDb, exp.gpo, exp.eventMux, exp.accountManager),
-		txapi: NewPublicTransactionPoolAPI(exp),
-	}
-}
-
-// HasCode implements bind.ContractVerifier.HasCode by retrieving any code associated
-// with the contract from the local API, and checking its size.
-func (b *ContractBackend) HasCode(contract common.Address, pending bool) (bool, error) {
-	block := rpc.LatestBlockNumber
-	if pending {
-		block = rpc.PendingBlockNumber
-	}
-	out, err := b.bcapi.GetCode(contract, block)
-	return len(common.FromHex(out)) > 0, err
-}
-
-// ContractCall implements bind.ContractCaller executing an Ethereum contract
-// call with the specified data as the input. The pending flag requests execution
-// against the pending block, not the stable head of the chain.
-func (b *ContractBackend) ContractCall(contract common.Address, data []byte, pending bool) ([]byte, error) {
-	// Convert the input args to the API spec
-	args := CallArgs{
-		To:   &contract,
-		Data: common.ToHex(data),
-	}
-	block := rpc.LatestBlockNumber
-	if pending {
-		block = rpc.PendingBlockNumber
-	}
-	// Execute the call and convert the output back to Go types
-	out, err := b.bcapi.Call(args, block)
-	return common.FromHex(out), err
-}
-
-// PendingAccountNonce implements bind.ContractTransactor retrieving the current
-// pending nonce associated with an account.
-func (b *ContractBackend) PendingAccountNonce(account common.Address) (uint64, error) {
-	out, err := b.txapi.GetTransactionCount(account, rpc.PendingBlockNumber)
-	return out.Uint64(), err
-}
-
-// SuggestGasPrice implements bind.ContractTransactor retrieving the currently
-// suggested gas price to allow a timely execution of a transaction.
-func (b *ContractBackend) SuggestGasPrice() (*big.Int, error) {
-	return b.eapi.GasPrice(), nil
-}
-
-// EstimateGasLimit implements bind.ContractTransactor trying to estimate the gas
-// needed to execute a specific transaction based on the current pending state of
-// the backend blockchain. There is no guarantee that this is the true gas limit
-// requirement as other transactions may be added or removed by miners, but it
-// should provide a basis for setting a reasonable default.
-func (b *ContractBackend) EstimateGasLimit(sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error) {
-	out, err := b.bcapi.EstimateGas(CallArgs{
-		From:  sender,
-		To:    contract,
-		Value: *rpc.NewHexNumber(value),
-		Data:  common.ToHex(data),
-	})
-	return out.BigInt(), err
-}
-
-// SendTransaction implements bind.ContractTransactor, injecting the transaction
-// into the pending pool for execution.
-func (b *ContractBackend) SendTransaction(tx *types.Transaction) error {
-	raw, _ := rlp.EncodeToBytes(tx)
-	_, err := b.txapi.SendRawTransaction(common.ToHex(raw))
-	return err
-}
diff --git a/exp/cpu_mining.go b/exp/cpu_mining.go
deleted file mode 100644
index d9fdfb4bbd27c..0000000000000
--- a/exp/cpu_mining.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// Copyright 2015 go-expanse Authors
-// This file is part of the go-expanse library.
-//
-// The go-expanse library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-expanse library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>.
-
-// +build !opencl
-
-package exp
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/expanse-org/go-expanse/logger"
-	"github.com/expanse-org/go-expanse/logger/glog"
-)
-
-const disabledInfo = "Set GO_OPENCL and re-build to enable."
-
-func (s *Expanse) StartMining(threads int, gpus string) error {
-	eb, err := s.Etherbase()
-	if err != nil {
-		err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
-		glog.V(logger.Error).Infoln(err)
-		return err
-	}
-
-	if gpus != "" {
-		return errors.New("GPU mining disabled. " + disabledInfo)
-	}
-
-	// CPU mining
-	go s.miner.Start(eb, threads)
-	return nil
-}
-
-func GPUBench(gpuid uint64) {
-	fmt.Println("GPU mining disabled. " + disabledInfo)
-}
-
-func PrintOpenCLDevices() {
-	fmt.Println("OpenCL disabled. " + disabledInfo)
-}
diff --git a/exp/downloader/api.go b/exp/downloader/api.go
deleted file mode 100644
index 068d7291972d0..0000000000000
--- a/exp/downloader/api.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package downloader
-
-import (
-	"sync"
-
-	"golang.org/x/net/context"
-
-	"github.com/expanse-org/go-expanse/event"
-	"github.com/expanse-org/go-expanse/rpc"
-)
-
-// PublicDownloaderAPI provides an API which gives information about the current synchronisation status.
-// It offers only methods that operate on data that can be available to anyone without security risks.
-type PublicDownloaderAPI struct {
-	d                   *Downloader
-	mux                 *event.TypeMux
-	muSyncSubscriptions sync.Mutex
-	syncSubscriptions   map[string]rpc.Subscription
-}
-
-// NewPublicDownloaderAPI creates a new PublicDownloaderAPI.
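-// Subscribers are notified with either a *SyncingResult while a sync cycle
-// is running, or the bare value false once it completes or fails, as
-// assembled in run() below.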
-func NewPublicDownloaderAPI(d *Downloader, m *event.TypeMux) *PublicDownloaderAPI {
-	api := &PublicDownloaderAPI{d: d, mux: m, syncSubscriptions: make(map[string]rpc.Subscription)}
-
-	go api.run()
-
-	return api
-}
-
-func (api *PublicDownloaderAPI) run() {
-	sub := api.mux.Subscribe(StartEvent{}, DoneEvent{}, FailedEvent{})
-
-	for event := range sub.Chan() {
-		var notification interface{}
-
-		switch event.Data.(type) {
-		case StartEvent:
-			result := &SyncingResult{Syncing: true}
-			result.Status.Origin, result.Status.Current, result.Status.Height, result.Status.Pulled, result.Status.Known = api.d.Progress()
-			notification = result
-		case DoneEvent, FailedEvent:
-			notification = false
-		}
-
-		api.muSyncSubscriptions.Lock()
-		for id, sub := range api.syncSubscriptions {
-			if sub.Notify(notification) == rpc.ErrNotificationNotFound {
-				delete(api.syncSubscriptions, id)
-			}
-		}
-		api.muSyncSubscriptions.Unlock()
-	}
-}
-
-// Progress gives progress indications when the node is synchronising with the Expanse network.
-type Progress struct {
-	Origin  uint64 `json:"startingBlock"`
-	Current uint64 `json:"currentBlock"`
-	Height  uint64 `json:"highestBlock"`
-	Pulled  uint64 `json:"pulledStates"`
-	Known   uint64 `json:"knownStates"`
-}
-
-// SyncingResult provides information about the current synchronisation status for this node.
-type SyncingResult struct {
-	Syncing bool     `json:"syncing"`
-	Status  Progress `json:"status"`
-}
-
-// Syncing provides information when this node starts synchronising with the Expanse network and when it's finished.
-func (api *PublicDownloaderAPI) Syncing(ctx context.Context) (rpc.Subscription, error) {
-	notifier, supported := rpc.NotifierFromContext(ctx)
-	if !supported {
-		return nil, rpc.ErrNotificationsUnsupported
-	}
-
-	subscription, err := notifier.NewSubscription(func(id string) {
-		api.muSyncSubscriptions.Lock()
-		delete(api.syncSubscriptions, id)
-		api.muSyncSubscriptions.Unlock()
-	})
-
-	if err != nil {
-		return nil, err
-	}
-
-	api.muSyncSubscriptions.Lock()
-	api.syncSubscriptions[subscription.ID()] = subscription
-	api.muSyncSubscriptions.Unlock()
-
-	return subscription, nil
-}
diff --git a/exp/downloader/downloader.go b/exp/downloader/downloader.go
deleted file mode 100644
index f12431bd0a071..0000000000000
--- a/exp/downloader/downloader.go
+++ /dev/null
@@ -1,1498 +0,0 @@
-// Copyright 2015 The go-expanse Authors
-// This file is part of the go-expanse library.
-//
-// The go-expanse library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-expanse library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package downloader contains the manual full chain synchronisation.
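-// Two strategies are implemented here: full synchronisation, which fetches
-// and executes every block, and fast synchronisation (see the FastSync
-// config flag), which fetches headers, bodies and receipts and downloads
-// the state trie directly; SyncMode on the Downloader below selects between
-// them per sync cycle.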
-package downloader - -import ( - "crypto/rand" - "errors" - "fmt" - "math" - "math/big" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/params" - "github.com/expanse-org/go-expanse/trie" - "github.com/rcrowley/go-metrics" -) - -var ( - MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request - MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request - MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request - MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly - MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request - MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request - MaxStateFetch = 384 // Amount of node state values to allow fetching per request - - MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation - rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests - rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests - rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value - ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion - ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts - - qosTuningPeers = 5 // Number of peers to tune based on (best peers) - qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence - qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value - - maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) - maxHeadersProcess = 2048 // Number of header download results to import at once into the chain - maxResultsProcess = 2048 // Number of content download results to import at once into the chain - - fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync - fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected - fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it - fsPivotInterval = 512 // Number of headers out of which to randomize the pivot point - fsMinFullBlocks = 1024 // Number of blocks to retrieve fully even in fast sync - fsCriticalTrials = 10 // Number of times to retry in the critical section before bailing -) - -var ( - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer is unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errStallingPeer = errors.New("peer is stalling") - errNoPeers = errors.New("no peers to keep download active") - errTimeout = errors.New("timeout") - errEmptyHeaderSet = errors.New("empty header set by peer") - errPeersUnavailable = errors.New("no peers available or all tried for download") - errInvalidAncestor = errors.New("retrieved ancestor is invalid") - errInvalidChain = errors.New("retrieved hash chain is invalid") - errInvalidBlock = errors.New("retrieved block is invalid") - errInvalidBody = errors.New("retrieved block body is invalid") - errInvalidReceipt = errors.New("retrieved receipt is invalid") - errCancelBlockFetch =
errors.New("block download canceled (requested)") - errCancelHeaderFetch = errors.New("block header download canceled (requested)") - errCancelBodyFetch = errors.New("block body download canceled (requested)") - errCancelReceiptFetch = errors.New("receipt download canceled (requested)") - errCancelStateFetch = errors.New("state data download canceled (requested)") - errCancelHeaderProcessing = errors.New("header processing canceled (requested)") - errCancelContentProcessing = errors.New("content processing canceled (requested)") - errNoSyncActive = errors.New("no sync active") - errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)") -) - -type Downloader struct { - mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) - mux *event.TypeMux // Event multiplexer to announce sync operation events - - queue *queue // Scheduler for selecting the hashes to download - peers *peerSet // Set of active peers from which download can proceed - - fsPivotLock *types.Header // Pivot header on critical section entry (cannot change between retries) - fsPivotFails int // Number of fast sync failures in the critical section - - rttEstimate uint64 // Round trip time to target for download requests - rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops) - - // Statistics - syncStatsChainOrigin uint64 // Origin block number where syncing started at - syncStatsChainHeight uint64 // Highest block number known when syncing started - syncStatsStateDone uint64 // Number of state trie entries already pulled - syncStatsLock sync.RWMutex // Lock protecting the sync stats fields - - // Callbacks - hasHeader headerCheckFn // Checks if a header is present in the chain - hasBlockAndState blockAndStateCheckFn // Checks if a block and associated state is present in the chain - getHeader headerRetrievalFn // Retrieves a header from the chain - getBlock blockRetrievalFn // Retrieves a block from the chain - headHeader headHeaderRetrievalFn // Retrieves the head header from the chain - headBlock headBlockRetrievalFn // Retrieves the head block from the chain - headFastBlock headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain - commitHeadBlock headBlockCommitterFn // Commits a manually assembled block as the chain head - getTd tdRetrievalFn // Retrieves the TD of a block from the chain - insertHeaders headerChainInsertFn // Injects a batch of headers into the chain - insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain - insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain - rollback chainRollbackFn // Removes a batch of recently added chain links - dropPeer peerDropFn // Drops a peer for misbehaving - - // Status - synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing - synchronising int32 - notified int32 - - // Channels - newPeerCh chan *peer - headerCh chan dataPack // [eth/62] Channel receiving inbound block headers - bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies - receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts - stateCh chan dataPack // [eth/63] Channel receiving inbound node state data - bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks - receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks - stateWakeCh chan bool // [eth/63] Channel to signal the state 
fetcher of new tasks - headerProcCh chan []*types.Header // [eth/62] Channel to feed the header processor new tasks - - // Cancellation and termination - cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) - cancelCh chan struct{} // Channel to cancel mid-flight syncs - cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers - - quitCh chan struct{} // Quit channel to signal termination - quitLock sync.RWMutex // Lock to prevent double closes - - // Testing hooks - syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run - bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch - receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch - chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) -} - -// New creates a new downloader to fetch hashes and blocks from remote peers. -func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn, - getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, - headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, - insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader { - - dl := &Downloader{ - mode: FullSync, - mux: mux, - queue: newQueue(stateDb), - peers: newPeerSet(), - rttEstimate: uint64(rttMaxEstimate), - rttConfidence: uint64(1000000), - hasHeader: hasHeader, - hasBlockAndState: hasBlockAndState, - getHeader: getHeader, - getBlock: getBlock, - headHeader: headHeader, - headBlock: headBlock, - headFastBlock: headFastBlock, - commitHeadBlock: commitHeadBlock, - getTd: getTd, - insertHeaders: insertHeaders, - insertBlocks: insertBlocks, - insertReceipts: insertReceipts, - rollback: rollback, - dropPeer: dropPeer, - newPeerCh: make(chan *peer, 1), - headerCh: make(chan dataPack, 1), - bodyCh: make(chan dataPack, 1), - receiptCh: make(chan dataPack, 1), - stateCh: make(chan dataPack, 1), - bodyWakeCh: make(chan bool, 1), - receiptWakeCh: make(chan bool, 1), - stateWakeCh: make(chan bool, 1), - headerProcCh: make(chan []*types.Header, 1), - quitCh: make(chan struct{}), - } - go dl.qosTuner() - return dl -} - -// Progress retrieves the synchronisation boundaries, specifically the origin -// block where synchronisation started at (may have failed/suspended); the block -// or header sync is currently at; and the latest known block which the sync targets. -// -// In addition, during the state download phase of fast synchronisation the number -// of processed and the total number of known states are also returned. Otherwise -// these are zero. 
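The Progress tuple documented above (and implemented right below) is what the API layer flattens into SyncingResult. A hypothetical caller, say a status line in a console UI, might consume the exported API like this sketch (the import path matches the deleted package; the helper name is invented):

```go
package status

import (
	"fmt"

	"github.com/expanse-org/go-expanse/exp/downloader"
)

// printSyncStatus renders the five Progress values; purely illustrative.
func printSyncStatus(d *downloader.Downloader) {
	if !d.Synchronising() {
		fmt.Println("not syncing")
		return
	}
	origin, current, height, pulled, known := d.Progress()
	fmt.Printf("block %d/%d (started at %d), state entries %d/%d\n",
		current, height, origin, pulled, known)
}
```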
-func (d *Downloader) Progress() (uint64, uint64, uint64, uint64, uint64) { - // Fetch the pending state count outside of the lock to prevent unforeseen deadlocks - pendingStates := uint64(d.queue.PendingNodeData()) - - // Lock the current stats and return the progress - d.syncStatsLock.RLock() - defer d.syncStatsLock.RUnlock() - - current := uint64(0) - switch d.mode { - case FullSync: - current = d.headBlock().NumberU64() - case FastSync: - current = d.headFastBlock().NumberU64() - case LightSync: - current = d.headHeader().Number.Uint64() - } - return d.syncStatsChainOrigin, current, d.syncStatsChainHeight, d.syncStatsStateDone, d.syncStatsStateDone + pendingStates -} - -// Synchronising returns whether the downloader is currently retrieving blocks. -func (d *Downloader) Synchronising() bool { - return atomic.LoadInt32(&d.synchronising) > 0 -} - -// RegisterPeer injects a new download peer into the set of block sources to be -// used for fetching hashes and blocks from. -func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHeadRetrievalFn, - getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, - getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error { - - glog.V(logger.Detail).Infoln("Registering peer", id) - if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil { - glog.V(logger.Error).Infoln("Register failed:", err) - return err - } - d.qosReduceConfidence() - - return nil -} - -// UnregisterPeer removes a peer from the known list, preventing any action from -// the specified peer. An effort is also made to return any pending fetches into -// the queue. -func (d *Downloader) UnregisterPeer(id string) error { - // Unregister the peer from the active peer set and revoke any fetch tasks - glog.V(logger.Detail).Infoln("Unregistering peer", id) - if err := d.peers.Unregister(id); err != nil { - glog.V(logger.Error).Infoln("Unregister failed:", err) - return err - } - d.queue.Revoke(id) - - // If this peer was the master peer, abort sync immediately - d.cancelLock.RLock() - master := id == d.cancelPeer - d.cancelLock.RUnlock() - - if master { - d.cancel() - } - return nil -} - -// Synchronise tries to sync up our local block chain with a remote peer, both -// adding various sanity checks as well as wrapping it with various log entries. -func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error { - glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td) - - err := d.synchronise(id, head, td, mode) - switch err { - case nil: - glog.V(logger.Detail).Infof("Synchronisation completed") - - case errBusy: - glog.V(logger.Detail).Infof("Synchronisation already in progress") - - case errTimeout, errBadPeer, errStallingPeer, - errEmptyHeaderSet, errPeersUnavailable, errTooOld, - errInvalidAncestor, errInvalidChain: - glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err) - d.dropPeer(id) - - default: - glog.V(logger.Warn).Infof("Synchronisation failed: %v", err) - } - return err -} - -// synchronise will select the peer and use it for synchronising. If an empty string is given -// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the -// checks fail an error will be returned.
This method is synchronous -func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error { - // Mock out the synchronisation if testing - if d.synchroniseMock != nil { - return d.synchroniseMock(id, hash) - } - // Make sure only one goroutine is ever allowed past this point at once - if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { - return errBusy - } - defer atomic.StoreInt32(&d.synchronising, 0) - - // Post a user notification of the sync (only once per session) - if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { - glog.V(logger.Info).Infoln("Block synchronisation started") - } - // Reset the queue, peer set and wake channels to clean any internal leftover state - d.queue.Reset() - d.peers.Reset() - - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { - select { - case <-ch: - default: - } - } - for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} { - for empty := false; !empty; { - select { - case <-ch: - default: - empty = true - } - } - } - for empty := false; !empty; { - select { - case <-d.headerProcCh: - default: - empty = true - } - } - // Create cancel channel for aborting mid-flight and mark the master peer - d.cancelLock.Lock() - d.cancelCh = make(chan struct{}) - d.cancelPeer = id - d.cancelLock.Unlock() - - defer d.cancel() // No matter what, we can't leave the cancel channel open - - // Set the requested sync mode, unless it's forbidden - d.mode = mode - if d.mode == FastSync && d.fsPivotFails >= fsCriticalTrials { - d.mode = FullSync - } - // Retrieve the origin peer and initiate the downloading process - p := d.peers.Peer(id) - if p == nil { - return errUnknownPeer - } - return d.syncWithPeer(p, hash, td) -} - -// syncWithPeer starts a block synchronization based on the hash chain from the -// specified peer and head hash. -func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) { - d.mux.Post(StartEvent{}) - defer func() { - // reset on error - if err != nil { - d.mux.Post(FailedEvent{err}) - } else { - d.mux.Post(DoneEvent{}) - } - }() - if p.version < 62 { - return errTooOld - } - - glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version) - defer func(start time.Time) { - glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start)) - }(time.Now()) - - // Look up the sync boundaries: the common ancestor and the target block - latest, err := d.fetchHeight(p) - if err != nil { - return err - } - height := latest.Number.Uint64() - - origin, err := d.findAncestor(p, height) - if err != nil { - return err - } - d.syncStatsLock.Lock() - if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { - d.syncStatsChainOrigin = origin - } - d.syncStatsChainHeight = height - d.syncStatsLock.Unlock() - - // Initiate the sync using a concurrent header and content retrieval algorithm - pivot := uint64(0) - switch d.mode { - case LightSync: - pivot = height - case FastSync: - // Calculate the new fast/slow sync pivot point - if d.fsPivotLock == nil { - pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval))) - if err != nil { - panic(fmt.Sprintf("Failed to access crypto random source: %v", err)) - } - if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() { - pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64() - } - } else { - // Pivot point locked in, use this and do not pick a new one! 
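Backing up to the synchronise entry point above: it serialises sync runs with nothing more than one atomic compare-and-swap, so every concurrent caller bounces off with errBusy instead of queueing. The gate, distilled into a runnable sketch:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var (
	synchronising int32 // 0 = idle, 1 = a sync is running
	errBusy       = errors.New("busy")
)

func synchronise() error {
	// Only one goroutine may pass; everyone else returns errBusy immediately.
	if !atomic.CompareAndSwapInt32(&synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&synchronising, 0)
	// ... reset queues, pick the master peer, run the sync ...
	return nil
}

func main() {
	fmt.Println(synchronise()) // <nil>
}
```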
- pivot = d.fsPivotLock.Number.Uint64() - } - // If the point is below the origin, move origin back to ensure state download - if pivot < origin { - if pivot > 0 { - origin = pivot - 1 - } else { - origin = 0 - } - } - glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot) - } - d.queue.Prepare(origin+1, d.mode, pivot, latest) - if d.syncInitHook != nil { - d.syncInitHook(origin, height) - } - return d.spawnSync(origin+1, - func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved - func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved - func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync - func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync - func() error { return d.fetchNodeData() }, // Node state data is retrieved during fast sync - ) -} - -// spawnSync runs d.processContent and all given fetcher functions to completion in -// separate goroutines, returning the first error that appears. -func (d *Downloader) spawnSync(origin uint64, fetchers ...func() error) error { - var wg sync.WaitGroup - errc := make(chan error, len(fetchers)+1) - wg.Add(len(fetchers) + 1) - go func() { defer wg.Done(); errc <- d.processContent() }() - for _, fn := range fetchers { - fn := fn - go func() { defer wg.Done(); errc <- fn() }() - } - // Wait for the first error, then terminate the others. - var err error - for i := 0; i < len(fetchers)+1; i++ { - if i == len(fetchers) { - // Close the queue when all fetchers have exited. - // This will cause the block processor to end when - // it has processed the queue. - d.queue.Close() - } - if err = <-errc; err != nil { - break - } - } - d.queue.Close() - d.cancel() - wg.Wait() - return err -} - -// cancel cancels all of the operations and resets the queue. -func (d *Downloader) cancel() { - // Close the current cancel channel - d.cancelLock.Lock() - if d.cancelCh != nil { - select { - case <-d.cancelCh: - // Channel was already closed - default: - close(d.cancelCh) - } - } - d.cancelLock.Unlock() -} - -// Terminate interrupts the downloader, canceling all pending operations. -// The downloader cannot be reused after calling Terminate. -func (d *Downloader) Terminate() { - // Close the termination channel (make sure double close is allowed) - d.quitLock.Lock() - select { - case <-d.quitCh: - default: - close(d.quitCh) - } - d.quitLock.Unlock() - - // Cancel any pending download requests - d.cancel() -} - -// fetchHeight retrieves the head header of the remote peer to aid in estimating -// the total time a pending synchronisation would take.
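Both cancel and Terminate above rely on the same probe-before-close idiom, since closing an already-closed channel panics in Go. Isolated (note the originals hold cancelLock/quitLock around this; the lock is what makes the probe race-free):

```go
// safeClose closes ch unless it is already closed. Callers must hold a lock
// (as cancel and Terminate above do), otherwise two goroutines could both
// pass the probe and double-close.
func safeClose(ch chan struct{}) {
	select {
	case <-ch:
		// already closed, nothing to do
	default:
		close(ch)
	}
}
```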
-func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) { - glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p) - - // Request the advertised remote head block and wait for the response - head, _ := p.currentHead() - go p.getRelHeaders(head, 1, 0, false) - - timeout := time.After(d.requestTTL()) - for { - select { - case <-d.cancelCh: - return nil, errCancelBlockFetch - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packet.(*headerPack).headers - if len(headers) != 1 { - glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers)) - return nil, errBadPeer - } - return headers[0], nil - - case <-timeout: - glog.V(logger.Debug).Infof("%v: head header timeout", p) - return nil, errTimeout - - case <-d.bodyCh: - case <-d.stateCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore - } - } -} - -// findAncestor tries to locate the common ancestor link of the local chain and -// a remote peers blockchain. In the general case when our node was in sync and -// on the correct chain, checking the top N links should already get us a match. -// In the rare scenario when we ended up on a long reorganisation (i.e. none of -// the head links match), we do a binary search to find the common ancestor. -func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) { - glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height) - - // Figure out the valid ancestor range to prevent rewrite attacks - floor, ceil := int64(-1), d.headHeader().Number.Uint64() - if d.mode == FullSync { - ceil = d.headBlock().NumberU64() - } else if d.mode == FastSync { - ceil = d.headFastBlock().NumberU64() - } - if ceil >= MaxForkAncestry { - floor = int64(ceil - MaxForkAncestry) - } - // Request the topmost blocks to short circuit binary ancestor lookup - head := ceil - if head > height { - head = height - } - from := int64(head) - int64(MaxHeaderFetch) - if from < 0 { - from = 0 - } - // Span out with 15 block gaps into the future to catch bad head reports - limit := 2 * MaxHeaderFetch / 16 - count := 1 + int((int64(ceil)-from)/16) - if count > limit { - count = limit - } - go p.getAbsHeaders(uint64(from), count, 15, false) - - // Wait for the remote response to the head fetch - number, hash := uint64(0), common.Hash{} - timeout := time.After(d.requestTTL()) - - for finished := false; !finished; { - select { - case <-d.cancelCh: - return 0, errCancelHeaderFetch - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packet.(*headerPack).headers - if len(headers) == 0 { - glog.V(logger.Warn).Infof("%v: empty head header set", p) - return 0, errEmptyHeaderSet - } - // Make sure the peer's reply conforms to the request - for i := 0; i < len(headers); i++ { - if number := headers[i].Number.Int64(); number != from+int64(i)*16 { - glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number) - return 0, errInvalidChain - } - } - // Check if a common ancestor was found - finished = true - for i := len(headers) - 
1; i >= 0; i-- { - - // Skip any headers that underflow/overflow our requested set - if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil { - continue - } - // Otherwise check if we already know the header or not - if (d.mode == FullSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) { - number, hash = headers[i].Number.Uint64(), headers[i].Hash() - - // If every header is known, even future ones, the peer straight out lied about its head - if number > height && i == limit-1 { - glog.V(logger.Warn).Infof("%v: lied about chain head: reported %d, found above %d", p, height, number) - return 0, errStallingPeer - } - break - } - } - - case <-timeout: - glog.V(logger.Debug).Infof("%v: head header timeout", p) - return 0, errTimeout - - case <-d.bodyCh: - case <-d.stateCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore - } - } - // If the head fetch already found an ancestor, return - if !common.EmptyHash(hash) { - if int64(number) <= floor { - glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor) - return 0, errInvalidAncestor - } - glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4]) - return number, nil - } - // Ancestor not found, we need to binary search over our chain - start, end := uint64(0), head - if floor > 0 { - start = uint64(floor) - } - for start+1 < end { - // Split our chain interval in two, and request the hash to cross check - check := (start + end) / 2 - - timeout := time.After(d.requestTTL()) - go p.getAbsHeaders(uint64(check), 1, 0, false) - - // Wait until a reply arrives to this request - for arrived := false; !arrived; { - select { - case <-d.cancelCh: - return 0, errCancelHeaderFetch - - case packer := <-d.headerCh: - // Discard anything not from the origin peer - if packer.PeerId() != p.id { - glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packer.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packer.(*headerPack).headers - if len(headers) != 1 { - glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers)) - return 0, errBadPeer - } - arrived = true - - // Modify the search interval based on the response - if (d.mode == FullSync && !d.hasBlockAndState(headers[0].Hash())) || (d.mode != FullSync && !d.hasHeader(headers[0].Hash())) { - end = check - break - } - header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists - if header.Number.Uint64() != check { - glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check) - return 0, errBadPeer - } - start = check - - case <-timeout: - glog.V(logger.Debug).Infof("%v: search header timeout", p) - return 0, errTimeout - - case <-d.bodyCh: - case <-d.stateCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore - } - } - } - // Ensure valid ancestry and return - if int64(start) <= floor { - glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor) - return 0, errInvalidAncestor - } - glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4]) - return start, nil -} - -// fetchHeaders keeps retrieving headers concurrently from the number -// requested, until no more are returned, potentially throttling on the way. 
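Once the head-window probe fails, findAncestor above falls back to a textbook binary search over block numbers. Its core, as a standalone sketch (haveBlock is a hypothetical predicate; the search assumes it is monotonic, true up to the common ancestor and false beyond it):

```go
package main

import "fmt"

// findCommonAncestor returns the highest block number in [start, end) that
// haveBlock reports as known, mirroring the search loop in findAncestor.
func findCommonAncestor(start, end uint64, haveBlock func(uint64) bool) uint64 {
	for start+1 < end {
		check := (start + end) / 2
		if haveBlock(check) {
			start = check
		} else {
			end = check
		}
	}
	return start
}

func main() {
	// Locally we know blocks 0..42; the remote chain forked afterwards.
	fmt.Println(findCommonAncestor(0, 100, func(n uint64) bool { return n <= 42 }))
	// Output: 42
}
```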
To -// facilitate concurrency but still protect against malicious nodes sending bad -// headers, we construct a header chain skeleton using the "origin" peer we are -// syncing with, and fill in the missing headers using anyone else. Headers from -// other peers are only accepted if they map cleanly to the skeleton. If no one -// can fill in the skeleton - not even the origin peer - it's assumed invalid and -// the origin is dropped. -func (d *Downloader) fetchHeaders(p *peer, from uint64) error { - glog.V(logger.Debug).Infof("%v: directing header downloads from #%d", p, from) - defer glog.V(logger.Debug).Infof("%v: header download terminated", p) - - // Create a timeout timer, and the associated header fetcher - skeleton := true // Skeleton assembly phase or finishing up - request := time.Now() // time of the last skeleton fetch request - timeout := time.NewTimer(0) // timer to dump a non-responsive active peer - <-timeout.C // timeout channel should be initially empty - defer timeout.Stop() - - getHeaders := func(from uint64) { - request = time.Now() - timeout.Reset(d.requestTTL()) - - if skeleton { - glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from) - go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) - } else { - glog.V(logger.Detail).Infof("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from) - go p.getAbsHeaders(from, MaxHeaderFetch, 0, false) - } - } - // Start pulling the header chain skeleton until all is done - getHeaders(from) - - for { - select { - case <-d.cancelCh: - return errCancelHeaderFetch - - case packet := <-d.headerCh: - // Make sure the active peer is giving us the skeleton headers - if packet.PeerId() != p.id { - glog.V(logger.Debug).Infof("Received skeleton headers from incorrect peer (%s)", packet.PeerId()) - break - } - headerReqTimer.UpdateSince(request) - timeout.Stop() - - // If the skeleton's finished, pull any remaining head headers directly from the origin - if packet.Items() == 0 && skeleton { - skeleton = false - getHeaders(from) - continue - } - // If no more headers are inbound, notify the content fetchers and return - if packet.Items() == 0 { - glog.V(logger.Debug).Infof("%v: no available headers", p) - select { - case d.headerProcCh <- nil: - return nil - case <-d.cancelCh: - return errCancelHeaderFetch - } - } - headers := packet.(*headerPack).headers - - // If we received a skeleton batch, resolve internals concurrently - if skeleton { - filled, proced, err := d.fillHeaderSkeleton(from, headers) - if err != nil { - glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err) - return errInvalidChain - } - headers = filled[proced:] - from += uint64(proced) - } - // Insert all the new headers and fetch the next batch - if len(headers) > 0 { - glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) - select { - case d.headerProcCh <- headers: - case <-d.cancelCh: - return errCancelHeaderFetch - } - from += uint64(len(headers)) - } - getHeaders(from) - - case <-timeout.C: - // Header retrieval timed out, consider the peer bad and drop - glog.V(logger.Debug).Infof("%v: header request timed out", p) - headerTimeoutMeter.Mark(1) - d.dropPeer(p.id) - - // Finish the sync gracefully instead of dumping the gathered data though - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: - } - } - select { - case d.headerProcCh <- nil: - case 
<-d.cancelCh: - } - return errBadPeer - } - } -} - -// fillHeaderSkeleton concurrently retrieves headers from all our available peers -// and maps them to the provided skeleton header chain. -// -// Any partial results from the beginning of the skeleton are (if possible) forwarded -// immediately to the header processor to keep the rest of the pipeline full even -// in the case of header stalls. -// -// The method returns the entire filled skeleton and also the number of headers -// already forwarded for processing. -func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { - glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from) - d.queue.ScheduleSkeleton(from, skeleton) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*headerPack) - return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh) - } - expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) } - throttle = func() bool { return false } - reserve = func(p *peer, count int) (*fetchRequest, bool, error) { - return d.queue.ReserveHeaders(p, count), false, nil - } - fetch = func(p *peer, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } - capacity = func(p *peer) int { return p.HeaderCapacity(d.requestRTT()) } - setIdle = func(p *peer, accepted int) { p.SetHeadersIdle(accepted) } - ) - err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire, - d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve, - nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header") - - glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err) - - filled, proced := d.queue.RetrieveHeaders() - return filled, proced, err -} - -// fetchBodies iteratively downloads the scheduled block bodies, taking any -// available peers, reserving a chunk of blocks for each, waiting for delivery -// and also periodically checking for timeouts. -func (d *Downloader) fetchBodies(from uint64) error { - glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*bodyPack) - return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles) - } - expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) } - fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) } - capacity = func(p *peer) int { return p.BlockCapacity(d.requestRTT()) } - setIdle = func(p *peer, accepted int) { p.SetBodiesIdle(accepted) } - ) - err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, - d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies, - d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body") - - glog.V(logger.Debug).Infof("Block body download terminated: %v", err) - return err -} - -// fetchReceipts iteratively downloads the scheduled block receipts, taking any -// available peers, reserving a chunk of receipts for each, waiting for delivery -// and also periodically checking for timeouts.
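fillHeaderSkeleton and fetchBodies above (and fetchReceipts/fetchNodeData below) never duplicate the retrieval loop; each hands a bundle of closures to the shared fetchParts. The shape of that design, reduced to a runnable toy (names and types are illustrative, not the original API):

```go
package main

import "fmt"

// callbacks bundles the per-type behaviour injected into one generic loop.
type callbacks struct {
	pending func() int               // how many items still need downloading
	reserve func(n int) []uint64     // claim up to n items for a request
	fetch   func(items []uint64) error // fire the (mock) network request
}

// fetchParts is the shared loop: reserve, fetch, repeat until done.
func fetchParts(cb callbacks) error {
	for cb.pending() > 0 {
		batch := cb.reserve(4)
		if err := cb.fetch(batch); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	queue := []uint64{1, 2, 3, 4, 5, 6}
	cb := callbacks{
		pending: func() int { return len(queue) },
		reserve: func(n int) []uint64 {
			if n > len(queue) {
				n = len(queue)
			}
			batch := queue[:n]
			queue = queue[n:]
			return batch
		},
		fetch: func(items []uint64) error { fmt.Println("fetching", items); return nil },
	}
	_ = fetchParts(cb)
}
```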
-func (d *Downloader) fetchReceipts(from uint64) error { - glog.V(logger.Debug).Infof("Downloading receipts from #%d", from) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*receiptPack) - return d.queue.DeliverReceipts(pack.peerId, pack.receipts) - } - expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) } - fetch = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) } - capacity = func(p *peer) int { return p.ReceiptCapacity(d.requestRTT()) } - setIdle = func(p *peer, accepted int) { p.SetReceiptsIdle(accepted) } - ) - err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, - d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts, - d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt") - - glog.V(logger.Debug).Infof("Receipt download terminated: %v", err) - return err -} - -// fetchNodeData iteratively downloads the scheduled state trie nodes, taking any -// available peers, reserving a chunk of nodes for each, waiting for delivery and -// also periodically checking for timeouts. -func (d *Downloader) fetchNodeData() error { - glog.V(logger.Debug).Infof("Downloading node state data") - - var ( - deliver = func(packet dataPack) (int, error) { - start := time.Now() - return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(err error, delivered int) { - // If the peer returned old-requested data, forgive - if err == trie.ErrNotRequested { - glog.V(logger.Info).Infof("peer %s: replied to stale state request, forgiving", packet.PeerId()) - return - } - if err != nil { - // If the node data processing failed, the root hash is very wrong, abort - glog.V(logger.Error).Infof("peer %s: state processing failed: %v", packet.PeerId(), err) - d.cancel() - return - } - // Processing succeeded, notify state fetcher of continuation - pending := d.queue.PendingNodeData() - if pending > 0 { - select { - case d.stateWakeCh <- true: - default: - } - } - d.syncStatsLock.Lock() - d.syncStatsStateDone += uint64(delivered) - d.syncStatsLock.Unlock() - - // Log a message to the user and return - if delivered > 0 { - glog.V(logger.Info).Infof("imported %d state entries in %v: processed %d, pending at least %d", delivered, time.Since(start), d.syncStatsStateDone, pending) - } - }) - } - expire = func() map[string]int { return d.queue.ExpireNodeData(d.requestTTL()) } - throttle = func() bool { return false } - reserve = func(p *peer, count int) (*fetchRequest, bool, error) { - return d.queue.ReserveNodeData(p, count), false, nil - } - fetch = func(p *peer, req *fetchRequest) error { return p.FetchNodeData(req) } - capacity = func(p *peer) int { return p.NodeDataCapacity(d.requestRTT()) } - setIdle = func(p *peer, accepted int) { p.SetNodeDataIdle(accepted) } - ) - err := d.fetchParts(errCancelStateFetch, d.stateCh, deliver, d.stateWakeCh, expire, - d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch, - d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State") - - glog.V(logger.Debug).Infof("Node state data download terminated: %v", err) - return err -} - -// fetchParts iteratively downloads scheduled block parts, taking any available -// peers, reserving a chunk of fetch requests for each, waiting for delivery and -// also periodically checking for timeouts.
-// -// As the scheduling/timeout logic mostly is the same for all downloaded data -// types, this method is used by each for data gathering and is instrumented with -// various callbacks to handle the slight differences between processing them. -// -// The instrumentation parameters: -// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) -// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) -// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) -// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) -// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) -// - pending: task callback for the number of requests still needing download (detect completion/non-completability) -// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) -// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) -// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) -// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) -// - fetch: network callback to actually send a particular download request to a physical remote peer -// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) -// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) -// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks -// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) -// - kind: textual label of the type being downloaded to display in log messages -func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, - expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error), - fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, - idle func() ([]*peer, int), setIdle func(*peer, int), kind string) error { - - // Create a ticker to detect expired retrieval tasks - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - update := make(chan struct{}, 1) - - // Prepare the queue and fetch block parts until the block header fetcher's done - finished := false - for { - select { - case <-d.cancelCh: - return errCancel - - case packet := <-deliveryCh: - // If the peer was previously banned and failed to deliver its pack - // in a reasonable time frame, ignore its message. - if peer := d.peers.Peer(packet.PeerId()); peer != nil { - // Deliver the received chunk of data and check chain validity - accepted, err := deliver(packet) - if err == errInvalidChain { - return err - } - // Unless a peer delivered something other than requested (usually - // caused by a timed out request which came through in the end), set it to - // idle. If the delivery's stale, the peer should have already been idled.
- if err != errStaleDelivery { - setIdle(peer, accepted) - } - // Issue a log to the user to see what's going on - switch { - case err == nil && packet.Items() == 0: - glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind)) - case err == nil: - glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)) - default: - glog.V(logger.Detail).Infof("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err) - } - } - // Blocks assembled, try to update the progress - select { - case update <- struct{}{}: - default: - } - - case cont := <-wakeCh: - // The header fetcher sent a continuation flag, check if it's done - if !cont { - finished = true - } - // Headers arrive, try to update the progress - select { - case update <- struct{}{}: - default: - } - - case <-ticker.C: - // Sanity check update the progress - select { - case update <- struct{}{}: - default: - } - - case <-update: - // Short circuit if we lost all our peers - if d.peers.Len() == 0 { - return errNoPeers - } - // Check for fetch request timeouts and demote the responsible peers - for pid, fails := range expire() { - if peer := d.peers.Peer(pid); peer != nil { - // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps - // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal request - // times out, the peer is of no use to the sync and needs to be dropped. - // - // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth - // and latency of a peer separately, which requires pushing the measured capacity a bit and seeing - // how response times react, so it always requests one more than the minimum (i.e. min 2). - if fails > 2 { - glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind)) - setIdle(peer, 0) - } else { - glog.V(logger.Debug).Infof("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind)) - d.dropPeer(pid) - } - } - } - // If there's nothing more to fetch, wait or terminate - if pending() == 0 { - if !inFlight() && finished { - glog.V(logger.Debug).Infof("%s fetching completed", kind) - return nil - } - break - } - // Send a download request to all idle peers, until throttled - progressed, throttled, running := false, false, inFlight() - idles, total := idle() - - for _, peer := range idles { - // Short circuit if throttling activated - if throttle() { - throttled = true - break - } - // Reserve a chunk of fetches for a peer. A nil can mean either that - // no more headers are available, or that the peer is known not to - // have them.
- request, progress, err := reserve(peer, capacity(peer)) - if err != nil { - return err - } - if progress { - progressed = true - } - if request == nil { - continue - } - if glog.V(logger.Detail) { - if request.From > 0 { - glog.Infof("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From) - } else if len(request.Headers) > 0 { - glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number) - } else { - glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind)) - } - } - // Fetch the chunk and make sure any errors return the hashes to the queue - if fetchHook != nil { - fetchHook(request.Headers) - } - if err := fetch(peer, request); err != nil { - // Although we could try and make an attempt to fix this, this error really - // means that we've double allocated a fetch task to a peer. If that is the - // case, the internal state of the downloader and the queue is very wrong so - // better hard crash and note the error instead of silently accumulating into - // a much bigger issue. - panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, strings.ToLower(kind))) - } - running = true - } - // Make sure that we have peers available for fetching. If all peers have been tried - // and all failed throw an error - if !progressed && !throttled && !running && len(idles) == total && pending() > 0 { - return errPeersUnavailable - } - } - } -} - -// processHeaders takes batches of retrieved headers from an input channel and -// keeps processing and scheduling them into the header chain and downloader's -// queue until the stream ends or a failure occurs. -func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { - // Calculate the pivoting point for switching from fast to slow sync - pivot := d.queue.FastSyncPivot() - - // Keep a count of uncertain headers to roll back - rollback := []*types.Header{} - defer func() { - if len(rollback) > 0 { - // Flatten the headers and roll them back - hashes := make([]common.Hash, len(rollback)) - for i, header := range rollback { - hashes[i] = header.Hash() - } - lastHeader, lastFastBlock, lastBlock := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number() - d.rollback(hashes) - glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)", - len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, d.headFastBlock().Number(), lastBlock, d.headBlock().Number()) - - // If we're already past the pivot point, this could be an attack, tread carefully - if rollback[len(rollback)-1].Number.Uint64() > pivot { - // If we didn't ever fail, lock in the pivot header (must! not! change!)
- if d.fsPivotFails == 0 { - for _, header := range rollback { - if header.Number.Uint64() == pivot { - glog.V(logger.Warn).Infof("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]) - d.fsPivotLock = header - } - } - } - d.fsPivotFails++ - } - } - }() - - // Wait for batches of headers to process - gotHeaders := false - - for { - select { - case <-d.cancelCh: - return errCancelHeaderProcessing - - case headers := <-d.headerProcCh: - // Terminate header processing if we synced up - if len(headers) == 0 { - // Notify everyone that headers are fully processed - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: - } - } - // If no headers were retrieved at all, the peer violated its TD promise that it had a - // better chain compared to ours. The only exception is if its promised blocks were - // already imported by other means (e.g. fetcher): - // - // R , L : Both at block 10 - // R: Mine block 11, and propagate it to L - // L: Queue block 11 for import - // L: Notice that R's head and TD increased compared to ours, start sync - // L: Import of block 11 finishes - // L: Sync begins, and finds common ancestor at 11 - // L: Request new headers up from 11 (R's TD was higher, it must have something) - // R: Nothing to give - if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 { - return errStallingPeer - } - // If fast or light syncing, ensure promised headers are indeed delivered. This is - // needed to detect scenarios where an attacker feeds a bad pivot and then bails out - // of delivering the post-pivot blocks that would flag the invalid content. - // - // This check cannot be executed "as is" for full imports, since blocks may still be - // queued for processing when the header download completes. However, as long as the - // peer gave us something useful, we're already happy/progressed (above check). - if d.mode == FastSync || d.mode == LightSync { - if td.Cmp(d.getTd(d.headHeader().Hash())) > 0 { - return errStallingPeer - } - } - // Disable any rollback and return - rollback = nil - return nil - } - // Otherwise split the chunk of headers into batches and process them - gotHeaders = true - - for len(headers) > 0 { - // Terminate if something failed in between processing chunks - select { - case <-d.cancelCh: - return errCancelHeaderProcessing - default: - } - // Select the next chunk of headers to import - limit := maxHeadersProcess - if limit > len(headers) { - limit = len(headers) - } - chunk := headers[:limit] - - // In case of header only syncing, validate the chunk immediately - if d.mode == FastSync || d.mode == LightSync { - // Collect the yet unknown headers to mark them as uncertain - unknown := make([]*types.Header, 0, len(headers)) - for _, header := range chunk { - if !d.hasHeader(header.Hash()) { - unknown = append(unknown, header) - } - } - // If we're importing pure headers, verify based on their recentness - frequency := fsHeaderCheckFrequency - if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { - frequency = 1 - } - if n, err := d.insertHeaders(chunk, frequency); err != nil { - // If some headers were inserted, add them too to the rollback list - if n > 0 { - rollback = append(rollback, chunk[:n]...)
- } - glog.V(logger.Debug).Infof("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err) - return errInvalidChain - } - // All verifications passed, store newly found uncertain headers - rollback = append(rollback, unknown...) - if len(rollback) > fsHeaderSafetyNet { - rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) - } - } - // If we're fast syncing and just pulled in the pivot, make sure it's the one locked in - if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot { - if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() { - glog.V(logger.Warn).Infof("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]) - return errInvalidChain - } - } - // Unless we're doing light chains, schedule the headers for associated content retrieval - if d.mode == FullSync || d.mode == FastSync { - // If we've reached the allowed number of pending headers, stall a bit - for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { - select { - case <-d.cancelCh: - return errCancelHeaderProcessing - case <-time.After(time.Second): - } - } - // Otherwise insert the headers for content retrieval - inserts := d.queue.Schedule(chunk, origin) - if len(inserts) != len(chunk) { - glog.V(logger.Debug).Infof("stale headers") - return errBadPeer - } - } - headers = headers[limit:] - origin += uint64(limit) - } - // Signal the content downloaders of the availability of new tasks - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { - select { - case ch <- true: - default: - } - } - } - } -} - -// processContent takes fetch results from the queue and tries to import them -// into the chain. The type of import operation will depend on the result contents.
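One defensive detail in processHeaders above deserves a callout: the rollback list is clipped to the newest fsHeaderSafetyNet entries, so a peer feeding endless bad headers cannot grow it without bound. The clipping idiom in isolation (header numbers stand in for headers to keep the sketch small):

```go
const fsHeaderSafetyNet = 2048 // same value as the deleted constants block

// capRollback appends freshly inserted entries and keeps only the newest
// fsHeaderSafetyNet of them, reusing the backing array exactly like the
// append(rollback[:0], ...) line above.
func capRollback(rollback, added []uint64) []uint64 {
	rollback = append(rollback, added...)
	if len(rollback) > fsHeaderSafetyNet {
		rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
	}
	return rollback
}
```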
-func (d *Downloader) processContent() error { - pivot := d.queue.FastSyncPivot() - for { - results := d.queue.WaitResults() - if len(results) == 0 { - return nil // queue empty - } - if d.chainInsertHook != nil { - d.chainInsertHook(results) - } - // Actually import the blocks - if glog.V(logger.Debug) { - first, last := results[0].Header, results[len(results)-1].Header - glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4]) - } - for len(results) != 0 { - // Check for any termination requests - select { - case <-d.quitCh: - return errCancelContentProcessing - default: - } - // Retrieve the next batch of results to import - var ( - blocks = make([]*types.Block, 0, maxResultsProcess) - receipts = make([]types.Receipts, 0, maxResultsProcess) - ) - items := int(math.Min(float64(len(results)), float64(maxResultsProcess))) - for _, result := range results[:items] { - switch { - case d.mode == FullSync: - blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)) - case d.mode == FastSync: - blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)) - if result.Header.Number.Uint64() <= pivot { - receipts = append(receipts, result.Receipts) - } - } - } - // Try to process the results, aborting if there's an error - var ( - err error - index int - ) - switch { - case len(receipts) > 0: - index, err = d.insertReceipts(blocks, receipts) - if err == nil && blocks[len(blocks)-1].NumberU64() == pivot { - glog.V(logger.Debug).Infof("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4]) - index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash()) - } - default: - index, err = d.insertBlocks(blocks) - } - if err != nil { - glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err) - return errInvalidChain - } - // Shift the results to the next batch - results = results[items:] - } - } -} - -// DeliverHeaders injects a new batch of block headers received from a remote -// node into the download schedule. -func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) { - return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) -} - -// DeliverBodies injects a new batch of block bodies received from a remote node. -func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) { - return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter) -} - -// DeliverReceipts injects a new batch of receipts received from a remote node. -func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) { - return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) -} - -// DeliverNodeData injects a new batch of node state data received from a remote node. -func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) { - return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) -} - -// deliver injects a new batch of data received from a remote node.
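The deliver helper defined just below blocks on exactly two outcomes, the packet is accepted or the sync dies, so a slow queue can never wedge a network handler. Its select, distilled into a standalone sketch (errNoSyncActive stands in for the package's error; the int payload is illustrative):

```go
package deliver

import "errors"

var errNoSyncActive = errors.New("no sync active")

// send delivers v to dest unless the sync is cancelled first; a nil cancel
// channel means no sync was ever started, mirroring deliver below.
func send(dest chan<- int, cancel <-chan struct{}, v int) error {
	if cancel == nil {
		return errNoSyncActive
	}
	select {
	case dest <- v:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}
```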
-func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { - // Update the delivery metrics for both good and failed deliveries - inMeter.Mark(int64(packet.Items())) - defer func() { - if err != nil { - dropMeter.Mark(int64(packet.Items())) - } - }() - // Deliver or abort if the sync is canceled while queuing - d.cancelLock.RLock() - cancel := d.cancelCh - d.cancelLock.RUnlock() - if cancel == nil { - return errNoSyncActive - } - select { - case destCh <- packet: - return nil - case <-cancel: - return errNoSyncActive - } -} - -// qosTuner is the quality of service tuning loop that occasionally gathers the -// peer latency statistics and updates the estimated request round trip time. -func (d *Downloader) qosTuner() { - for { - // Retrieve the current median RTT and integrate into the previous target RTT - rtt := time.Duration(float64(1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT())) - atomic.StoreUint64(&d.rttEstimate, uint64(rtt)) - - // A new RTT cycle passed, increase our confidence in the estimated RTT - conf := atomic.LoadUint64(&d.rttConfidence) - conf = conf + (1000000-conf)/2 - atomic.StoreUint64(&d.rttConfidence, conf) - - // Log the new QoS values and sleep until the next RTT - glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()) - select { - case <-d.quitCh: - return - case <-time.After(rtt): - } - } -} - -// qosReduceConfidence is meant to be called when a new peer joins the downloader's -// peer set, needing to reduce the confidence we have in our QoS estimates. -func (d *Downloader) qosReduceConfidence() { - // If we have a single peer, confidence is always 1 - peers := uint64(d.peers.Len()) - if peers == 1 { - atomic.StoreUint64(&d.rttConfidence, 1000000) - return - } - // If we have a ton of peers, don't drop confidence - if peers >= uint64(qosConfidenceCap) { - return - } - // Otherwise drop the confidence factor - conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers - if float64(conf)/1000000 < rttMinConfidence { - conf = uint64(rttMinConfidence * 1000000) - } - atomic.StoreUint64(&d.rttConfidence, conf) - - rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate)) - glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()) -} - -// requestRTT returns the current target round trip time for a download request -// to complete in. -// -// Note, the returned RTT is .9 of the actually estimated RTT. The reason is that -// the downloader tries to adapt queries to the RTT, so multiple RTT values can -// be adapted to, but smaller ones are preferred (stabler download stream). -func (d *Downloader) requestRTT() time.Duration { - return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10 -} - -// requestTTL returns the current timeout allowance for a single download request -// to finish under.
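qosTuner above maintains the RTT target as a plain exponential moving average: rtt' = (1 - qosTuningImpact) * rtt + qosTuningImpact * medianRTT. With the 0.25 impact from the constants block, an old 20s estimate and a 4s measured median settle at 16s after one step, as this runnable check shows:

```go
package main

import (
	"fmt"
	"time"
)

const qosTuningImpact = 0.25 // same value as the deleted constants block

// tune performs one qosTuner step: blend the previous estimate with the
// freshly measured median peer RTT.
func tune(old, median time.Duration) time.Duration {
	return time.Duration((1-qosTuningImpact)*float64(old) + qosTuningImpact*float64(median))
}

func main() {
	fmt.Println(tune(20*time.Second, 4*time.Second)) // 16s
}
```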
-func (d *Downloader) requestTTL() time.Duration { - var ( - rtt = time.Duration(atomic.LoadUint64(&d.rttEstimate)) - conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0 - ) - ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf) - if ttl > ttlLimit { - ttl = ttlLimit - } - return ttl -} diff --git a/exp/downloader/downloader_test.go b/exp/downloader/downloader_test.go deleted file mode 100644 index c0fa3267593b6..0000000000000 --- a/exp/downloader/downloader_test.go +++ /dev/null @@ -1,1746 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -package downloader - -import ( - "errors" - "fmt" - "math/big" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/params" - "github.com/expanse-org/go-expanse/trie" -) - -var ( - testdb, _ = ethdb.NewMemDatabase() - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testAddress = crypto.PubkeyToAddress(testKey.PublicKey) - genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000)) -) - -// Reduce some of the parameters to make the tester faster. -func init() { - MaxForkAncestry = uint64(10000) - blockCacheLimit = 1024 -} - -// makeChain creates a chain of n blocks starting at and including parent. The -// returned hash chain is ordered head->parent. In addition, every 3rd block -// contains a transaction and every 5th an uncle to allow testing correct block -// reassembly.
-func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) { - // Generate the block chain - blocks, receipts := core.GenerateChain(nil, parent, testdb, n, func(i int, block *core.BlockGen) { - block.SetCoinbase(common.Address{seed}) - - // If a heavy chain is requested, delay blocks to raise difficulty - if heavy { - block.OffsetTime(-1) - } - // If the block number is a multiple of 3, send a bonus transaction to the miner - if parent == genesis && i%3 == 0 { - tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey) - if err != nil { - panic(err) - } - block.AddTx(tx) - } - // If the block number is a multiple of 5, add a bonus uncle to the block - if i > 0 && i%5 == 0 { - block.AddUncle(&types.Header{ - ParentHash: block.PrevBlock(i - 1).Hash(), - Number: big.NewInt(block.Number().Int64() - 1), - }) - } - }) - // Convert the block-chain into a hash-chain and header/block maps - hashes := make([]common.Hash, n+1) - hashes[len(hashes)-1] = parent.Hash() - - headerm := make(map[common.Hash]*types.Header, n+1) - headerm[parent.Hash()] = parent.Header() - - blockm := make(map[common.Hash]*types.Block, n+1) - blockm[parent.Hash()] = parent - - receiptm := make(map[common.Hash]types.Receipts, n+1) - receiptm[parent.Hash()] = parentReceipts - - for i, b := range blocks { - hashes[len(hashes)-i-2] = b.Hash() - headerm[b.Hash()] = b.Header() - blockm[b.Hash()] = b - receiptm[b.Hash()] = receipts[i] - } - return hashes, headerm, blockm, receiptm -} - -// makeChainFork creates two chains of length n, such that h1[:f] and -// h2[:f] are different but have a common suffix of length n-f. -func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) { - // Create the common suffix - hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts, false) - - // Create the forks, making the second heavier if non-balanced forks were requested - hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false) - hashes1 = append(hashes1, hashes[1:]...) - - heavy := false - if !balanced { - heavy = true - } - hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy) - hashes2 = append(hashes2, hashes[1:]...) - - for hash, header := range headers { - headers1[hash] = header - headers2[hash] = header - } - for hash, block := range blocks { - blocks1[hash] = block - blocks2[hash] = block - } - for hash, receipt := range receipts { - receipts1[hash] = receipt - receipts2[hash] = receipt - } - return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2 -} - -// downloadTester is a test simulator for mocking out a local block chain.
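makeChain stores its hash chain head-first: the parent (genesis) hash occupies the last slot and generated block i lands at index len(hashes)-i-2. A tiny runnable sketch of that ordering, with plain strings standing in for common.Hash values:

```go
// Sketch of the head->parent hash ordering built by makeChain.
package main

import "fmt"

func main() {
	parent := "genesis"
	blocks := []string{"block1", "block2", "block3"} // oldest first

	hashes := make([]string, len(blocks)+1)
	hashes[len(hashes)-1] = parent // parent hash goes last
	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b // block i counts back from the end
	}
	fmt.Println(hashes) // [block3 block2 block1 genesis]
}
```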
-type downloadTester struct { - stateDb ethdb.Database - downloader *Downloader - - ownHashes []common.Hash // Hash chain belonging to the tester - ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester - ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester - ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester - ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain - - peerHashes map[string][]common.Hash // Hash chain belonging to different test peers - peerHeaders map[string]map[common.Hash]*types.Header // Headers belonging to different test peers - peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers - peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers - peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains - - peerMissingStates map[string]map[common.Hash]bool // State entries that fast sync should not return - - lock sync.RWMutex -} - -// newTester creates a new downloader test mocker. -func newTester() *downloadTester { - tester := &downloadTester{ - ownHashes: []common.Hash{genesis.Hash()}, - ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, - ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, - ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil}, - ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()}, - peerHashes: make(map[string][]common.Hash), - peerHeaders: make(map[string]map[common.Hash]*types.Header), - peerBlocks: make(map[string]map[common.Hash]*types.Block), - peerReceipts: make(map[string]map[common.Hash]types.Receipts), - peerChainTds: make(map[string]map[common.Hash]*big.Int), - peerMissingStates: make(map[string]map[common.Hash]bool), - } - tester.stateDb, _ = ethdb.NewMemDatabase() - tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00}) - - tester.downloader = New(tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, - tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd, - tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer) - - return tester -} - -// terminate aborts any operations on the embedded downloader and releases all -// held resources. -func (dl *downloadTester) terminate() { - dl.downloader.Terminate() -} - -// sync starts synchronizing with a remote peer, blocking until it completes. -func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { - dl.lock.RLock() - hash := dl.peerHashes[id][0] - // If no particular TD was requested, load from the peer's blockchain - if td == nil { - td = big.NewInt(1) - if diff, ok := dl.peerChainTds[id][hash]; ok { - td = diff - } - } - dl.lock.RUnlock() - - // Synchronise with the chosen peer and ensure proper cleanup afterwards - err := dl.downloader.synchronise(id, hash, td, mode) - select { - case <-dl.downloader.cancelCh: - // Ok, downloader fully cancelled after sync cycle - default: - // Downloader is still accepting packets, can block a peer up - panic("downloader active post sync cycle") // panic will be caught by tester - } - return err -} - -// hasHeader checks if a header is present in the tester's canonical chain.
-func (dl *downloadTester) hasHeader(hash common.Hash) bool { - return dl.getHeader(hash) != nil -} - -// hasBlock checks if a block and its associated state are present in the tester's canonical chain. -func (dl *downloadTester) hasBlock(hash common.Hash) bool { - block := dl.getBlock(hash) - if block == nil { - return false - } - _, err := dl.stateDb.Get(block.Root().Bytes()) - return err == nil -} - -// getHeader retrieves a header from the tester's canonical chain. -func (dl *downloadTester) getHeader(hash common.Hash) *types.Header { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.ownHeaders[hash] -} - -// getBlock retrieves a block from the tester's canonical chain. -func (dl *downloadTester) getBlock(hash common.Hash) *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.ownBlocks[hash] -} - -// headHeader retrieves the current head header from the canonical chain. -func (dl *downloadTester) headHeader() *types.Header { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil { - return header - } - } - return genesis.Header() -} - -// headBlock retrieves the current head block from the canonical chain. -func (dl *downloadTester) headBlock() *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { - if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { - return block - } - } - } - return genesis -} - -// headFastBlock retrieves the current head fast-sync block from the canonical chain. -func (dl *downloadTester) headFastBlock() *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { - return block - } - } - return genesis -} - -// commitHeadBlock manually sets the head block to a given hash. -func (dl *downloadTester) commitHeadBlock(hash common.Hash) error { - // For now only check that the state trie is correct - if block := dl.getBlock(hash); block != nil { - _, err := trie.NewSecure(block.Root(), dl.stateDb) - return err - } - return fmt.Errorf("non existent block: %x", hash[:4]) -} - -// getTd retrieves the block's total difficulty from the canonical chain. -func (dl *downloadTester) getTd(hash common.Hash) *big.Int { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.ownChainTd[hash] -} - -// insertHeaders injects a new batch of headers into the simulated chain.
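The head accessors above all share one pattern: walk the canonical hash list from the tip backwards and return the first entry meeting the mode's requirement, with headBlock additionally demanding that the block's state is present (headFastBlock does not). A sketch of that reverse scan, with string-keyed maps standing in for the hash-indexed block and state stores:

```go
// Sketch of the reverse head scan used by headBlock: the head is the
// newest canonical block whose state has actually been synced.
package main

import "fmt"

func main() {
	hashes := []string{"genesis", "b1", "b2", "b3"} // oldest first
	blocks := map[string]bool{"genesis": true, "b1": true, "b2": true, "b3": true}
	state := map[string]bool{"genesis": true, "b1": true} // state synced only up to b1

	head := "genesis"
	for i := len(hashes) - 1; i >= 0; i-- {
		if blocks[hashes[i]] && state[hashes[i]] {
			head = hashes[i]
			break
		}
	}
	fmt.Println("head block:", head) // b1
}
```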
-func (dl *downloadTester) insertHeaders(headers []*types.Header, checkFreq int) (int, error) { - dl.lock.Lock() - defer dl.lock.Unlock() - - // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors - if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok { - return 0, errors.New("unknown parent") - } - for i := 1; i < len(headers); i++ { - if headers[i].ParentHash != headers[i-1].Hash() { - return i, errors.New("unknown parent") - } - } - // Do a full insert if pre-checks passed - for i, header := range headers { - if _, ok := dl.ownHeaders[header.Hash()]; ok { - continue - } - if _, ok := dl.ownHeaders[header.ParentHash]; !ok { - return i, errors.New("unknown parent") - } - dl.ownHashes = append(dl.ownHashes, header.Hash()) - dl.ownHeaders[header.Hash()] = header - dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty) - } - return len(headers), nil -} - -// insertBlocks injects a new batch of blocks into the simulated chain. -func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) { - dl.lock.Lock() - defer dl.lock.Unlock() - - for i, block := range blocks { - if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok { - return i, errors.New("unknown parent") - } else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil { - return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err) - } - if _, ok := dl.ownHeaders[block.Hash()]; !ok { - dl.ownHashes = append(dl.ownHashes, block.Hash()) - dl.ownHeaders[block.Hash()] = block.Header() - } - dl.ownBlocks[block.Hash()] = block - dl.stateDb.Put(block.Root().Bytes(), []byte{0x00}) - dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty()) - } - return len(blocks), nil -} - -// insertReceipts injects a new batch of receipts into the simulated chain. -func (dl *downloadTester) insertReceipts(blocks types.Blocks, receipts []types.Receipts) (int, error) { - dl.lock.Lock() - defer dl.lock.Unlock() - - for i := 0; i < len(blocks) && i < len(receipts); i++ { - if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok { - return i, errors.New("unknown owner") - } - if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok { - return i, errors.New("unknown parent") - } - dl.ownBlocks[blocks[i].Hash()] = blocks[i] - dl.ownReceipts[blocks[i].Hash()] = receipts[i] - } - return len(blocks), nil -} - -// rollback removes some recently added elements from the chain. -func (dl *downloadTester) rollback(hashes []common.Hash) { - dl.lock.Lock() - defer dl.lock.Unlock() - - for i := len(hashes) - 1; i >= 0; i-- { - if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] { - dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1] - } - delete(dl.ownChainTd, hashes[i]) - delete(dl.ownHeaders, hashes[i]) - delete(dl.ownReceipts, hashes[i]) - delete(dl.ownBlocks, hashes[i]) - } -} - -// newPeer registers a new block download source into the downloader. -func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error { - return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0) -} - -// newSlowPeer registers a new block download source into the downloader, with a -// specific delay time on processing the network packets sent to it, simulating -// potentially slow network IO.
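Both insertHeaders and insertBlocks above keep a running total-difficulty map: each accepted item's TD is its parent's TD plus its own difficulty, so later fork comparisons reduce to two map lookups. A self-contained sketch of that bookkeeping, with simplified types standing in for common.Hash and the tester's maps:

```go
// Sketch of the TD accumulation performed on header/block insert.
package main

import (
	"fmt"
	"math/big"
)

type header struct {
	hash, parent string
	difficulty   *big.Int
}

func main() {
	td := map[string]*big.Int{"genesis": big.NewInt(131072)}
	chain := []header{
		{"b1", "genesis", big.NewInt(131072)},
		{"b2", "b1", big.NewInt(131136)},
	}
	for _, h := range chain {
		// TD of a block = TD of its parent + its own difficulty.
		td[h.hash] = new(big.Int).Add(td[h.parent], h.difficulty)
	}
	fmt.Println("TD at b2:", td["b2"]) // 393280
}
```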
-func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error { - dl.lock.Lock() - defer dl.lock.Unlock() - - var err error - switch version { - case 62: - err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil) - case 63: - err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay)) - case 64: - err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay)) - } - if err == nil { - // Assign the owned hashes, headers and blocks to the peer (deep copy) - dl.peerHashes[id] = make([]common.Hash, len(hashes)) - copy(dl.peerHashes[id], hashes) - - dl.peerHeaders[id] = make(map[common.Hash]*types.Header) - dl.peerBlocks[id] = make(map[common.Hash]*types.Block) - dl.peerReceipts[id] = make(map[common.Hash]types.Receipts) - dl.peerChainTds[id] = make(map[common.Hash]*big.Int) - dl.peerMissingStates[id] = make(map[common.Hash]bool) - - genesis := hashes[len(hashes)-1] - if header := headers[genesis]; header != nil { - dl.peerHeaders[id][genesis] = header - dl.peerChainTds[id][genesis] = header.Difficulty - } - if block := blocks[genesis]; block != nil { - dl.peerBlocks[id][genesis] = block - dl.peerChainTds[id][genesis] = block.Difficulty() - } - - for i := len(hashes) - 2; i >= 0; i-- { - hash := hashes[i] - - if header, ok := headers[hash]; ok { - dl.peerHeaders[id][hash] = header - if _, ok := dl.peerHeaders[id][header.ParentHash]; ok { - dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash]) - } - } - if block, ok := blocks[hash]; ok { - dl.peerBlocks[id][hash] = block - if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok { - dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()]) - } - } - if receipt, ok := receipts[hash]; ok { - dl.peerReceipts[id][hash] = receipt - } - } - } - return err -} - -// dropPeer simulates a hard peer removal from the connection pool. -func (dl *downloadTester) dropPeer(id string) { - dl.lock.Lock() - defer dl.lock.Unlock() - - delete(dl.peerHashes, id) - delete(dl.peerHeaders, id) - delete(dl.peerBlocks, id) - delete(dl.peerChainTds, id) - - dl.downloader.UnregisterPeer(id) -} - -// peerCurrentHeadFn constructs a function to retrieve a peer's current head hash -// and total difficulty. -func (dl *downloadTester) peerCurrentHeadFn(id string) func() (common.Hash, *big.Int) { - return func() (common.Hash, *big.Int) { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.peerHashes[id][0], nil - } -} - -// peerGetRelHeadersFn constructs a GetBlockHeaders function based on a hashed -// origin; associated with a particular peer in the download tester. The returned -// function can be used to retrieve batches of headers from the particular peer. 
-func (dl *downloadTester) peerGetRelHeadersFn(id string, delay time.Duration) func(common.Hash, int, int, bool) error { - return func(origin common.Hash, amount int, skip int, reverse bool) error { - // Find the canonical number of the hash - dl.lock.RLock() - number := uint64(0) - for num, hash := range dl.peerHashes[id] { - if hash == origin { - number = uint64(len(dl.peerHashes[id]) - num - 1) - break - } - } - dl.lock.RUnlock() - - // Use the absolute header fetcher to satisfy the query - return dl.peerGetAbsHeadersFn(id, delay)(number, amount, skip, reverse) - } -} - -// peerGetAbsHeadersFn constructs a GetBlockHeaders function based on a numbered -// origin; associated with a particular peer in the download tester. The returned -// function can be used to retrieve batches of headers from the particular peer. -func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) func(uint64, int, int, bool) error { - return func(origin uint64, amount int, skip int, reverse bool) error { - time.Sleep(delay) - - dl.lock.RLock() - defer dl.lock.RUnlock() - - // Gather the next batch of headers - hashes := dl.peerHashes[id] - headers := dl.peerHeaders[id] - result := make([]*types.Header, 0, amount) - for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ { - if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok { - result = append(result, header) - } - } - // Delay delivery a bit to allow attacks to unfold - go func() { - time.Sleep(time.Millisecond) - dl.downloader.DeliverHeaders(id, result) - }() - return nil - } -} - -// peerGetBodiesFn constructs a getBlockBodies method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of block bodies from the particularly requested peer. -func (dl *downloadTester) peerGetBodiesFn(id string, delay time.Duration) func([]common.Hash) error { - return func(hashes []common.Hash) error { - time.Sleep(delay) - - dl.lock.RLock() - defer dl.lock.RUnlock() - - blocks := dl.peerBlocks[id] - - transactions := make([][]*types.Transaction, 0, len(hashes)) - uncles := make([][]*types.Header, 0, len(hashes)) - - for _, hash := range hashes { - if block, ok := blocks[hash]; ok { - transactions = append(transactions, block.Transactions()) - uncles = append(uncles, block.Uncles()) - } - } - go dl.downloader.DeliverBodies(id, transactions, uncles) - - return nil - } -} - -// peerGetReceiptsFn constructs a getReceipts method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of block receipts from the particularly requested peer. -func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func([]common.Hash) error { - return func(hashes []common.Hash) error { - time.Sleep(delay) - - dl.lock.RLock() - defer dl.lock.RUnlock() - - receipts := dl.peerReceipts[id] - - results := make([][]*types.Receipt, 0, len(hashes)) - for _, hash := range hashes { - if receipt, ok := receipts[hash]; ok { - results = append(results, receipt) - } - } - go dl.downloader.DeliverReceipts(id, results) - - return nil - } -} - -// peerGetNodeDataFn constructs a getNodeData method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of node state data from the particularly requested peer. 
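Because the peer's hash list is ordered head->parent, peerGetAbsHeadersFn above has to translate an ascending block-number query (origin, amount, skip) into descending slice indices: block number origin sits at len(hashes)-origin-1 and each further header steps skip+1 numbers up the chain. A runnable sketch of just that index arithmetic (strings stand in for hashes; the reverse flag is omitted):

```go
// Sketch of the index math in peerGetAbsHeadersFn.
package main

import "fmt"

func main() {
	// hashes[0] is the head (block 3), hashes[3] the genesis (block 0).
	hashes := []string{"b3", "b2", "b1", "genesis"}
	origin, amount, skip := 0, 2, 1 // request blocks 0 and 2

	var result []string
	for i := 0; i < amount && len(hashes)-origin-1-i*(skip+1) >= 0; i++ {
		result = append(result, hashes[len(hashes)-origin-1-i*(skip+1)])
	}
	fmt.Println(result) // [genesis b2], i.e. block numbers 0 and 2
}
```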
-func (dl *downloadTester) peerGetNodeDataFn(id string, delay time.Duration) func([]common.Hash) error { - return func(hashes []common.Hash) error { - time.Sleep(delay) - - dl.lock.RLock() - defer dl.lock.RUnlock() - - results := make([][]byte, 0, len(hashes)) - for _, hash := range hashes { - if data, err := testdb.Get(hash.Bytes()); err == nil { - if !dl.peerMissingStates[id][hash] { - results = append(results, data) - } - } - } - go dl.downloader.DeliverNodeData(id, results) - - return nil - } -} - -// assertOwnChain checks if the local chain contains the correct number of items -// of the various chain components. -func assertOwnChain(t *testing.T, tester *downloadTester, length int) { - assertOwnForkedChain(t, tester, 1, []int{length}) -} - -// assertOwnForkedChain checks if the local forked chain contains the correct -// number of items of the various chain components. -func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) { - // Initialize the counters for the first fork - headers, blocks := lengths[0], lengths[0] - - minReceipts, maxReceipts := lengths[0]-fsMinFullBlocks-fsPivotInterval, lengths[0]-fsMinFullBlocks - if minReceipts < 0 { - minReceipts = 1 - } - if maxReceipts < 0 { - maxReceipts = 1 - } - // Update the counters for each subsequent fork - for _, length := range lengths[1:] { - headers += length - common - blocks += length - common - - minReceipts += length - common - fsMinFullBlocks - fsPivotInterval - maxReceipts += length - common - fsMinFullBlocks - } - switch tester.downloader.mode { - case FullSync: - minReceipts, maxReceipts = 1, 1 - case LightSync: - blocks, minReceipts, maxReceipts = 1, 1, 1 - } - if hs := len(tester.ownHeaders); hs != headers { - t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) - } - if bs := len(tester.ownBlocks); bs != blocks { - t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) - } - if rs := len(tester.ownReceipts); rs < minReceipts || rs > maxReceipts { - t.Fatalf("synchronised receipts mismatch: have %v, want between [%v, %v]", rs, minReceipts, maxReceipts) - } - // Verify the state trie too for fast syncs - if tester.downloader.mode == FastSync { - index := 0 - if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common { - index = pivot - } else { - index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot) - } - if index > 0 { - if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, tester.stateDb); statedb == nil || err != nil { - t.Fatalf("state reconstruction failed: %v", err) - } - } - } -} - -// Tests that simple synchronization against a canonical chain works correctly. -// In this test common ancestor lookup should be short circuited and not require -// binary searching. 
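assertOwnForkedChain accepts a receipt-count *range* rather than an exact number because in fast sync only blocks at or below the pivot carry receipts, and the pivot floats within fsPivotInterval of the head minus fsMinFullBlocks. A worked example of that window; the two constants are assumed stand-ins, not the real package values:

```go
// Worked example of the receipt bounds checked by assertOwnForkedChain.
package main

import "fmt"

const (
	fsMinFullBlocks = 64 // assumed: blocks imported fully near the head
	fsPivotInterval = 32 // assumed: jitter window for the pivot point
)

func main() {
	length := 1024 // blocks in the synchronised chain
	min := length - fsMinFullBlocks - fsPivotInterval
	max := length - fsMinFullBlocks
	fmt.Printf("receipts expected in [%d, %d]\n", min, max) // [928, 960]
}
```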
-func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) } -func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) } -func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) } -func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) } -func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) } -func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) } - -func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a small enough block chain to download - targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) - - // Synchronise with the peer and make sure all relevant data was retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, targetBlocks+1) -} - -// Tests that if a large batch of blocks are being downloaded, it is throttled -// until the cached blocks are retrieved. -func TestThrottling62(t *testing.T) { testThrottling(t, 62, FullSync) } -func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) } -func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) } -func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) } -func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) } - -func testThrottling(t *testing.T, protocol int, mode SyncMode) { - // Create a long block chain to download and the tester - targetBlocks := 8 * blockCacheLimit - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) - - // Wrap the importer to allow stepping - blocked, proceed := uint32(0), make(chan struct{}) - tester.downloader.chainInsertHook = func(results []*fetchResult) { - atomic.StoreUint32(&blocked, uint32(len(results))) - <-proceed - } - // Start a synchronisation concurrently - errc := make(chan error) - go func() { - errc <- tester.sync("peer", nil, mode) - }() - // Iteratively take some blocks, always checking the retrieval count - for { - // Check the retrieval count synchronously (! 
reason for this ugly block) - tester.lock.RLock() - retrieved := len(tester.ownBlocks) - tester.lock.RUnlock() - if retrieved >= targetBlocks+1 { - break - } - // Wait a bit for sync to throttle itself - var cached, frozen int - for start := time.Now(); time.Since(start) < 3*time.Second; { - time.Sleep(25 * time.Millisecond) - - tester.lock.Lock() - tester.downloader.queue.lock.Lock() - cached = len(tester.downloader.queue.blockDonePool) - if mode == FastSync { - if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached { - if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot { - cached = receipts - } - } - } - frozen = int(atomic.LoadUint32(&blocked)) - retrieved = len(tester.ownBlocks) - tester.downloader.queue.lock.Unlock() - tester.lock.Unlock() - - if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 { - break - } - } - // Make sure we filled up the cache, then exhaust it - time.Sleep(25 * time.Millisecond) // give it a chance to screw up - - tester.lock.RLock() - retrieved = len(tester.ownBlocks) - tester.lock.RUnlock() - if cached != blockCacheLimit && retrieved+cached+frozen != targetBlocks+1 { - t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheLimit, retrieved, frozen, targetBlocks+1) - } - // Permit the blocked blocks to import - if atomic.LoadUint32(&blocked) > 0 { - atomic.StoreUint32(&blocked, uint32(0)) - proceed <- struct{}{} - } - } - // Check that we haven't pulled more blocks than available - assertOwnChain(t, tester, targetBlocks+1) - if err := <-errc; err != nil { - t.Fatalf("block synchronization failed: %v", err) - } -} - -// Tests that simple synchronization against a forked chain works correctly. In -// this test common ancestor lookup should *not* be short circuited, and a full -// binary search should be executed. -func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) } -func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) } -func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) } -func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) } -func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) } -func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) } - -func testForkedSync(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a long enough forked chain - common, fork := MaxHashFetch, 2*MaxHashFetch - hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true) - - tester := newTester() - defer tester.terminate() - - tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA) - tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("fork A", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, common+fork+1) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("fork B", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1}) -} - -// Tests that synchronising against a much shorter but much heavier fork works -// correctly and is not dropped.
-func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) } -func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) } -func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) } -func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) } -func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) } -func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) } - -func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a long enough forked chain - common, fork := MaxHashFetch, 4*MaxHashFetch - hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA) - tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("light", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, common+fork+1) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("heavy", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1}) -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head, ensuring that malicious peers cannot waste resources by feeding -// long dead chains. -func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) } -func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) } -func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) } -func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) } -func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) } -func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) } - -func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a long enough forked chain - common, fork := 13, int(MaxForkAncestry+17) - hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true) - - tester := newTester() - defer tester.terminate() - - tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA) - tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, common+fork+1) - - // Synchronise with the second peer and ensure that the fork is rejected for being too old - if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head for short but heavy forks too. These are a bit special because they -// take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) } -func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) } -func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) } -func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) } -func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) } -func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) } - -func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a long enough forked chain - common, fork := 13, int(MaxForkAncestry+17) - hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA) - tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, common+fork+1) - - // Synchronise with the second peer and ensure that the fork is rejected for being too old - if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - -// Tests that an inactive downloader will not accept incoming block headers and -// bodies. -func TestInactiveDownloader62(t *testing.T) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - // Check that neither block headers nor bodies are accepted - if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } - if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } -} - -// Tests that an inactive downloader will not accept incoming block headers, -// bodies and receipts. -func TestInactiveDownloader63(t *testing.T) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - // Check that neither block headers nor bodies are accepted - if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } - if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } - if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } -} - -// Tests that a canceled download wipes all previously accumulated state.
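The TestInactiveDownloader cases above exercise the guard inside deliver: packets are only accepted while a sync owns a live cancel channel, otherwise they fail fast with errNoSyncActive. A self-contained sketch of that guard pattern, with simplified types standing in for the real dataPack plumbing:

```go
// Sketch of the inactive-downloader guard checked by these tests.
package main

import (
	"errors"
	"fmt"
)

var errNoSyncActive = errors.New("no sync active")

type downloader struct {
	cancelCh chan struct{} // nil unless a sync cycle is running
	dataCh   chan string
}

func (d *downloader) deliver(item string) error {
	if d.cancelCh == nil {
		return errNoSyncActive // inactive: reject immediately
	}
	select {
	case d.dataCh <- item:
		return nil
	case <-d.cancelCh:
		return errNoSyncActive // sync canceled while queuing
	}
}

func main() {
	d := &downloader{dataCh: make(chan string, 1)} // no sync started
	fmt.Println(d.deliver("headers"))              // "no sync active"
}
```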
-func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) } -func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) } -func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) } -func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) } -func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) } -func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) } - -func testCancel(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a small enough block chain to download and the tester - targetBlocks := blockCacheLimit - 15 - if targetBlocks >= MaxHashFetch { - targetBlocks = MaxHashFetch - 15 - } - if targetBlocks >= MaxHeaderFetch { - targetBlocks = MaxHeaderFetch - 15 - } - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) - - // Make sure canceling works with a pristine downloader - tester.downloader.cancel() - if !tester.downloader.queue.Idle() { - t.Errorf("download queue not idle") - } - // Synchronise with the peer, but cancel afterwards - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - tester.downloader.cancel() - if !tester.downloader.queue.Idle() { - t.Errorf("download queue not idle") - } -} - -// Tests that synchronisation from multiple peers works as intended (multi thread sanity test). -func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) } -func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) } -func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) } -func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) } -func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) } -func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) } - -func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create various peers with various parts of the chain - targetPeers := 8 - targetBlocks := targetPeers*blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - for i := 0; i < targetPeers; i++ { - id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts) - } - if err := tester.sync("peer #0", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, targetBlocks+1) -} - -// Tests that synchronisations behave well in multi-version protocol environments -// and do not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) } -func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) } -func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) } -func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) } -func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) } -func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) } - -func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a small enough block chain to download - targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - // Create peers of every type - tester := newTester() - defer tester.terminate() - - tester.newPeer("peer 62", 62, hashes, headers, blocks, nil) - tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts) - tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts) - - // Synchronise with the requested peer and make sure all blocks were retrieved - if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, targetBlocks+1) - - // Check that no peers have been dropped off - for _, version := range []int{62, 63, 64} { - peer := fmt.Sprintf("peer %d", version) - if _, ok := tester.peerHashes[peer]; !ok { - t.Errorf("%s dropped", peer) - } - } -} - -// Tests that if a block is empty (e.g. header only), no body request should be -// made, and instead the header should be assembled into a whole block in itself. 
-func TestEmptyShortCircuit62(t *testing.T) { testEmptyShortCircuit(t, 62, FullSync) } -func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) } -func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) } -func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) } -func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) } -func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) } - -func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a block chain to download - targetBlocks := 2*blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) - - // Instrument the downloader to signal body requests - bodiesHave, receiptsHave := int32(0), int32(0) - tester.downloader.bodyFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&bodiesHave, int32(len(headers))) - } - tester.downloader.receiptFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&receiptsHave, int32(len(headers))) - } - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, targetBlocks+1) - - // Validate the number of block bodies that should have been requested - bodiesNeeded, receiptsNeeded := 0, 0 - for _, block := range blocks { - if mode != LightSync && block != genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { - bodiesNeeded++ - } - } - for hash, receipt := range receipts { - if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= tester.downloader.queue.fastSyncPivot { - receiptsNeeded++ - } - } - if int(bodiesHave) != bodiesNeeded { - t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded) - } - if int(receiptsHave) != receiptsNeeded { - t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded) - } -} - -// Tests that headers are enqueued continuously, preventing malicious nodes from -// stalling the downloader by feeding gapped header chains. 
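testEmptyShortCircuit above relies on the fact that a header already commits to its transaction and uncle sets, so a block containing neither needs no body request and can be reassembled from the header alone. A sketch of that filtering decision; the boolean fields stand in for comparing the header's TxHash/UncleHash against the empty-set hashes:

```go
// Sketch of the empty-block short circuit verified by the test above:
// only headers whose bodies carry actual content trigger a body fetch.
package main

import "fmt"

type header struct {
	number            int
	hasTxs, hasUncles bool
}

func main() {
	headers := []header{{1, false, false}, {2, true, false}, {3, false, true}}

	var fetch []int
	for _, h := range headers {
		if h.hasTxs || h.hasUncles {
			fetch = append(fetch, h.number) // body actually needed
		}
	}
	fmt.Println("bodies to request:", fetch) // [2 3]; block 1 completes itself
}
```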
-func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62, FullSync) } -func TestMissingHeaderAttack63Full(t *testing.T) { testMissingHeaderAttack(t, 63, FullSync) } -func TestMissingHeaderAttack63Fast(t *testing.T) { testMissingHeaderAttack(t, 63, FastSync) } -func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) } -func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) } -func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) } - -func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a small enough block chain to download - targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - // Attempt a full sync with an attacker feeding gapped headers - tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) - missing := targetBlocks / 2 - delete(tester.peerHeaders["attack"], hashes[missing]) - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, targetBlocks+1) -} - -// Tests that if requested headers are shifted (i.e. first is missing), the queue -// detects the invalid numbering. -func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62, FullSync) } -func TestShiftedHeaderAttack63Full(t *testing.T) { testShiftedHeaderAttack(t, 63, FullSync) } -func TestShiftedHeaderAttack63Fast(t *testing.T) { testShiftedHeaderAttack(t, 63, FastSync) } -func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) } -func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) } -func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) } - -func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { - // Create a small enough block chain to download - targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - // Attempt a full sync with an attacker feeding shifted headers - tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) - delete(tester.peerHeaders["attack"], hashes[len(hashes)-2]) - delete(tester.peerBlocks["attack"], hashes[len(hashes)-2]) - delete(tester.peerReceipts["attack"], hashes[len(hashes)-2]) - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, targetBlocks+1) -} - -// Tests that upon detecting an invalid header, the recent ones are rolled back -// for various failure scenarios. Afterwards a full sync is attempted to make -// sure no state was corrupted. 
-func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) } -func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) } -func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) } - -func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { - // Create a small enough block chain to download - targetBlocks := 3*fsHeaderSafetyNet + fsMinFullBlocks - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - tester := newTester() - defer tester.terminate() - - // Attempt to sync with an attacker that feeds junk during the fast sync phase. - // This should result in the last fsHeaderSafetyNet headers being rolled back. - tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts) - missing := fsHeaderSafetyNet + MaxHeaderFetch + 1 - delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) - - if err := tester.sync("fast-attack", nil, mode); err == nil { - t.Fatalf("succeeded fast attacker synchronisation") - } - if head := tester.headHeader().Number.Int64(); int(head) > MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) - } - // Attempt to sync with an attacker that feeds junk during the block import phase. - // This should result in both the last fsHeaderSafetyNet number of headers being - // rolled back, and also the pivot point being reverted to a non-block status. - tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts) - missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 - delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in - delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing]) - - if err := tester.sync("block-attack", nil, mode); err == nil { - t.Fatalf("succeeded block attacker synchronisation") - } - if head := tester.headHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) - } - if mode == FastSync { - if head := tester.headBlock().NumberU64(); head != 0 { - t.Errorf("fast sync pivot block #%d not rolled back", head) - } - } - // Attempt to sync with an attacker that withholds promised blocks after the - // fast sync pivot point. This could be a trial to leave the node with a bad - // but already imported pivot block. - tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts) - missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 - - tester.downloader.fsPivotFails = 0 - tester.downloader.syncInitHook = func(uint64, uint64) { - for i := missing; i <= len(hashes); i++ { - delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i]) - } - tester.downloader.syncInitHook = nil - } - - if err := tester.sync("withhold-attack", nil, mode); err == nil { - t.Fatalf("succeeded withholding attacker synchronisation") - } - if head := tester.headHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) - } - if mode == FastSync { - if head := tester.headBlock().NumberU64(); head != 0 { - t.Errorf("fast sync pivot block #%d not rolled back", head) - } - } - tester.downloader.fsPivotFails = fsCriticalTrials - - // Synchronise with the valid peer and make sure sync succeeds. 
Since the last - // rollback should also disable fast syncing for this process, verify that we - // did a fresh full sync. Note, we can't assert anything about the receipts - // since we won't purge the database of them, hence we can't use assertOwnChain. - tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - if hs := len(tester.ownHeaders); hs != len(headers) { - t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers)) - } - if mode != LightSync { - if bs := len(tester.ownBlocks); bs != len(blocks) { - t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks)) - } - } -} - -// Tests that a peer advertising a high TD doesn't get to stall the downloader -// afterwards by not sending any useful hashes. -func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) } -func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) } -func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) } -func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) } -func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) } -func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) } - -func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false) - tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts) - - if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) - } -} - -// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) } -func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) } -func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) } - -func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { - // Define the disconnection requirement for individual hash fetch errors - tests := []struct { - result error - drop bool - }{ - {nil, false}, // Sync succeeded, all is well - {errBusy, false}, // Sync is already in progress, no problem - {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop - {errBadPeer, true}, // Peer was deemed bad for some reason, drop it - {errStallingPeer, true}, // Peer was detected to be stalling, drop it - {errNoPeers, false}, // No peers to download from, soft race, no issue - {errTimeout, true}, // No hashes received in due time, drop the peer - {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end - {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser - {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter - {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop - {errInvalidBlock, false}, // A bad peer was detected, but not the sync origin - {errInvalidBody, false}, // A bad peer was detected, but not the sync origin - {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin - {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelHeaderProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop - } - // Run the tests and check disconnection status - tester := newTester() - defer tester.terminate() - - for i, tt := range tests { - // Register a new peer and ensure its presence - id := fmt.Sprintf("test %d", i) - if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil, nil, nil); err != nil { - t.Fatalf("test %d: failed to register new peer: %v", i, err) - } - if _, ok := tester.peerHashes[id]; !ok { - t.Fatalf("test %d: registered peer not found", i) - } - // Simulate a synchronisation and check the required result - tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - - tester.downloader.Synchronise(id, genesis.Hash(), big.NewInt(1000), FullSync) - if _, ok := tester.peerHashes[id]; !ok != tt.drop { - t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) - } - } -} - -// Tests that synchronisation progress (origin block number, current block number -// and highest block number) is tracked and updated correctly.
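The table above encodes a simple policy: errors that prove the sync origin misbehaved cause a disconnect, while local conditions and cancellations leave the peer connected. A self-contained sketch of how such a table drives the drop decision, with simplified errors and a plain map standing in for the peer registry:

```go
// Sketch of the error-to-drop policy walked by the test above.
package main

import (
	"errors"
	"fmt"
)

var (
	errStallingPeer = errors.New("peer is stalling")
	errBusy         = errors.New("busy")
)

func main() {
	shouldDrop := map[error]bool{
		errStallingPeer: true,  // provably misbehaving: disconnect
		errBusy:         false, // local condition: keep the peer
	}
	peers := map[string]bool{"peer-a": true, "peer-b": true}
	results := map[string]error{"peer-a": errStallingPeer, "peer-b": errBusy}

	for id, err := range results {
		if shouldDrop[err] {
			delete(peers, id) // simulated hard disconnect
		}
	}
	fmt.Println(peers) // only peer-b remains
}
```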
-func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) } -func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) } -func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) } -func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) } -func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) } -func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) } - -func testSyncProgress(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a small enough block chain to download - targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester := newTester() - defer tester.terminate() - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - // Retrieve the sync progress and ensure they are zero (pristine sync) - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 { - t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0) - } - // Synchronise half the blocks and check initial progress - tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts) - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("peer-half", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - }() - <-starting - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks/2+1) { - t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks/2+1) - } - progress <- struct{}{} - pending.Wait() - - // Synchronise all the blocks and check continuation progress - tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("peer-full", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - }() - <-starting - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != uint64(targetBlocks/2+1) || current != uint64(targetBlocks/2+1) || latest != uint64(targetBlocks) { - t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, targetBlocks/2+1, targetBlocks/2+1, targetBlocks) - } - progress <- struct{}{} - pending.Wait() - - // Check final progress after successful sync - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != uint64(targetBlocks/2+1) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) { - t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, targetBlocks/2+1, targetBlocks, targetBlocks) - } -} - -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of a fork (or manual head -// revertal). 
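The (origin, current, latest) triple asserted above is exactly what a consumer would turn into a completion figure. A small sketch under that assumption, with invented names; the numbers mirror the half-then-full scenario of the test:

```go
package main

import "fmt"

// progressPercent turns the (origin, current, latest) triple checked by the
// tests above into a completion percentage relative to this sync's window.
func progressPercent(origin, current, latest uint64) float64 {
	if latest <= origin {
		return 100 // degenerate window: nothing left to download
	}
	return 100 * float64(current-origin) / float64(latest-origin)
}

func main() {
	// First sync ends at roughly half the chain; the second starts from
	// there (origin = targetBlocks/2 + 1) and runs to the head.
	fmt.Printf("%.1f%%\n", progressPercent(0, 33, 65))  // ~50.8%
	fmt.Printf("%.1f%%\n", progressPercent(34, 65, 65)) // 100.0%
}
```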
-func TestForkedSyncProgress62(t *testing.T) { testForkedSyncProgress(t, 62, FullSync) } -func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) } -func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) } -func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) } -func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) } -func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) } - -func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a forked chain to simulate origin revertal - common, fork := MaxHashFetch, 2*MaxHashFetch - hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester := newTester() - defer tester.terminate() - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - // Retrieve the sync progress and ensure they are zero (pristine sync) - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 { - t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0) - } - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA) - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("fork A", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - }() - <-starting - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(len(hashesA)-1) { - t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, len(hashesA)-1) - } - progress <- struct{}{} - pending.Wait() - - // Simulate a successful sync above the fork - tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight - - // Synchronise with the second fork and check progress resets - tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("fork B", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - }() - <-starting - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != uint64(common) || current != uint64(len(hashesA)-1) || latest != uint64(len(hashesB)-1) { - t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, common, len(hashesA)-1, len(hashesB)-1) - } - progress <- struct{}{} - pending.Wait() - - // Check final progress after successful sync - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != uint64(common) || current != uint64(len(hashesB)-1) || latest != uint64(len(hashesB)-1) { - t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, common, len(hashesB)-1, len(hashesB)-1) - } -} - -// Tests that if synchronisation is aborted due to some failure, then the progress -// origin is not updated in the next sync cycle, as it should be considered the -// continuation of the previous sync and not a new instance. 
-func TestFailedSyncProgress62(t *testing.T) { testFailedSyncProgress(t, 62, FullSync) } -func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) } -func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) } -func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) } -func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) } -func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) } - -func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a small enough block chain to download - targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester := newTester() - defer tester.terminate() - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - // Retrieve the sync progress and ensure they are zero (pristine sync) - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 { - t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0) - } - // Attempt a full sync with a faulty peer - tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts) - missing := targetBlocks / 2 - delete(tester.peerHeaders["faulty"], hashes[missing]) - delete(tester.peerBlocks["faulty"], hashes[missing]) - delete(tester.peerReceipts["faulty"], hashes[missing]) - - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("faulty", nil, mode); err == nil { - t.Fatalf("succeeded faulty synchronisation") - } - }() - <-starting - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks) { - t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks) - } - progress <- struct{}{} - pending.Wait() - - // Synchronise with a good peer and check that the progress origin remains the same after a failure - tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - }() - <-starting - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current > uint64(targetBlocks/2) || latest != uint64(targetBlocks) { - t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", origin, current, latest, 0, targetBlocks/2, targetBlocks) - } - progress <- struct{}{} - pending.Wait() - - // Check final progress after successful sync - if origin, current, latest, _, _ := tester.downloader.Progress(); origin > uint64(targetBlocks/2) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) { - t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", origin, current, latest, targetBlocks/2, targetBlocks, targetBlocks) - } -} - -// Tests that if an attacker fakes a chain height, after the attack is detected, -// the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) } -func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) } -func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) } -func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) } -func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) } -func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) } - -func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - - // Create a small block chain - targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil, false) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester := newTester() - defer tester.terminate() - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - // Retrieve the sync progress and ensure they are zero (pristine sync) - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 { - t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0) - } - // Create and sync with an attacker that promises a higher chain than available - tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) - for i := 1; i < 3; i++ { - delete(tester.peerHeaders["attack"], hashes[i]) - delete(tester.peerBlocks["attack"], hashes[i]) - delete(tester.peerReceipts["attack"], hashes[i]) - } - - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - }() - <-starting - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks+3) { - t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks+3) - } - progress <- struct{}{} - pending.Wait() - - // Synchronise with a good peer and check that the progress height has been reduced to the true value - tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - }() - <-starting - if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current > uint64(targetBlocks) || latest != uint64(targetBlocks) { - t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", origin, current, latest, 0, targetBlocks, targetBlocks) - } - progress <- struct{}{} - pending.Wait() - - // Check final progress after successful sync - if origin, current, latest, _, _ := tester.downloader.Progress(); origin > uint64(targetBlocks) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) { - t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", origin, current, latest, targetBlocks, targetBlocks, targetBlocks) - } -} - -// This test reproduces an issue where unexpected deliveries would -// block indefinitely if they arrived at the right time. 
-func TestDeliverHeadersHang62(t *testing.T) { testDeliverHeadersHang(t, 62, FullSync) } -func TestDeliverHeadersHang63Full(t *testing.T) { testDeliverHeadersHang(t, 63, FullSync) } -func TestDeliverHeadersHang63Fast(t *testing.T) { testDeliverHeadersHang(t, 63, FastSync) } -func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) } -func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) } -func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) } - -func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) { - t.Parallel() - hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil, false) - fakeHeads := []*types.Header{{}, {}, {}, {}} - for i := 0; i < 200; i++ { - tester := newTester() - tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) - // Whenever the downloader requests headers, flood it with - // a lot of unrequested header deliveries. - tester.downloader.peers.peers["peer"].getAbsHeaders = func(from uint64, count, skip int, reverse bool) error { - deliveriesDone := make(chan struct{}, 500) - for i := 0; i < cap(deliveriesDone); i++ { - peer := fmt.Sprintf("fake-peer%d", i) - go func() { - tester.downloader.DeliverHeaders(peer, fakeHeads) - deliveriesDone <- struct{}{} - }() - } - // Deliver the actual requested headers. - impl := tester.peerGetAbsHeadersFn("peer", 0) - go impl(from, count, skip, reverse) - // None of the extra deliveries should block. - timeout := time.After(15 * time.Second) - for i := 0; i < cap(deliveriesDone); i++ { - select { - case <-deliveriesDone: - case <-timeout: - panic("blocked") - } - } - return nil - } - if err := tester.sync("peer", nil, mode); err != nil { - t.Errorf("sync failed: %v", err) - } - tester.terminate() - } -} - -// Tests that if fast sync aborts in the critical section, it can restart a few -// times before giving up. 
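The hang exercised above disappears when every delivery path selects on a cancellation channel instead of blocking on the data channel alone. A generic sketch of that pattern with invented names (the actual fix lives inside the downloader's delivery plumbing):

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSyncActive = errors.New("no sync active")

// deliver hands data to an active sync without ever blocking: if the sync
// was torn down (cancel closed), the unexpected delivery is simply dropped.
func deliver(dest chan<- []byte, cancel <-chan struct{}, packet []byte) error {
	select {
	case dest <- packet:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}

func main() {
	cancel := make(chan struct{})
	close(cancel) // the sync has already finished
	// 500 stray deliveries, none of them blocks a goroutine.
	for i := 0; i < 500; i++ {
		deliver(make(chan []byte), cancel, nil)
	}
	fmt.Println("no goroutine ever hung")
}
```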
-func TestFastCriticalRestarts63(t *testing.T) { testFastCriticalRestarts(t, 63) } -func TestFastCriticalRestarts64(t *testing.T) { testFastCriticalRestarts(t, 64) } - -func testFastCriticalRestarts(t *testing.T, protocol int) { - t.Parallel() - - // Create a large enough blockchain to actually fast sync on - targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15 - hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false) - - // Create a tester peer with the critical section state roots missing (force failures) - tester := newTester() - defer tester.terminate() - - tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) - for i := 0; i < fsPivotInterval; i++ { - tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true - } - tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test - - // Synchronise with the peer a few times and make sure they fail until the retry limit - for i := 0; i < fsCriticalTrials; i++ { - // Attempt a sync and ensure it fails properly - if err := tester.sync("peer", nil, FastSync); err == nil { - t.Fatalf("failing fast sync succeeded: %v", err) - } - time.Sleep(100 * time.Millisecond) // Make sure no in-flight requests remain - - // If it's the first failure, pivot should be locked => reenable all others to detect pivot changes - if i == 0 { - tester.lock.Lock() - tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true} - tester.lock.Unlock() - } - } - // Retry limit exhausted, downloader will switch to full sync, should succeed - if err := tester.sync("peer", nil, FastSync); err != nil { - t.Fatalf("failed to synchronise blocks in slow sync: %v", err) - } - assertOwnChain(t, tester, targetBlocks+1) -} diff --git a/exp/downloader/metrics.go b/exp/downloader/metrics.go deleted file mode 100644 index 6e0b457c71d10..0000000000000 --- a/exp/downloader/metrics.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the metrics collected by the downloader.
- -package downloader - -import ( - "github.com/expanse-org/go-expanse/metrics" -) - -var ( - headerInMeter = metrics.NewMeter("eth/downloader/headers/in") - headerReqTimer = metrics.NewTimer("eth/downloader/headers/req") - headerDropMeter = metrics.NewMeter("eth/downloader/headers/drop") - headerTimeoutMeter = metrics.NewMeter("eth/downloader/headers/timeout") - - bodyInMeter = metrics.NewMeter("eth/downloader/bodies/in") - bodyReqTimer = metrics.NewTimer("eth/downloader/bodies/req") - bodyDropMeter = metrics.NewMeter("eth/downloader/bodies/drop") - bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout") - - receiptInMeter = metrics.NewMeter("eth/downloader/receipts/in") - receiptReqTimer = metrics.NewTimer("eth/downloader/receipts/req") - receiptDropMeter = metrics.NewMeter("eth/downloader/receipts/drop") - receiptTimeoutMeter = metrics.NewMeter("eth/downloader/receipts/timeout") - - stateInMeter = metrics.NewMeter("eth/downloader/states/in") - stateReqTimer = metrics.NewTimer("eth/downloader/states/req") - stateDropMeter = metrics.NewMeter("eth/downloader/states/drop") - stateTimeoutMeter = metrics.NewMeter("eth/downloader/states/timeout") -) diff --git a/exp/downloader/peer.go b/exp/downloader/peer.go deleted file mode 100644 index fe18dbe929f6b..0000000000000 --- a/exp/downloader/peer.go +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the active peer-set of the downloader, maintaining both failures -// as well as reputation metrics to prioritize the block retrievals. - -package downloader - -import ( - "errors" - "fmt" - "math" - "math/big" - "sort" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/expanse-org/go-expanse/common" -) - -const ( - maxLackingHashes = 4096 // Maximum number of entries allowed on the list of lacking items - measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value. -) - -// Head hash and total difficulty retriever for -type currentHeadRetrievalFn func() (common.Hash, *big.Int) - -// Block header and body fetchers belonging to eth/62 and above -type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error -type absoluteHeaderFetcherFn func(uint64, int, int, bool) error -type blockBodyFetcherFn func([]common.Hash) error -type receiptFetcherFn func([]common.Hash) error -type stateFetcherFn func([]common.Hash) error - -var ( - errAlreadyFetching = errors.New("already fetching blocks from peer") - errAlreadyRegistered = errors.New("peer is already registered") - errNotRegistered = errors.New("peer is not registered") -) - -// peer represents an active peer from which hashes and blocks are retrieved.
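For reference, meters and timers like the ones above are fed at delivery time. A sketch assuming the upstream rcrowley/go-metrics API directly (the project's metrics wrapper takes the name in its constructors, so the calls here differ from the file above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Register a meter and a timer under the same names used above.
	headerInMeter := metrics.NewRegisteredMeter("eth/downloader/headers/in", nil)
	headerReqTimer := metrics.NewRegisteredTimer("eth/downloader/headers/req", nil)

	start := time.Now()
	// ... issue a header request to a peer and wait for the answer ...
	headerReqTimer.UpdateSince(start) // record the request round trip time
	headerInMeter.Mark(192)           // 192 headers arrived in this packet

	fmt.Println(headerInMeter.Count(), headerReqTimer.Count()) // 192 1
}
```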
-type peer struct { - id string // Unique identifier of the peer - - headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1) - blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1) - receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) - stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1) - - headerThroughput float64 // Number of headers measured to be retrievable per second - blockThroughput float64 // Number of blocks (bodies) measured to be retrievable per second - receiptThroughput float64 // Number of receipts measured to be retrievable per second - stateThroughput float64 // Number of node data pieces measured to be retrievable per second - - rtt time.Duration // Request round trip time to track responsiveness (QoS) - - headerStarted time.Time // Time instance when the last header fetch was started - blockStarted time.Time // Time instance when the last block (body) fetch was started - receiptStarted time.Time // Time instance when the last receipt fetch was started - stateStarted time.Time // Time instance when the last node data fetch was started - - lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously) - - currentHead currentHeadRetrievalFn // Method to fetch the currently known head of the peer - - getRelHeaders relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash - getAbsHeaders absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position - getBlockBodies blockBodyFetcherFn // [eth/62] Method to retrieve a batch of block bodies - - getReceipts receiptFetcherFn // [eth/63] Method to retrieve a batch of block transaction receipts - getNodeData stateFetcherFn // [eth/63] Method to retrieve a batch of state trie data - - version int // Eth protocol version number to switch strategies - lock sync.RWMutex -} - -// newPeer creates a new downloader peer, with specific hash and block retrieval -// mechanisms. -func newPeer(id string, version int, currentHead currentHeadRetrievalFn, - getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, - getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer { - return &peer{ - id: id, - lacking: make(map[common.Hash]struct{}), - - currentHead: currentHead, - getRelHeaders: getRelHeaders, - getAbsHeaders: getAbsHeaders, - getBlockBodies: getBlockBodies, - - getReceipts: getReceipts, - getNodeData: getNodeData, - - version: version, - } -} - -// Reset clears the internal state of a peer entity. -func (p *peer) Reset() { - p.lock.Lock() - defer p.lock.Unlock() - - atomic.StoreInt32(&p.headerIdle, 0) - atomic.StoreInt32(&p.blockIdle, 0) - atomic.StoreInt32(&p.receiptIdle, 0) - atomic.StoreInt32(&p.stateIdle, 0) - - p.headerThroughput = 0 - p.blockThroughput = 0 - p.receiptThroughput = 0 - p.stateThroughput = 0 - - p.lacking = make(map[common.Hash]struct{}) -} - -// FetchHeaders sends a header retrieval request to the remote peer.
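The Fetch* methods that follow all gate on the same atomic compare-and-swap of one of the idle flags declared in the struct above. A stripped-down sketch of just that gate:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errAlreadyFetching = errors.New("already fetching blocks from peer")

// startFetch flips an idle flag from 0 to 1 atomically, so concurrent
// callers can never start two overlapping requests on the same peer.
func startFetch(idle *int32) error {
	if !atomic.CompareAndSwapInt32(idle, 0, 1) {
		return errAlreadyFetching
	}
	return nil
}

func main() {
	var headerIdle int32
	fmt.Println(startFetch(&headerIdle)) // <nil>: this caller owns the slot
	fmt.Println(startFetch(&headerIdle)) // already fetching blocks from peer
	atomic.StoreInt32(&headerIdle, 0)    // what SetHeadersIdle eventually does
	fmt.Println(startFetch(&headerIdle)) // <nil> again
}
```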
-func (p *peer) FetchHeaders(from uint64, count int) error { - // Sanity check the protocol version - if p.version < 62 { - panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version)) - } - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { - return errAlreadyFetching - } - p.headerStarted = time.Now() - - // Issue the header retrieval request (absolute, upwards, without gaps) - go p.getAbsHeaders(from, count, 0, false) - - return nil -} - -// FetchBodies sends a block body retrieval request to the remote peer. -func (p *peer) FetchBodies(request *fetchRequest) error { - // Sanity check the protocol version - if p.version < 62 { - panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version)) - } - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { - return errAlreadyFetching - } - p.blockStarted = time.Now() - - // Convert the header set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Headers)) - for _, header := range request.Headers { - hashes = append(hashes, header.Hash()) - } - go p.getBlockBodies(hashes) - - return nil -} - -// FetchReceipts sends a receipt retrieval request to the remote peer. -func (p *peer) FetchReceipts(request *fetchRequest) error { - // Sanity check the protocol version - if p.version < 63 { - panic(fmt.Sprintf("receipt fetch [eth/63+] requested on eth/%d", p.version)) - } - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { - return errAlreadyFetching - } - p.receiptStarted = time.Now() - - // Convert the header set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Headers)) - for _, header := range request.Headers { - hashes = append(hashes, header.Hash()) - } - go p.getReceipts(hashes) - - return nil -} - -// FetchNodeData sends a node state data retrieval request to the remote peer. -func (p *peer) FetchNodeData(request *fetchRequest) error { - // Sanity check the protocol version - if p.version < 63 { - panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version)) - } - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { - return errAlreadyFetching - } - p.stateStarted = time.Now() - - // Convert the hash set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Hashes)) - for hash := range request.Hashes { - hashes = append(hashes, hash) - } - go p.getNodeData(hashes) - - return nil -} - -// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval -// requests. Its estimated header retrieval throughput is updated with that measured -// just now. -func (p *peer) SetHeadersIdle(delivered int) { - p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle) -} - -// SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval -// requests. Its estimated block retrieval throughput is updated with that measured -// just now. -func (p *peer) SetBlocksIdle(delivered int) { - p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle) -} - -// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval -// requests. Its estimated body retrieval throughput is updated with that measured -// just now.
-func (p *peer) SetBodiesIdle(delivered int) { - p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle) -} - -// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt -// retrieval requests. Its estimated receipt retrieval throughput is updated -// with that measured just now. -func (p *peer) SetReceiptsIdle(delivered int) { - p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle) -} - -// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie -// data retrieval requests. Its estimated state retrieval throughput is updated -// with that measured just now. -func (p *peer) SetNodeDataIdle(delivered int) { - p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle) -} - -// setIdle sets the peer to idle, allowing it to execute new retrieval requests. -// Its estimated retrieval throughput is updated with that measured just now. -func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) { - // Regardless of the scaling, make sure the peer ends up idle - defer atomic.StoreInt32(idle, 0) - - p.lock.Lock() - defer p.lock.Unlock() - - // If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum - if delivered == 0 { - *throughput = 0 - return - } - // Otherwise update the throughput with a new measurement - elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor - measured := float64(delivered) / (float64(elapsed) / float64(time.Second)) - - *throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured - p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed)) -} - -// HeaderCapacity retrieves the peer's header download allowance based on its -// previously discovered throughput. -func (p *peer) HeaderCapacity(targetRTT time.Duration) int { - p.lock.RLock() - defer p.lock.RUnlock() - - return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch))) -} - -// BlockCapacity retrieves the peer's block download allowance based on its -// previously discovered throughput. -func (p *peer) BlockCapacity(targetRTT time.Duration) int { - p.lock.RLock() - defer p.lock.RUnlock() - - return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch))) -} - -// ReceiptCapacity retrieves the peer's receipt download allowance based on its -// previously discovered throughput. -func (p *peer) ReceiptCapacity(targetRTT time.Duration) int { - p.lock.RLock() - defer p.lock.RUnlock() - - return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch))) -} - -// NodeDataCapacity retrieves the peer's state download allowance based on its -// previously discovered throughput. -func (p *peer) NodeDataCapacity(targetRTT time.Duration) int { - p.lock.RLock() - defer p.lock.RUnlock() - - return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch))) -} - -// MarkLacking appends a new entity to the set of items (blocks, receipts, states) -// that a peer is known not to have (i.e. have been requested before). If the -// set reaches its maximum allowed capacity, items are randomly dropped off.
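A worked example of the two formulas above: the exponential moving average in setIdle and the capacity clamp in HeaderCapacity. The measurementImpact value comes from the source; maxHeaderFetch = 192 is assumed here for illustration:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

const (
	measurementImpact = 0.1 // as defined in the peer constants above
	maxHeaderFetch    = 192 // assumed stand-in for MaxHeaderFetch
)

// ewma applies the same update as setIdle: each new measurement shifts the
// estimate by only 10%, smoothing out one-off spikes and stalls.
func ewma(old, measured float64) float64 {
	return (1-measurementImpact)*old + measurementImpact*measured
}

// capacity reproduces the HeaderCapacity clamp: at least 1 item, at most
// maxHeaderFetch, otherwise throughput integrated over the target RTT.
func capacity(throughput float64, targetRTT time.Duration) int {
	return int(math.Min(1+math.Max(1, throughput*float64(targetRTT)/float64(time.Second)), maxHeaderFetch))
}

func main() {
	tp := ewma(100, 40) // a slow batch drags 100 headers/s down to only 94
	fmt.Println(tp, capacity(tp, 500*time.Millisecond)) // 94 48
}
```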
-func (p *peer) MarkLacking(hash common.Hash) { - p.lock.Lock() - defer p.lock.Unlock() - - for len(p.lacking) >= maxLackingHashes { - for drop := range p.lacking { - delete(p.lacking, drop) - break - } - } - p.lacking[hash] = struct{}{} -} - -// Lacks retrieves whether the hash of a blockchain item is on the peer's lacking -// list (i.e. whether we know that the peer does not have it). -func (p *peer) Lacks(hash common.Hash) bool { - p.lock.RLock() - defer p.lock.RUnlock() - - _, ok := p.lacking[hash] - return ok -} - -// String implements fmt.Stringer. -func (p *peer) String() string { - p.lock.RLock() - defer p.lock.RUnlock() - - return fmt.Sprintf("Peer %s [%s]", p.id, strings.Join([]string{ - fmt.Sprintf("hs %3.2f/s", p.headerThroughput), - fmt.Sprintf("bs %3.2f/s", p.blockThroughput), - fmt.Sprintf("rs %3.2f/s", p.receiptThroughput), - fmt.Sprintf("ss %3.2f/s", p.stateThroughput), - fmt.Sprintf("miss %4d", len(p.lacking)), - fmt.Sprintf("rtt %v", p.rtt), - }, ", ")) -} - -// peerSet represents the collection of active peers participating in the chain -// download procedure. -type peerSet struct { - peers map[string]*peer - lock sync.RWMutex -} - -// newPeerSet creates a new peer set to track the active download sources. -func newPeerSet() *peerSet { - return &peerSet{ - peers: make(map[string]*peer), - } -} - -// Reset iterates over the current peer set, and resets each of the known peers -// to prepare for the next batch of block retrieval. -func (ps *peerSet) Reset() { - ps.lock.RLock() - defer ps.lock.RUnlock() - - for _, peer := range ps.peers { - peer.Reset() - } -} - -// Register injects a new peer into the working set, or returns an error if the -// peer is already known. -// -// The method also sets the starting throughput values of the new peer to the -// average of all existing peers, to give it a realistic chance of being used -// for data retrievals. -func (ps *peerSet) Register(p *peer) error { - // Retrieve the current median RTT as a sane default - p.rtt = ps.medianRTT() - - // Register the new peer with some meaningful defaults - ps.lock.Lock() - defer ps.lock.Unlock() - - if _, ok := ps.peers[p.id]; ok { - return errAlreadyRegistered - } - if len(ps.peers) > 0 { - p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0 - - for _, peer := range ps.peers { - peer.lock.RLock() - p.headerThroughput += peer.headerThroughput - p.blockThroughput += peer.blockThroughput - p.receiptThroughput += peer.receiptThroughput - p.stateThroughput += peer.stateThroughput - peer.lock.RUnlock() - } - p.headerThroughput /= float64(len(ps.peers)) - p.blockThroughput /= float64(len(ps.peers)) - p.receiptThroughput /= float64(len(ps.peers)) - p.stateThroughput /= float64(len(ps.peers)) - } - ps.peers[p.id] = p - return nil -} - -// Unregister removes a remote peer from the active set, disabling any further -// actions to/from that particular entity. -func (ps *peerSet) Unregister(id string) error { - ps.lock.Lock() - defer ps.lock.Unlock() - - if _, ok := ps.peers[id]; !ok { - return errNotRegistered - } - delete(ps.peers, id) - return nil -} - -// Peer retrieves the registered peer with the given id. -func (ps *peerSet) Peer(id string) *peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - return ps.peers[id] -} - -// Len returns the current number of peers in the set. -func (ps *peerSet) Len() int { - ps.lock.RLock() - defer ps.lock.RUnlock() - - return len(ps.peers) -} - -// AllPeers retrieves a flat list of all the peers within the set.
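Register's design choice of seeding a newcomer with the mean throughput of the existing peers (rather than zero) is worth isolating: it lets the new peer compete for requests immediately instead of idling until its first measurement. A minimal sketch with invented names:

```go
package main

import "fmt"

// averageThroughput mirrors Register's seeding of a new peer: start it at
// the mean of the already-registered peers' estimates.
func averageThroughput(existing []float64) float64 {
	if len(existing) == 0 {
		return 0
	}
	sum := 0.0
	for _, tp := range existing {
		sum += tp
	}
	return sum / float64(len(existing))
}

func main() {
	peers := []float64{120, 80, 40} // headers/s of the current peer set
	fmt.Println(averageThroughput(peers)) // 80: the newcomer's starting estimate
}
```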
-func (ps *peerSet) AllPeers() []*peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - list := make([]*peer, 0, len(ps.peers)) - for _, p := range ps.peers { - list = append(list, p) - } - return list -} - -// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers -// within the active peer set, ordered by their reputation. -func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) { - idle := func(p *peer) bool { - return atomic.LoadInt32(&p.headerIdle) == 0 - } - throughput := func(p *peer) float64 { - p.lock.RLock() - defer p.lock.RUnlock() - return p.headerThroughput - } - return ps.idlePeers(62, 64, idle, throughput) -} - -// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within -// the active peer set, ordered by their reputation. -func (ps *peerSet) BodyIdlePeers() ([]*peer, int) { - idle := func(p *peer) bool { - return atomic.LoadInt32(&p.blockIdle) == 0 - } - throughput := func(p *peer) float64 { - p.lock.RLock() - defer p.lock.RUnlock() - return p.blockThroughput - } - return ps.idlePeers(62, 64, idle, throughput) -} - -// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers -// within the active peer set, ordered by their reputation. -func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) { - idle := func(p *peer) bool { - return atomic.LoadInt32(&p.receiptIdle) == 0 - } - throughput := func(p *peer) float64 { - p.lock.RLock() - defer p.lock.RUnlock() - return p.receiptThroughput - } - return ps.idlePeers(63, 64, idle, throughput) -} - -// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle -// peers within the active peer set, ordered by their reputation. -func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) { - idle := func(p *peer) bool { - return atomic.LoadInt32(&p.stateIdle) == 0 - } - throughput := func(p *peer) float64 { - p.lock.RLock() - defer p.lock.RUnlock() - return p.stateThroughput - } - return ps.idlePeers(63, 64, idle, throughput) -} - -// idlePeers retrieves a flat list of all currently idle peers satisfying the -// protocol version constraints, using the provided function to check idleness. -// The resulting set of peers is sorted by their measured throughput. -func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool, throughput func(*peer) float64) ([]*peer, int) { - ps.lock.RLock() - defer ps.lock.RUnlock() - - idle, total := make([]*peer, 0, len(ps.peers)), 0 - for _, p := range ps.peers { - - if p.version >= minProtocol && p.version <= maxProtocol { - if idleCheck(p) { - idle = append(idle, p) - } - total++ - } - } - for i := 0; i < len(idle); i++ { - for j := i + 1; j < len(idle); j++ { - if throughput(idle[i]) < throughput(idle[j]) { - idle[i], idle[j] = idle[j], idle[i] - } - } - } - return idle, total -} - -// medianRTT returns the median RTT of the peerset, considering only the tuning -// peers if there are more peers available.
-func (ps *peerSet) medianRTT() time.Duration { - // Gather all the currently measured round trip times - ps.lock.RLock() - defer ps.lock.RUnlock() - - rtts := make([]float64, 0, len(ps.peers)) - for _, p := range ps.peers { - p.lock.RLock() - rtts = append(rtts, float64(p.rtt)) - p.lock.RUnlock() - } - sort.Float64s(rtts) - - median := rttMaxEstimate - if qosTuningPeers <= len(rtts) { - median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers - } else if len(rtts) > 0 { - median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (even like this, maintain some baseline QoS) - } - // Restrict the RTT into some QoS defaults, regardless of the true RTT - if median < rttMinEstimate { - median = rttMinEstimate - } - if median > rttMaxEstimate { - median = rttMaxEstimate - } - return median -} diff --git a/exp/downloader/queue.go b/exp/downloader/queue.go deleted file mode 100644 index b1eba0ca9b83b..0000000000000 --- a/exp/downloader/queue.go +++ /dev/null @@ -1,1146 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the block download scheduler to collect download tasks and schedule -// them in an ordered and throttled way. - -package downloader - -import ( - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/trie" - "github.com/rcrowley/go-metrics" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" -) - -var ( - blockCacheLimit = 8192 // Maximum number of blocks to cache before throttling the download - maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently -) - -var ( - errNoFetchesPending = errors.New("no fetches pending") - errStaleDelivery = errors.New("stale delivery") -) - -// fetchRequest is a currently running data retrieval operation. -type fetchRequest struct { - Peer *peer // Peer to which the request was sent - From uint64 // [eth/62] Requested chain element index (used for skeleton fills only) - Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority) - Headers []*types.Header // [eth/62] Requested headers, sorted by request order - Time time.Time // Time when the request was made -} - -// fetchResult is a struct collecting partial results from data fetchers until -// all outstanding pieces complete and the result as a whole can be processed.
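A compact re-expression of medianRTT above. The clamp bounds and the qosTuningPeers value used here are stand-ins for constants defined elsewhere in the downloader:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

const (
	rttMinEstimate = 2 * time.Second  // stand-in lower QoS bound
	rttMaxEstimate = 20 * time.Second // stand-in upper QoS bound
	qosTuningPeers = 5                // stand-in tuning-set size
)

// medianRTT mirrors the logic above: take the median of the tuning peers
// when enough peers exist, otherwise of everyone, then clamp into the
// QoS window regardless of the true measurements.
func medianRTT(rtts []time.Duration) time.Duration {
	sorted := append([]time.Duration(nil), rtts...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })

	median := rttMaxEstimate
	if qosTuningPeers <= len(sorted) {
		median = sorted[qosTuningPeers/2]
	} else if len(sorted) > 0 {
		median = sorted[len(sorted)/2]
	}
	if median < rttMinEstimate {
		median = rttMinEstimate
	}
	if median > rttMaxEstimate {
		median = rttMaxEstimate
	}
	return median
}

func main() {
	rtts := []time.Duration{300 * time.Millisecond, 1 * time.Second, 40 * time.Second}
	fmt.Println(medianRTT(rtts)) // 2s: the 1s median is clamped up to the floor
}
```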
-type fetchResult struct { - Pending int // Number of data fetches still pending - - Header *types.Header - Uncles []*types.Header - Transactions types.Transactions - Receipts types.Receipts -} - -// queue represents hashes that are either need fetching or are being fetched -type queue struct { - mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching - fastSyncPivot uint64 // Block number where the fast sync pivots into archive synchronisation mode - - headerHead common.Hash // [eth/62] Hash of the last queued header to verify order - - // Headers are "special", they download in batches, supported by a skeleton chain - headerTaskPool map[uint64]*types.Header // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers - headerTaskQueue *prque.Prque // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for - headerPeerMiss map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable - headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations - headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers - headerProced int // [eth/62] Number of headers already processed from the results - headerOffset uint64 // [eth/62] Number of the first header in the result cache - headerContCh chan bool // [eth/62] Channel to notify when header download finishes - - // All data retrievals below are based on an already assembles header chain - blockTaskPool map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers - blockTaskQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for - blockPendPool map[string]*fetchRequest // [eth/62] Currently pending block (body) retrieval operations - blockDonePool map[common.Hash]struct{} // [eth/62] Set of the completed block (body) fetches - - receiptTaskPool map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers - receiptTaskQueue *prque.Prque // [eth/63] Priority queue of the headers to fetch the receipts for - receiptPendPool map[string]*fetchRequest // [eth/63] Currently pending receipt retrieval operations - receiptDonePool map[common.Hash]struct{} // [eth/63] Set of the completed receipt fetches - - stateTaskIndex int // [eth/63] Counter indexing the added hashes to ensure prioritised retrieval order - stateTaskPool map[common.Hash]int // [eth/63] Pending node data retrieval tasks, mapping to their priority - stateTaskQueue *prque.Prque // [eth/63] Priority queue of the hashes to fetch the node data for - statePendPool map[string]*fetchRequest // [eth/63] Currently pending node data retrieval operations - - stateDatabase ethdb.Database // [eth/63] Trie database to populate during state reassembly - stateScheduler *state.StateSync // [eth/63] State trie synchronisation scheduler and integrator - stateProcessors int32 // [eth/63] Number of currently running state processors - stateSchedLock sync.RWMutex // [eth/63] Lock serialising access to the state scheduler - - resultCache []*fetchResult // Downloaded but not yet delivered fetch results - resultOffset uint64 // Offset of the first cached fetch result in the block chain - - lock *sync.Mutex - active *sync.Cond - closed bool -} - -// newQueue creates a new download queue for scheduling block retrieval. 
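The resultCache/resultOffset pair declared in the struct above forms a sliding window over block numbers: block N always lands in slot N - resultOffset, and draining results advances the offset. A small sketch of just that index arithmetic (a hypothetical helper; the queue does this inline):

```go
package main

import "fmt"

// cacheIndex maps a block number to its slot in a sliding result window
// anchored at resultOffset. Numbers outside the window cannot be cached,
// which is what forces the download to throttle.
func cacheIndex(blockNumber, resultOffset uint64, cacheLen int) (int, bool) {
	index := int(int64(blockNumber) - int64(resultOffset))
	if index < 0 || index >= cacheLen {
		return 0, false // outside the window: caller must throttle or fail
	}
	return index, true
}

func main() {
	fmt.Println(cacheIndex(100, 1, 8192))  // 99 true
	fmt.Println(cacheIndex(9000, 1, 8192)) // 0 false: beyond the window
}
```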
-func newQueue(stateDb ethdb.Database) *queue { - lock := new(sync.Mutex) - return &queue{ - headerPendPool: make(map[string]*fetchRequest), - headerContCh: make(chan bool), - blockTaskPool: make(map[common.Hash]*types.Header), - blockTaskQueue: prque.New(), - blockPendPool: make(map[string]*fetchRequest), - blockDonePool: make(map[common.Hash]struct{}), - receiptTaskPool: make(map[common.Hash]*types.Header), - receiptTaskQueue: prque.New(), - receiptPendPool: make(map[string]*fetchRequest), - receiptDonePool: make(map[common.Hash]struct{}), - stateTaskPool: make(map[common.Hash]int), - stateTaskQueue: prque.New(), - statePendPool: make(map[string]*fetchRequest), - stateDatabase: stateDb, - resultCache: make([]*fetchResult, blockCacheLimit), - active: sync.NewCond(lock), - lock: lock, - } -} - -// Reset clears out the queue contents. -func (q *queue) Reset() { - q.lock.Lock() - defer q.lock.Unlock() - - q.stateSchedLock.Lock() - defer q.stateSchedLock.Unlock() - - q.closed = false - q.mode = FullSync - q.fastSyncPivot = 0 - - q.headerHead = common.Hash{} - - q.headerPendPool = make(map[string]*fetchRequest) - - q.blockTaskPool = make(map[common.Hash]*types.Header) - q.blockTaskQueue.Reset() - q.blockPendPool = make(map[string]*fetchRequest) - q.blockDonePool = make(map[common.Hash]struct{}) - - q.receiptTaskPool = make(map[common.Hash]*types.Header) - q.receiptTaskQueue.Reset() - q.receiptPendPool = make(map[string]*fetchRequest) - q.receiptDonePool = make(map[common.Hash]struct{}) - - q.stateTaskIndex = 0 - q.stateTaskPool = make(map[common.Hash]int) - q.stateTaskQueue.Reset() - q.statePendPool = make(map[string]*fetchRequest) - q.stateScheduler = nil - - q.resultCache = make([]*fetchResult, blockCacheLimit) - q.resultOffset = 0 -} - -// Close marks the end of the sync, unblocking WaitResults. -// It may be called even if the queue is already closed. -func (q *queue) Close() { - q.lock.Lock() - q.closed = true - q.lock.Unlock() - q.active.Broadcast() -} - -// PendingHeaders retrieves the number of header requests pending for retrieval. -func (q *queue) PendingHeaders() int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.headerTaskQueue.Size() -} - -// PendingBlocks retrieves the number of block (body) requests pending for retrieval. -func (q *queue) PendingBlocks() int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.blockTaskQueue.Size() -} - -// PendingReceipts retrieves the number of block receipts pending for retrieval. -func (q *queue) PendingReceipts() int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.receiptTaskQueue.Size() -} - -// PendingNodeData retrieves the number of node data entries pending for retrieval. -func (q *queue) PendingNodeData() int { - q.stateSchedLock.RLock() - defer q.stateSchedLock.RUnlock() - - if q.stateScheduler != nil { - return q.stateScheduler.Pending() - } - return 0 -} - -// InFlightHeaders retrieves whether there are header fetch requests currently -// in flight. -func (q *queue) InFlightHeaders() bool { - q.lock.Lock() - defer q.lock.Unlock() - - return len(q.headerPendPool) > 0 -} - -// InFlightBlocks retrieves whether there are block fetch requests currently in -// flight. -func (q *queue) InFlightBlocks() bool { - q.lock.Lock() - defer q.lock.Unlock() - - return len(q.blockPendPool) > 0 -} - -// InFlightReceipts retrieves whether there are receipt fetch requests currently -// in flight. 
-func (q *queue) InFlightReceipts() bool { - q.lock.Lock() - defer q.lock.Unlock() - - return len(q.receiptPendPool) > 0 -} - -// InFlightNodeData retrieves whether there are node data entry fetch requests -// currently in flight. -func (q *queue) InFlightNodeData() bool { - q.lock.Lock() - defer q.lock.Unlock() - - return len(q.statePendPool)+int(atomic.LoadInt32(&q.stateProcessors)) > 0 -} - -// Idle returns whether the queue is fully idle or has some data still inside. This -// method is used by the tester to detect termination events. -func (q *queue) Idle() bool { - q.lock.Lock() - defer q.lock.Unlock() - - queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size() - pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool) - cached := len(q.blockDonePool) + len(q.receiptDonePool) - - q.stateSchedLock.RLock() - if q.stateScheduler != nil { - queued += q.stateScheduler.Pending() - } - q.stateSchedLock.RUnlock() - - return (queued + pending + cached) == 0 -} - -// FastSyncPivot retrieves the currently used fast sync pivot point. -func (q *queue) FastSyncPivot() uint64 { - q.lock.Lock() - defer q.lock.Unlock() - - return q.fastSyncPivot -} - -// ShouldThrottleBlocks checks if the download should be throttled (active block (body) -// fetches exceed block cache). -func (q *queue) ShouldThrottleBlocks() bool { - q.lock.Lock() - defer q.lock.Unlock() - - // Calculate the currently in-flight block (body) requests - pending := 0 - for _, request := range q.blockPendPool { - pending += len(request.Hashes) + len(request.Headers) - } - // Throttle if more blocks (bodies) are in-flight than free space in the cache - return pending >= len(q.resultCache)-len(q.blockDonePool) -} - -// ShouldThrottleReceipts checks if the download should be throttled (active receipt -// fetches exceed block cache). -func (q *queue) ShouldThrottleReceipts() bool { - q.lock.Lock() - defer q.lock.Unlock() - - // Calculate the currently in-flight receipt requests - pending := 0 - for _, request := range q.receiptPendPool { - pending += len(request.Headers) - } - // Throttle if more receipts are in-flight than free space in the cache - return pending >= len(q.resultCache)-len(q.receiptDonePool) -} - -// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill -// up an already retrieved header skeleton. -func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { - q.lock.Lock() - defer q.lock.Unlock() - - // No skeleton retrieval can be in progress, fail hard if so (huge implementation bug) - if q.headerResults != nil { - panic("skeleton assembly already in progress") - } - // Schedule all the header retrieval tasks for the skeleton assembly - q.headerTaskPool = make(map[uint64]*types.Header) - q.headerTaskQueue = prque.New() - q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains - q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch) - q.headerProced = 0 - q.headerOffset = from - q.headerContCh = make(chan bool, 1) - - for i, header := range skeleton { - index := from + uint64(i*MaxHeaderFetch) - - q.headerTaskPool[index] = header - q.headerTaskQueue.Push(index, -float32(index)) - } -} - -// RetrieveHeaders retrieves the header chain assembled based on the scheduled -// skeleton.
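ScheduleSkeleton's index math spaces the fill tasks exactly one MaxHeaderFetch apart: skeleton header i anchors the batch of filling headers that starts at from + i*MaxHeaderFetch. A worked sketch, assuming a batch size of 192:

```go
package main

import "fmt"

const maxHeaderFetch = 192 // assumed batch size, MaxHeaderFetch in the source

// skeletonTasks reproduces the starting index computed for each skeleton
// header's fill batch in ScheduleSkeleton above.
func skeletonTasks(from uint64, skeleton int) []uint64 {
	starts := make([]uint64, 0, skeleton)
	for i := 0; i < skeleton; i++ {
		starts = append(starts, from+uint64(i*maxHeaderFetch))
	}
	return starts
}

func main() {
	// Three skeleton headers received for a sync starting at block 1:
	fmt.Println(skeletonTasks(1, 3)) // [1 193 385]
}
```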
-func (q *queue) RetrieveHeaders() ([]*types.Header, int) { - q.lock.Lock() - defer q.lock.Unlock() - - headers, proced := q.headerResults, q.headerProced - q.headerResults, q.headerProced = nil, 0 - - return headers, proced -} - -// Schedule adds a set of headers for the download queue for scheduling, returning -// the new headers encountered. -func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { - q.lock.Lock() - defer q.lock.Unlock() - - // Insert all the headers prioritised by the contained block number - inserts := make([]*types.Header, 0, len(headers)) - for _, header := range headers { - // Make sure chain order is honoured and preserved throughout - hash := header.Hash() - if header.Number == nil || header.Number.Uint64() != from { - glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ordering, expected %d", header.Number, hash[:4], from) - break - } - if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash { - glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ancestry", header.Number, hash[:4]) - break - } - // Make sure no duplicate requests are executed - if _, ok := q.blockTaskPool[hash]; ok { - glog.V(logger.Warn).Infof("Header #%d [%x] already scheduled for block fetch", header.Number.Uint64(), hash[:4]) - continue - } - if _, ok := q.receiptTaskPool[hash]; ok { - glog.V(logger.Warn).Infof("Header #%d [%x] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4]) - continue - } - // Queue the header for content retrieval - q.blockTaskPool[hash] = header - q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) - - if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot { - // Fast phase of the fast sync, retrieve receipts too - q.receiptTaskPool[hash] = header - q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) - } - if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot { - // Pivoting point of the fast sync, retrieve the state tries - q.stateSchedLock.Lock() - q.stateScheduler = state.NewStateSync(header.Root, q.stateDatabase) - q.stateSchedLock.Unlock() - } - inserts = append(inserts, header) - q.headerHead = hash - from++ - } - return inserts -} - -// WaitResults retrieves and permanently removes a batch of fetch -// results from the cache. the result slice will be empty if the queue -// has been closed. -func (q *queue) WaitResults() []*fetchResult { - q.lock.Lock() - defer q.lock.Unlock() - - nproc := q.countProcessableItems() - for nproc == 0 && !q.closed { - q.active.Wait() - nproc = q.countProcessableItems() - } - results := make([]*fetchResult, nproc) - copy(results, q.resultCache[:nproc]) - if len(results) > 0 { - // Mark results as done before dropping them from the cache. - for _, result := range results { - hash := result.Header.Hash() - delete(q.blockDonePool, hash) - delete(q.receiptDonePool, hash) - } - // Delete the results from the cache and clear the tail. - copy(q.resultCache, q.resultCache[nproc:]) - for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ { - q.resultCache[i] = nil - } - // Advance the expected block number of the first cache entry. - q.resultOffset += uint64(nproc) - } - return results -} - -// countProcessableItems counts the processable items. -func (q *queue) countProcessableItems() int { - for i, result := range q.resultCache { - // Don't process incomplete or unavailable items. 
- if result == nil || result.Pending > 0 { - return i - } - // Special handling for the fast-sync pivot block: - if q.mode == FastSync { - bnum := result.Header.Number.Uint64() - if bnum == q.fastSyncPivot { - // If the state of the pivot block is not - // available yet, we cannot proceed and return 0. - // - // Stop before processing the pivot block to ensure that - // resultCache has space for fsHeaderForceVerify items. Not - // doing this could leave us unable to download the required - // amount of headers. - if i > 0 || len(q.stateTaskPool) > 0 || q.PendingNodeData() > 0 { - return i - } - for j := 0; j < fsHeaderForceVerify; j++ { - if i+j+1 >= len(q.resultCache) || q.resultCache[i+j+1] == nil { - return i - } - } - } - // If we're just the fast sync pivot, stop as well - // because the following batch needs different insertion. - // This simplifies handling the switchover in d.process. - if bnum == q.fastSyncPivot+1 && i > 0 { - return i - } - } - } - return len(q.resultCache) -} - -// ReserveHeaders reserves a set of headers for the given peer, skipping any -// previously failed batches. -func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest { - q.lock.Lock() - defer q.lock.Unlock() - - // Short circuit if the peer's already downloading something (sanity check to - // not corrupt state) - if _, ok := q.headerPendPool[p.id]; ok { - return nil - } - // Retrieve a batch of hashes, skipping previously failed ones - send, skip := uint64(0), []uint64{} - for send == 0 && !q.headerTaskQueue.Empty() { - from, _ := q.headerTaskQueue.Pop() - if q.headerPeerMiss[p.id] != nil { - if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok { - skip = append(skip, from.(uint64)) - continue - } - } - send = from.(uint64) - } - // Merge all the skipped batches back - for _, from := range skip { - q.headerTaskQueue.Push(from, -float32(from)) - } - // Assemble and return the block download request - if send == 0 { - return nil - } - request := &fetchRequest{ - Peer: p, - From: send, - Time: time.Now(), - } - q.headerPendPool[p.id] = request - return request -} - -// ReserveNodeData reserves a set of node data hashes for the given peer, skipping -// any previously failed download. -func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest { - // Create a task generator to fetch status-fetch tasks if all schedules ones are done - generator := func(max int) { - q.stateSchedLock.Lock() - defer q.stateSchedLock.Unlock() - - if q.stateScheduler != nil { - for _, hash := range q.stateScheduler.Missing(max) { - q.stateTaskPool[hash] = q.stateTaskIndex - q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) - q.stateTaskIndex++ - } - } - } - q.lock.Lock() - defer q.lock.Unlock() - - return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, maxInFlightStates) -} - -// reserveHashes reserves a set of hashes for the given peer, skipping previously -// failed ones. -// -// Note, this method expects the queue lock to be already held for writing. The -// reason the lock is not obtained in here is because the parameters already need -// to access the queue, so they already need a lock anyway. -func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGen func(int), pendPool map[string]*fetchRequest, maxPending int) *fetchRequest { - // Short circuit if the peer's already downloading something (sanity check to - // not corrupt state) - if _, ok := pendPool[p.id]; ok { - return nil - } - // Calculate an upper limit on the hashes we might fetch (i.e. 
throttling) - allowance := maxPending - if allowance > 0 { - for _, request := range pendPool { - allowance -= len(request.Hashes) - } - } - // If there's a task generator, ask it to fill our task queue - if taskGen != nil && taskQueue.Size() < allowance { - taskGen(allowance - taskQueue.Size()) - } - if taskQueue.Empty() { - return nil - } - // Retrieve a batch of hashes, skipping previously failed ones - send := make(map[common.Hash]int) - skip := make(map[common.Hash]int) - - for proc := 0; (allowance == 0 || proc < allowance) && len(send) < count && !taskQueue.Empty(); proc++ { - hash, priority := taskQueue.Pop() - if p.Lacks(hash.(common.Hash)) { - skip[hash.(common.Hash)] = int(priority) - } else { - send[hash.(common.Hash)] = int(priority) - } - } - // Merge all the skipped hashes back - for hash, index := range skip { - taskQueue.Push(hash, float32(index)) - } - // Assemble and return the block download request - if len(send) == 0 { - return nil - } - request := &fetchRequest{ - Peer: p, - Hashes: send, - Time: time.Now(), - } - pendPool[p.id] = request - - return request -} - -// ReserveBodies reserves a set of body fetches for the given peer, skipping any -// previously failed downloads. Besides the next batch of needed fetches, it also -// returns a flag indicating whether empty blocks were queued that require processing. -func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) { - isNoop := func(header *types.Header) bool { - return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash - } - q.lock.Lock() - defer q.lock.Unlock() - - return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop) -} - -// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping -// any previously failed downloads. Besides the next batch of needed fetches, it -// also returns a flag indicating whether empty receipts were queued that require importing. -func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) { - isNoop := func(header *types.Header) bool { - return header.ReceiptHash == types.EmptyRootHash - } - q.lock.Lock() - defer q.lock.Unlock() - - return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop) -} - -// reserveHeaders reserves a set of data download operations for a given peer, -// skipping any previously failed ones. This method is a generic version used -// by the individual special reservation functions. -// -// Note, this method expects the queue lock to be already held for writing. The -// reason the lock is not obtained in here is because the parameters already need -// to access the queue, so they already need a lock anyway. -func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, - pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) { - // Short circuit if the pool has been depleted, or if the peer's already - // downloading something (sanity check not to corrupt state) - if taskQueue.Empty() { - return nil, false, nil - } - if _, ok := pendPool[p.id]; ok { - return nil, false, nil - } - // Calculate an upper limit on the items we might fetch (i.e.
throttling) - space := len(q.resultCache) - len(donePool) - for _, request := range pendPool { - space -= len(request.Headers) - } - // Retrieve a batch of tasks, skipping previously failed ones - send := make([]*types.Header, 0, count) - skip := make([]*types.Header, 0) - - progress := false - for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ { - header := taskQueue.PopItem().(*types.Header) - - // If we're the first to request this task, initialise the result container - index := int(header.Number.Int64() - int64(q.resultOffset)) - if index >= len(q.resultCache) || index < 0 { - common.Report("index allocation went beyond available resultCache space") - return nil, false, errInvalidChain - } - if q.resultCache[index] == nil { - components := 1 - if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot { - components = 2 - } - q.resultCache[index] = &fetchResult{ - Pending: components, - Header: header, - } - } - // If this fetch task is a noop, skip this fetch operation - if isNoop(header) { - donePool[header.Hash()] = struct{}{} - delete(taskPool, header.Hash()) - - space, proc = space-1, proc-1 - q.resultCache[index].Pending-- - progress = true - continue - } - // Otherwise unless the peer is known not to have the data, add to the retrieve list - if p.Lacks(header.Hash()) { - skip = append(skip, header) - } else { - send = append(send, header) - } - } - // Merge all the skipped headers back - for _, header := range skip { - taskQueue.Push(header, -float32(header.Number.Uint64())) - } - if progress { - // Wake WaitResults, resultCache was modified - q.active.Signal() - } - // Assemble and return the block download request - if len(send) == 0 { - return nil, progress, nil - } - request := &fetchRequest{ - Peer: p, - Headers: send, - Time: time.Now(), - } - pendPool[p.id] = request - - return request, progress, nil -} - -// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue. -func (q *queue) CancelHeaders(request *fetchRequest) { - q.cancel(request, q.headerTaskQueue, q.headerPendPool) -} - -// CancelBodies aborts a body fetch request, returning all pending headers to the -// task queue. -func (q *queue) CancelBodies(request *fetchRequest) { - q.cancel(request, q.blockTaskQueue, q.blockPendPool) -} - -// CancelReceipts aborts a receipt fetch request, returning all pending headers to -// the task queue. -func (q *queue) CancelReceipts(request *fetchRequest) { - q.cancel(request, q.receiptTaskQueue, q.receiptPendPool) -} - -// CancelNodeData aborts a node state data fetch request, returning all pending -// hashes to the task queue. -func (q *queue) CancelNodeData(request *fetchRequest) { - q.cancel(request, q.stateTaskQueue, q.statePendPool) -} - -// cancel aborts a fetch request, returning all pending hashes to the task queue. -func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - - if request.From > 0 { - taskQueue.Push(request.From, -float32(request.From)) - } - for hash, index := range request.Hashes { - taskQueue.Push(hash, float32(index)) - } - for _, header := range request.Headers { - taskQueue.Push(header, -float32(header.Number.Uint64())) - } - delete(pendPool, request.Peer.id) -} - -// Revoke cancels all pending requests belonging to a given peer. This method is -// meant to be called during a peer drop to quickly reassign owned data fetches -// to remaining nodes.
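
The Revoke path below is simple hand-back bookkeeping: everything a dropped peer still owned goes back onto the shared task queues so other peers can claim it. A minimal, standalone sketch of that pattern, with all names invented for illustration (this is not the package's API):

package main

import "fmt"

type request struct{ tasks []uint64 }

// revoke returns a dropped peer's in-flight tasks to the shared queue so the
// remaining peers can pick them up, then erases the peer's pending entry.
func revoke(peerID string, pending map[string]*request, queue *[]uint64) {
	if req, ok := pending[peerID]; ok {
		*queue = append(*queue, req.tasks...)
		delete(pending, peerID)
	}
}

func main() {
	pending := map[string]*request{"peer1": {tasks: []uint64{7, 8, 9}}}
	queue := []uint64{}
	revoke("peer1", pending, &queue)
	fmt.Println(queue, len(pending)) // [7 8 9] 0
}
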
-func (q *queue) Revoke(peerId string) { - q.lock.Lock() - defer q.lock.Unlock() - - if request, ok := q.blockPendPool[peerId]; ok { - for _, header := range request.Headers { - q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) - } - delete(q.blockPendPool, peerId) - } - if request, ok := q.receiptPendPool[peerId]; ok { - for _, header := range request.Headers { - q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) - } - delete(q.receiptPendPool, peerId) - } - if request, ok := q.statePendPool[peerId]; ok { - for hash, index := range request.Hashes { - q.stateTaskQueue.Push(hash, float32(index)) - } - delete(q.statePendPool, peerId) - } -} - -// ExpireHeaders checks for in flight requests that exceeded a timeout allowance, -// canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter) -} - -// ExpireBodies checks for in flight block body requests that exceeded a timeout -// allowance, canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireBodies(timeout time.Duration) map[string]int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter) -} - -// ExpireReceipts checks for in flight receipt requests that exceeded a timeout -// allowance, canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter) -} - -// ExpireNodeData checks for in flight node data requests that exceeded a timeout -// allowance, canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireNodeData(timeout time.Duration) map[string]int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.expire(timeout, q.statePendPool, q.stateTaskQueue, stateTimeoutMeter) -} - -// expire is the generic check that moves expired tasks from a pending pool back -// into a task pool, returning all entities caught with expired tasks. -// -// Note, this method expects the queue lock to be already held. The -// reason the lock is not obtained in here is because the parameters already need -// to access the queue, so they already need a lock anyway.
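
Before the generic implementation, a hedged, standalone reduction of the expiry scan it performs: walk the pending pool, collect anything older than the timeout, then delete the collected entries in a second pass, mirroring the two-pass structure of the method that follows. Types and names here are simplified stand-ins, not the package's API:

package main

import (
	"fmt"
	"time"
)

type pendingReq struct {
	started time.Time
	items   int
}

// expireStale reports the item count per peer whose request outlived the
// timeout, then removes those peers from the pending pool.
func expireStale(pend map[string]*pendingReq, timeout time.Duration) map[string]int {
	expired := make(map[string]int)
	for id, req := range pend {
		if time.Since(req.started) > timeout {
			expired[id] = req.items
		}
	}
	for id := range expired {
		delete(pend, id)
	}
	return expired
}

func main() {
	pend := map[string]*pendingReq{
		"slow": {started: time.Now().Add(-10 * time.Second), items: 3},
		"fast": {started: time.Now(), items: 5},
	}
	fmt.Println(expireStale(pend, 5*time.Second)) // map[slow:3]
}
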
-func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int { - // Iterate over the expired requests and return each to the queue - expiries := make(map[string]int) - for id, request := range pendPool { - if time.Since(request.Time) > timeout { - // Update the metrics with the timeout - timeoutMeter.Mark(1) - - // Return any non-satisfied requests to the pool - if request.From > 0 { - taskQueue.Push(request.From, -float32(request.From)) - } - for hash, index := range request.Hashes { - taskQueue.Push(hash, float32(index)) - } - for _, header := range request.Headers { - taskQueue.Push(header, -float32(header.Number.Uint64())) - } - // Add the peer to the expiry report along with the number of failed requests - expirations := len(request.Hashes) - if expirations < len(request.Headers) { - expirations = len(request.Headers) - } - expiries[id] = expirations - } - } - // Remove the expired requests from the pending pool - for id := range expiries { - delete(pendPool, id) - } - return expiries -} - -// DeliverHeaders injects a header retrieval response into the header results -// cache. This method either accepts all headers it received, or none of them -// if they do not map correctly to the skeleton. -// -// If the headers are accepted, the method makes an attempt to deliver the set -// of ready headers to the processor to keep the pipeline full. However it will -// not block to prevent stalling other pending deliveries. -func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) { - q.lock.Lock() - defer q.lock.Unlock() - - // Short circuit if the data was never requested - request := q.headerPendPool[id] - if request == nil { - return 0, errNoFetchesPending - } - headerReqTimer.UpdateSince(request.Time) - delete(q.headerPendPool, id) - - // Ensure headers can be mapped onto the skeleton chain - target := q.headerTaskPool[request.From].Hash() - - accepted := len(headers) == MaxHeaderFetch - if accepted { - if headers[0].Number.Uint64() != request.From { - glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From) - accepted = false - } else if headers[len(headers)-1].Hash() != target { - glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4]) - accepted = false - } - } - if accepted { - for i, header := range headers[1:] { - hash := header.Hash() - if want := request.From + 1 + uint64(i); header.Number.Uint64() != want { - glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ordering, expected %d", id, header.Number, hash[:4], want) - accepted = false - break - } - if headers[i].Hash() != header.ParentHash { - glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ancestry", id, header.Number, hash[:4]) - accepted = false - break - } - } - } - // If the batch of headers wasn't accepted, mark as unavailable - if !accepted { - glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From) - - miss := q.headerPeerMiss[id] - if miss == nil { - q.headerPeerMiss[id] = make(map[uint64]struct{}) - miss = q.headerPeerMiss[id] - } - miss[request.From] = struct{}{} - - q.headerTaskQueue.Push(request.From, -float32(request.From)) - return 0, errors.New("delivery
not accepted") - } - // Clean up a successful fetch and try to deliver any sub-results - copy(q.headerResults[request.From-q.headerOffset:], headers) - delete(q.headerTaskPool, request.From) - - ready := 0 - for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil { - ready += MaxHeaderFetch - } - if ready > 0 { - // Headers are ready for delivery, gather them and push forward (non blocking) - process := make([]*types.Header, ready) - copy(process, q.headerResults[q.headerProced:q.headerProced+ready]) - - select { - case headerProcCh <- process: - glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number) - q.headerProced += len(process) - default: - } - } - // Check for termination and return - if len(q.headerTaskPool) == 0 { - q.headerContCh <- false - } - return len(headers), nil -} - -// DeliverBodies injects a block body retrieval response into the results queue. -// The method returns the number of blocks bodies accepted from the delivery and -// also wakes any threads waiting for data delivery. -func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) { - q.lock.Lock() - defer q.lock.Unlock() - - reconstruct := func(header *types.Header, index int, result *fetchResult) error { - if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash { - return errInvalidBody - } - result.Transactions = txLists[index] - result.Uncles = uncleLists[index] - return nil - } - return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct) -} - -// DeliverReceipts injects a receipt retrieval response into the results queue. -// The method returns the number of transaction receipts accepted from the delivery -// and also wakes any threads waiting for data delivery. -func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) { - q.lock.Lock() - defer q.lock.Unlock() - - reconstruct := func(header *types.Header, index int, result *fetchResult) error { - if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash { - return errInvalidReceipt - } - result.Receipts = receiptList[index] - return nil - } - return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct) -} - -// deliver injects a data retrieval response into the results queue. -// -// Note, this method expects the queue lock to be already held for writing. The -// reason the lock is not obtained in here is because the parameters already need -// to access the queue, so they already need a lock anyway. 
-func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, - pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer, - results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) { - - // Short circuit if the data was never requested - request := pendPool[id] - if request == nil { - return 0, errNoFetchesPending - } - reqTimer.UpdateSince(request.Time) - delete(pendPool, id) - - // If no data items were retrieved, mark them as unavailable for the origin peer - if results == 0 { - for _, header := range request.Headers { - request.Peer.MarkLacking(header.Hash()) - } - } - // Assemble each of the results with their headers and retrieved data parts - var ( - accepted int - failure error - useful bool - ) - for i, header := range request.Headers { - // Short circuit assembly if no more fetch results are found - if i >= results { - break - } - // Reconstruct the next result if contents match up - index := int(header.Number.Int64() - int64(q.resultOffset)) - if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil { - failure = errInvalidChain - break - } - if err := reconstruct(header, i, q.resultCache[index]); err != nil { - failure = err - break - } - donePool[header.Hash()] = struct{}{} - q.resultCache[index].Pending-- - useful = true - accepted++ - - // Clean up a successful fetch - request.Headers[i] = nil - delete(taskPool, header.Hash()) - } - // Return all failed or missing fetches to the queue - for _, header := range request.Headers { - if header != nil { - taskQueue.Push(header, -float32(header.Number.Uint64())) - } - } - // Wake up WaitResults - if accepted > 0 { - q.active.Signal() - } - // If none of the data was good, it's a stale delivery - switch { - case failure == nil || failure == errInvalidChain: - return accepted, failure - case useful: - return accepted, fmt.Errorf("partial failure: %v", failure) - default: - return accepted, errStaleDelivery - } -} - -// DeliverNodeData injects a node state data retrieval response into the queue. -// The method returns the number of node state entries originally requested, and -// the number of them actually accepted from the delivery. 
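
DeliverNodeData's first job below is screening: only blobs whose content hash was actually requested are admitted, everything unsolicited is rejected. A standalone sketch of that check, with sha256 standing in for the Keccak256 hashing the real code uses:

package main

import (
	"crypto/sha256"
	"fmt"
)

// filterRequested keeps only the blobs whose content hash appears in the
// requested set, discarding everything unsolicited.
func filterRequested(requested map[[32]byte]bool, blobs [][]byte) [][]byte {
	var accepted [][]byte
	for _, blob := range blobs {
		if requested[sha256.Sum256(blob)] {
			accepted = append(accepted, blob)
		}
	}
	return accepted
}

func main() {
	want := sha256.Sum256([]byte("trie node"))
	requested := map[[32]byte]bool{want: true}
	got := filterRequested(requested, [][]byte{[]byte("trie node"), []byte("junk")})
	fmt.Println(len(got)) // 1
}
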
-func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, int)) (int, error) { - q.lock.Lock() - defer q.lock.Unlock() - - // Short circuit if the data was never requested - request := q.statePendPool[id] - if request == nil { - return 0, errNoFetchesPending - } - stateReqTimer.UpdateSince(request.Time) - delete(q.statePendPool, id) - - // If no data was retrieved, mark their hashes as unavailable for the origin peer - if len(data) == 0 { - for hash, _ := range request.Hashes { - request.Peer.MarkLacking(hash) - } - } - // Iterate over the downloaded data and verify each of them - accepted, errs := 0, make([]error, 0) - process := []trie.SyncResult{} - for _, blob := range data { - // Skip any state trie entries that were not requested - hash := common.BytesToHash(crypto.Keccak256(blob)) - if _, ok := request.Hashes[hash]; !ok { - errs = append(errs, fmt.Errorf("non-requested state data %x", hash)) - continue - } - // Inject the next state trie item into the processing queue - process = append(process, trie.SyncResult{Hash: hash, Data: blob}) - accepted++ - - delete(request.Hashes, hash) - delete(q.stateTaskPool, hash) - } - // Start the asynchronous node state data injection - atomic.AddInt32(&q.stateProcessors, 1) - go func() { - defer atomic.AddInt32(&q.stateProcessors, -1) - q.deliverNodeData(process, callback) - }() - // Return all failed or missing fetches to the queue - for hash, index := range request.Hashes { - q.stateTaskQueue.Push(hash, float32(index)) - } - // If none of the data items were good, it's a stale delivery - switch { - case len(errs) == 0: - return accepted, nil - case len(errs) == len(request.Hashes): - return accepted, errStaleDelivery - default: - return accepted, fmt.Errorf("multiple failures: %v", errs) - } -} - -// deliverNodeData is the asynchronous node data processor that injects a batch -// of sync results into the state scheduler. -func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(error, int)) { - // Wake up WaitResults after the state has been written because it - // might be waiting for the pivot block state to get completed. - defer q.active.Signal() - - // Process results one by one to permit task fetches in between - for i, result := range results { - q.stateSchedLock.Lock() - - if q.stateScheduler == nil { - // Syncing aborted since this async delivery started, bail out - q.stateSchedLock.Unlock() - callback(errNoFetchesPending, i) - return - } - if _, err := q.stateScheduler.Process([]trie.SyncResult{result}); err != nil { - // Processing a state result failed, bail out - q.stateSchedLock.Unlock() - callback(err, i) - return - } - // Item processing succeeded, release the lock (temporarily) - q.stateSchedLock.Unlock() - } - callback(nil, len(results)) -} - -// Prepare configures the result cache to allow accepting and caching inbound -// fetch results. 
-func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) { - q.lock.Lock() - defer q.lock.Unlock() - - // Prepare the queue for sync results - if q.resultOffset < offset { - q.resultOffset = offset - } - q.fastSyncPivot = pivot - q.mode = mode - - // If long-running fast sync, also start up a head state retrieval immediately - if mode == FastSync && pivot > 0 { - q.stateScheduler = state.NewStateSync(head.Root, q.stateDatabase) - } -} diff --git a/exp/downloader/types.go b/exp/downloader/types.go deleted file mode 100644 index 4672f0d01fe48..0000000000000 --- a/exp/downloader/types.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package downloader - -import ( - "fmt" - "math/big" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" -) - -// headerCheckFn is a callback type for verifying a header's presence in the local chain. -type headerCheckFn func(common.Hash) bool - -// blockAndStateCheckFn is a callback type for verifying block and associated states' presence in the local chain. -type blockAndStateCheckFn func(common.Hash) bool - -// headerRetrievalFn is a callback type for retrieving a header from the local chain. -type headerRetrievalFn func(common.Hash) *types.Header - -// blockRetrievalFn is a callback type for retrieving a block from the local chain. -type blockRetrievalFn func(common.Hash) *types.Block - -// headHeaderRetrievalFn is a callback type for retrieving the head header from the local chain. -type headHeaderRetrievalFn func() *types.Header - -// headBlockRetrievalFn is a callback type for retrieving the head block from the local chain. -type headBlockRetrievalFn func() *types.Block - -// headFastBlockRetrievalFn is a callback type for retrieving the head fast block from the local chain. -type headFastBlockRetrievalFn func() *types.Block - -// headBlockCommitterFn is a callback for directly committing the head block to a certain entity. -type headBlockCommitterFn func(common.Hash) error - -// tdRetrievalFn is a callback type for retrieving the total difficulty of a local block. -type tdRetrievalFn func(common.Hash) *big.Int - -// headerChainInsertFn is a callback type to insert a batch of headers into the local chain. -type headerChainInsertFn func([]*types.Header, int) (int, error) - -// blockChainInsertFn is a callback type to insert a batch of blocks into the local chain. -type blockChainInsertFn func(types.Blocks) (int, error) - -// receiptChainInsertFn is a callback type to insert a batch of receipts into the local chain. -type receiptChainInsertFn func(types.Blocks, []types.Receipts) (int, error) - -// chainRollbackFn is a callback type to remove a few recently added elements from the local chain.
-type chainRollbackFn func([]common.Hash) - -// peerDropFn is a callback type for dropping a peer detected as malicious. -type peerDropFn func(id string) - -// dataPack is a data message returned by a peer for some query. -type dataPack interface { - PeerId() string - Items() int - Stats() string -} - -// headerPack is a batch of block headers returned by a peer. -type headerPack struct { - peerId string - headers []*types.Header -} - -func (p *headerPack) PeerId() string { return p.peerId } -func (p *headerPack) Items() int { return len(p.headers) } -func (p *headerPack) Stats() string { return fmt.Sprintf("%d", len(p.headers)) } - -// bodyPack is a batch of block bodies returned by a peer. -type bodyPack struct { - peerId string - transactions [][]*types.Transaction - uncles [][]*types.Header -} - -func (p *bodyPack) PeerId() string { return p.peerId } -func (p *bodyPack) Items() int { - if len(p.transactions) <= len(p.uncles) { - return len(p.transactions) - } - return len(p.uncles) -} -func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) } - -// receiptPack is a batch of receipts returned by a peer. -type receiptPack struct { - peerId string - receipts [][]*types.Receipt -} - -func (p *receiptPack) PeerId() string { return p.peerId } -func (p *receiptPack) Items() int { return len(p.receipts) } -func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) } - -// statePack is a batch of states returned by a peer. -type statePack struct { - peerId string - states [][]byte -} - -func (p *statePack) PeerId() string { return p.peerId } -func (p *statePack) Items() int { return len(p.states) } -func (p *statePack) Stats() string { return fmt.Sprintf("%d", len(p.states)) } diff --git a/exp/fetcher/fetcher.go b/exp/fetcher/fetcher.go deleted file mode 100644 index bf9dab31691c2..0000000000000 --- a/exp/fetcher/fetcher.go +++ /dev/null @@ -1,751 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// Package fetcher contains the block announcement based synchronisation.
-package fetcher - -import ( - "errors" - "fmt" - "math/rand" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" -) - -const ( - arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested - gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches - fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block - maxUncleDist = 7 // Maximum allowed backward distance from the chain head - maxQueueDist = 32 // Maximum allowed distance from the chain head to queue - hashLimit = 256 // Maximum number of unique blocks a peer may have announced - blockLimit = 64 // Maximum number of unique blocks a peer may have delivered -) - -var ( - errTerminated = errors.New("terminated") -) - -// blockRetrievalFn is a callback type for retrieving a block from the local chain. -type blockRetrievalFn func(common.Hash) *types.Block - -// headerRequesterFn is a callback type for sending a header retrieval request. -type headerRequesterFn func(common.Hash) error - -// bodyRequesterFn is a callback type for sending a body retrieval request. -type bodyRequesterFn func([]common.Hash) error - -// blockValidatorFn is a callback type to verify a block's header for fast propagation. -type blockValidatorFn func(block *types.Block, parent *types.Block) error - -// blockBroadcasterFn is a callback type for broadcasting a block to connected peers. -type blockBroadcasterFn func(block *types.Block, propagate bool) - -// chainHeightFn is a callback type to retrieve the current chain height. -type chainHeightFn func() uint64 - -// chainInsertFn is a callback type to insert a batch of blocks into the local chain. -type chainInsertFn func(types.Blocks) (int, error) - -// peerDropFn is a callback type for dropping a peer detected as malicious. -type peerDropFn func(id string) - -// announce is the hash notification of the availability of a new block in the -// network. -type announce struct { - hash common.Hash // Hash of the block being announced - number uint64 // Number of the block being announced (0 = unknown | old protocol) - header *types.Header // Header of the block partially reassembled (new protocol) - time time.Time // Timestamp of the announcement - - origin string // Identifier of the peer originating the notification - - fetchHeader headerRequesterFn // [eth/62] Fetcher function to retrieve the header of an announced block - fetchBodies bodyRequesterFn // [eth/62] Fetcher function to retrieve the body of an announced block -} - -// headerFilterTask represents a batch of headers needing fetcher filtering. -type headerFilterTask struct { - headers []*types.Header // Collection of headers to filter - time time.Time // Arrival time of the headers -} - -// bodyFilterTask represents a batch of block bodies (transactions and uncles) -// needing fetcher filtering. -type bodyFilterTask struct { - transactions [][]*types.Transaction // Collection of transactions per block bodies - uncles [][]*types.Header // Collection of uncles per block bodies - time time.Time // Arrival time of the blocks' contents -} - -// inject represents a scheduled import operation.
-type inject struct { - origin string - block *types.Block -} - -// Fetcher is responsible for accumulating block announcements from various peers -// and scheduling them for retrieval. -type Fetcher struct { - // Various event channels - notify chan *announce - inject chan *inject - - blockFilter chan chan []*types.Block - headerFilter chan chan *headerFilterTask - bodyFilter chan chan *bodyFilterTask - - done chan common.Hash - quit chan struct{} - - // Announce states - announces map[string]int // Per peer announce counts to prevent memory exhaustion - announced map[common.Hash][]*announce // Announced blocks, scheduled for fetching - fetching map[common.Hash]*announce // Announced blocks, currently fetching - fetched map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval - completing map[common.Hash]*announce // Blocks with headers, currently body-completing - - // Block cache - queue *prque.Prque // Queue containing the import operations (block number sorted) - queues map[string]int // Per peer block counts to prevent memory exhaustion - queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports) - - // Callbacks - getBlock blockRetrievalFn // Retrieves a block from the local chain - validateBlock blockValidatorFn // Checks if a block's headers have a valid proof of work - broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers - chainHeight chainHeightFn // Retrieves the current chain's height - insertChain chainInsertFn // Injects a batch of blocks into the chain - dropPeer peerDropFn // Drops a peer for misbehaving - - // Testing hooks - announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list - queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue - fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch - completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62) - importedHook func(*types.Block) // Method to call upon successful block import (both eth/61 and eth/62) -} - -// New creates a block fetcher to retrieve blocks based on hash announcements. -func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher { - return &Fetcher{ - notify: make(chan *announce), - inject: make(chan *inject), - blockFilter: make(chan chan []*types.Block), - headerFilter: make(chan chan *headerFilterTask), - bodyFilter: make(chan chan *bodyFilterTask), - done: make(chan common.Hash), - quit: make(chan struct{}), - announces: make(map[string]int), - announced: make(map[common.Hash][]*announce), - fetching: make(map[common.Hash]*announce), - fetched: make(map[common.Hash][]*announce), - completing: make(map[common.Hash]*announce), - queue: prque.New(), - queues: make(map[string]int), - queued: make(map[common.Hash]*inject), - getBlock: getBlock, - validateBlock: validateBlock, - broadcastBlock: broadcastBlock, - chainHeight: chainHeight, - insertChain: insertChain, - dropPeer: dropPeer, - } -} - -// Start boots up the announcement based synchroniser, accepting and processing -// hash notifications and block fetches until termination requested. -func (f *Fetcher) Start() { - go f.loop() -} - -// Stop terminates the announcement based synchroniser, canceling all pending -// operations. 
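
Stop below is a one-liner because of how Go channel close semantics work: closing quit is a broadcast, so every receive on it, in every goroutine, unblocks at once. A minimal standalone demonstration of that pattern:

package main

import "fmt"

func main() {
	quit := make(chan struct{})
	done := make(chan struct{})
	go func() {
		<-quit // unblocks the moment quit is closed
		close(done)
	}()
	close(quit) // the whole of Stop, in effect
	<-done
	fmt.Println("worker terminated")
}
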
-func (f *Fetcher) Stop() { - close(f.quit) -} - -// Notify informs the fetcher of the potential availability of a new block in -// the network. -func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time, - headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error { - block := &announce{ - hash: hash, - number: number, - time: time, - origin: peer, - fetchHeader: headerFetcher, - fetchBodies: bodyFetcher, - } - select { - case f.notify <- block: - return nil - case <-f.quit: - return errTerminated - } -} - -// Enqueue tries to fill gaps in the fetcher's future import queue. -func (f *Fetcher) Enqueue(peer string, block *types.Block) error { - op := &inject{ - origin: peer, - block: block, - } - select { - case f.inject <- op: - return nil - case <-f.quit: - return errTerminated - } -} - -// FilterHeaders extracts all the headers that were explicitly requested by the fetcher, -// returning those that should be handled differently. -func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header { - glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers)) - - // Send the filter channel to the fetcher - filter := make(chan *headerFilterTask) - - select { - case f.headerFilter <- filter: - case <-f.quit: - return nil - } - // Request the filtering of the header list - select { - case filter <- &headerFilterTask{headers: headers, time: time}: - case <-f.quit: - return nil - } - // Retrieve the headers remaining after filtering - select { - case task := <-filter: - return task.headers - case <-f.quit: - return nil - } -} - -// FilterBodies extracts all the block bodies that were explicitly requested by -// the fetcher, returning those that should be handled differently. -func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) { - glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles)) - - // Send the filter channel to the fetcher - filter := make(chan *bodyFilterTask) - - select { - case f.bodyFilter <- filter: - case <-f.quit: - return nil, nil - } - // Request the filtering of the body list - select { - case filter <- &bodyFilterTask{transactions: transactions, uncles: uncles, time: time}: - case <-f.quit: - return nil, nil - } - // Retrieve the bodies remaining after filtering - select { - case task := <-filter: - return task.transactions, task.uncles - case <-f.quit: - return nil, nil - } -} - -// loop is the main fetcher loop, checking and processing various notification -// events.
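
The loop that follows is long, but its skeleton is a single select over notification channels, timers, and the quit channel. A compressed, standalone sketch of that shape (channel names invented for the sketch):

package main

import (
	"fmt"
	"time"
)

func main() {
	notify := make(chan string, 1)
	quit := make(chan struct{})
	fetchTimer := time.NewTimer(50 * time.Millisecond)

	notify <- "block announce"
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(quit)
	}()

	for {
		select {
		case n := <-notify:
			fmt.Println("handling:", n)
		case <-fetchTimer.C:
			fmt.Println("fetch timer fired")
		case <-quit:
			fmt.Println("loop exiting")
			return
		}
	}
}
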
-func (f *Fetcher) loop() { - // Iterate the block fetching until a quit is requested - fetchTimer := time.NewTimer(0) - completeTimer := time.NewTimer(0) - - for { - // Clean up any expired block fetches - for hash, announce := range f.fetching { - if time.Since(announce.time) > fetchTimeout { - f.forgetHash(hash) - } - } - // Import any queued blocks that could potentially fit - height := f.chainHeight() - for !f.queue.Empty() { - op := f.queue.PopItem().(*inject) - if f.queueChangeHook != nil { - f.queueChangeHook(op.block.Hash(), false) - } - // If too high up the chain or phase, continue later - number := op.block.NumberU64() - if number > height+1 { - f.queue.Push(op, -float32(op.block.NumberU64())) - if f.queueChangeHook != nil { - f.queueChangeHook(op.block.Hash(), true) - } - break - } - // Otherwise if fresh and still unknown, try and import - hash := op.block.Hash() - if number+maxUncleDist < height || f.getBlock(hash) != nil { - f.forgetBlock(hash) - continue - } - f.insert(op.origin, op.block) - } - // Wait for an outside event to occur - select { - case <-f.quit: - // Fetcher terminating, abort all operations - return - - case notification := <-f.notify: - // A block was announced, make sure the peer isn't DOSing us - propAnnounceInMeter.Mark(1) - - count := f.announces[notification.origin] + 1 - if count > hashLimit { - glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit) - propAnnounceDOSMeter.Mark(1) - break - } - // If we have a valid block number, check that it's potentially useful - if notification.number > 0 { - if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { - glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist) - propAnnounceDropMeter.Mark(1) - break - } - } - // All is well, schedule the announce if block's not yet downloading - if _, ok := f.fetching[notification.hash]; ok { - break - } - if _, ok := f.completing[notification.hash]; ok { - break - } - f.announces[notification.origin] = count - f.announced[notification.hash] = append(f.announced[notification.hash], notification) - if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 { - f.announceChangeHook(notification.hash, true) - } - if len(f.announced) == 1 { - f.rescheduleFetch(fetchTimer) - } - - case op := <-f.inject: - // A direct block insertion was requested, try and fill any pending gaps - propBroadcastInMeter.Mark(1) - f.enqueue(op.origin, op.block) - - case hash := <-f.done: - // A pending import finished, remove all traces of the notification - f.forgetHash(hash) - f.forgetBlock(hash) - - case <-fetchTimer.C: - // At least one block's timer ran out, check for needing retrieval - request := make(map[string][]common.Hash) - - for hash, announces := range f.announced { - if time.Since(announces[0].time) > arriveTimeout-gatherSlack { - // Pick a random peer to retrieve from, reset all others - announce := announces[rand.Intn(len(announces))] - f.forgetHash(hash) - - // If the block still didn't arrive, queue for fetching - if f.getBlock(hash) == nil { - request[announce.origin] = append(request[announce.origin], hash) - f.fetching[hash] = announce - } - } - } - // Send out all block header requests - for peer, hashes := range request { - if glog.V(logger.Detail) && len(hashes) > 0 { - list := "[" - for _, hash := range hashes { - list += fmt.Sprintf("%x…, ", 
hash[:4]) - } - list = list[:len(list)-2] + "]" - glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list) - } - // Create a closure of the fetch and schedule it on a new thread - fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes - go func() { - if f.fetchingHook != nil { - f.fetchingHook(hashes) - } - for _, hash := range hashes { - headerFetchMeter.Mark(1) - fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals - } - }() - } - // Schedule the next fetch if blocks are still pending - f.rescheduleFetch(fetchTimer) - - case <-completeTimer.C: - // At least one header's timer ran out, retrieve everything - request := make(map[string][]common.Hash) - - for hash, announces := range f.fetched { - // Pick a random peer to retrieve from, reset all others - announce := announces[rand.Intn(len(announces))] - f.forgetHash(hash) - - // If the block still didn't arrive, queue for completion - if f.getBlock(hash) == nil { - request[announce.origin] = append(request[announce.origin], hash) - f.completing[hash] = announce - } - } - // Send out all block body requests - for peer, hashes := range request { - if glog.V(logger.Detail) && len(hashes) > 0 { - list := "[" - for _, hash := range hashes { - list += fmt.Sprintf("%x…, ", hash[:4]) - } - list = list[:len(list)-2] + "]" - - glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list) - } - // Create a closure of the fetch and schedule it on a new thread - if f.completingHook != nil { - f.completingHook(hashes) - } - bodyFetchMeter.Mark(int64(len(hashes))) - go f.completing[hashes[0]].fetchBodies(hashes) - } - // Schedule the next fetch if blocks are still pending - f.rescheduleComplete(completeTimer) - - case filter := <-f.headerFilter: - // Headers arrived from a remote peer. Extract those that were explicitly - // requested by the fetcher, and return everything else so it's delivered - // to other parts of the system. - var task *headerFilterTask - select { - case task = <-filter: - case <-f.quit: - return - } - headerFilterInMeter.Mark(int64(len(task.headers))) - - // Split the batch of headers into unknown ones (to return to the caller), - // known incomplete ones (requiring body retrievals) and completed blocks.
- unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{} - for _, header := range task.headers { - hash := header.Hash() - - // Filter fetcher-requested headers from other synchronisation algorithms - if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil { - // If the delivered header does not match the promised number, drop the announcer - if header.Number.Uint64() != announce.number { - glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64()) - f.dropPeer(announce.origin) - f.forgetHash(hash) - continue - } - // Only keep if not imported by other means - if f.getBlock(hash) == nil { - announce.header = header - announce.time = task.time - - // If the block is empty (header only), short circuit into the final import queue - if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) { - glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]) - - block := types.NewBlockWithHeader(header) - block.ReceivedAt = task.time - - complete = append(complete, block) - f.completing[hash] = announce - continue - } - // Otherwise add to the list of blocks needing completion - incomplete = append(incomplete, announce) - } else { - glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]) - f.forgetHash(hash) - } - } else { - // Fetcher doesn't know about it, add to the return list - unknown = append(unknown, header) - } - } - headerFilterOutMeter.Mark(int64(len(unknown))) - select { - case filter <- &headerFilterTask{headers: unknown, time: task.time}: - case <-f.quit: - return - } - // Schedule the retrieved headers for body completion - for _, announce := range incomplete { - hash := announce.header.Hash() - if _, ok := f.completing[hash]; ok { - continue - } - f.fetched[hash] = append(f.fetched[hash], announce) - if len(f.fetched) == 1 { - f.rescheduleComplete(completeTimer) - } - } - // Schedule the header-only blocks for import - for _, block := range complete { - if announce := f.completing[block.Hash()]; announce != nil { - f.enqueue(announce.origin, block) - } - } - - case filter := <-f.bodyFilter: - // Block bodies arrived, extract any explicitly requested blocks, return the rest - var task *bodyFilterTask - select { - case task = <-filter: - case <-f.quit: - return - } - bodyFilterInMeter.Mark(int64(len(task.transactions))) - - blocks := []*types.Block{} - for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ { - // Match up a body to any possible completion request - matched := false - - for hash, announce := range f.completing { - if f.queued[hash] == nil { - txnHash := types.DeriveSha(types.Transactions(task.transactions[i])) - uncleHash := types.CalcUncleHash(task.uncles[i]) - - if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash { - // Mark the body matched, reassemble if still unknown - matched = true - - if f.getBlock(hash) == nil { - block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i]) - block.ReceivedAt = task.time - - blocks = append(blocks, block) - } else { - f.forgetHash(hash) - } - } - } 
- } - if matched { - task.transactions = append(task.transactions[:i], task.transactions[i+1:]...) - task.uncles = append(task.uncles[:i], task.uncles[i+1:]...) - i-- - continue - } - } - - bodyFilterOutMeter.Mark(int64(len(task.transactions))) - select { - case filter <- task: - case <-f.quit: - return - } - // Schedule the retrieved blocks for ordered import - for _, block := range blocks { - if announce := f.completing[block.Hash()]; announce != nil { - f.enqueue(announce.origin, block) - } - } - } - } -} - -// rescheduleFetch resets the specified fetch timer to the next announce timeout. -func (f *Fetcher) rescheduleFetch(fetch *time.Timer) { - // Short circuit if no blocks are announced - if len(f.announced) == 0 { - return - } - // Otherwise find the earliest expiring announcement - earliest := time.Now() - for _, announces := range f.announced { - if earliest.After(announces[0].time) { - earliest = announces[0].time - } - } - fetch.Reset(arriveTimeout - time.Since(earliest)) -} - -// rescheduleComplete resets the specified completion timer to the next fetch timeout. -func (f *Fetcher) rescheduleComplete(complete *time.Timer) { - // Short circuit if no headers are fetched - if len(f.fetched) == 0 { - return - } - // Otherwise find the earliest expiring announcement - earliest := time.Now() - for _, announces := range f.fetched { - if earliest.After(announces[0].time) { - earliest = announces[0].time - } - } - complete.Reset(gatherSlack - time.Since(earliest)) -} - -// enqueue schedules a new future import operation if the block to be imported -// has not yet been seen. -func (f *Fetcher) enqueue(peer string, block *types.Block) { - hash := block.Hash() - - // Ensure the peer isn't DOSing us - count := f.queues[peer] + 1 - if count > blockLimit { - glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit) - propBroadcastDOSMeter.Mark(1) - f.forgetHash(hash) - return - } - // Discard any past or too distant blocks - if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { - glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist) - propBroadcastDropMeter.Mark(1) - f.forgetHash(hash) - return - } - // Schedule the block for future importing - if _, ok := f.queued[hash]; !ok { - op := &inject{ - origin: peer, - block: block, - } - f.queues[peer] = count - f.queued[hash] = op - f.queue.Push(op, -float32(block.NumberU64())) - if f.queueChangeHook != nil { - f.queueChangeHook(op.block.Hash(), true) - } - if glog.V(logger.Debug) { - glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size()) - } - } -} - -// insert spawns a new goroutine to run a block insertion into the chain. If the -// block's number is at the same height as the current import phase, it updates -// the phase states accordingly.
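
The import goroutine below hinges on a three-way switch over header verification. A hedged, standalone cut of just that decision (the error value is invented here; the real code matches core.BlockFutureErr):

package main

import (
	"errors"
	"fmt"
)

var errFutureBlock = errors.New("block in the future")

// onVerify mirrors the decision insert makes after validating a header:
// propagate valid blocks, quietly hold "future" blocks, drop the peer for
// anything else.
func onVerify(err error) string {
	switch err {
	case nil:
		return "propagate"
	case errFutureBlock:
		return "hold"
	default:
		return "drop peer"
	}
}

func main() {
	fmt.Println(onVerify(nil))                       // propagate
	fmt.Println(onVerify(errFutureBlock))            // hold
	fmt.Println(onVerify(errors.New("invalid pow"))) // drop peer
}
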
-func (f *Fetcher) insert(peer string, block *types.Block) { - hash := block.Hash() - - // Run the import on a new thread - glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4]) - go func() { - defer func() { f.done <- hash }() - - // If the parent's unknown, abort insertion - parent := f.getBlock(block.ParentHash()) - if parent == nil { - glog.V(logger.Debug).Infof("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4]) - return - } - // Quickly validate the header and propagate the block if it passes - switch err := f.validateBlock(block, parent); err { - case nil: - // All ok, quickly propagate to our peers - propBroadcastOutTimer.UpdateSince(block.ReceivedAt) - go f.broadcastBlock(block, true) - - case core.BlockFutureErr: - // Weird future block, don't fail, but neither propagate - - default: - // Something went very wrong, drop the peer - glog.V(logger.Debug).Infof("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err) - f.dropPeer(peer) - return - } - // Run the actual import and log any issues - if _, err := f.insertChain(types.Blocks{block}); err != nil { - glog.V(logger.Warn).Infof("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err) - return - } - // If import succeeded, broadcast the block - propAnnounceOutTimer.UpdateSince(block.ReceivedAt) - go f.broadcastBlock(block, false) - - // Invoke the testing hook if needed - if f.importedHook != nil { - f.importedHook(block) - } - }() -} - -// forgetHash removes all traces of a block announcement from the fetcher's -// internal state. -func (f *Fetcher) forgetHash(hash common.Hash) { - // Remove all pending announces and decrement DOS counters - for _, announce := range f.announced[hash] { - f.announces[announce.origin]-- - if f.announces[announce.origin] == 0 { - delete(f.announces, announce.origin) - } - } - delete(f.announced, hash) - if f.announceChangeHook != nil { - f.announceChangeHook(hash, false) - } - // Remove any pending fetches and decrement the DOS counters - if announce := f.fetching[hash]; announce != nil { - f.announces[announce.origin]-- - if f.announces[announce.origin] == 0 { - delete(f.announces, announce.origin) - } - delete(f.fetching, hash) - } - - // Remove any pending completion requests and decrement the DOS counters - for _, announce := range f.fetched[hash] { - f.announces[announce.origin]-- - if f.announces[announce.origin] == 0 { - delete(f.announces, announce.origin) - } - } - delete(f.fetched, hash) - - // Remove any pending completions and decrement the DOS counters - if announce := f.completing[hash]; announce != nil { - f.announces[announce.origin]-- - if f.announces[announce.origin] == 0 { - delete(f.announces, announce.origin) - } - delete(f.completing, hash) - } -} - -// forgetBlock removes all traces of a queued block from the fetcher's internal -// state. -func (f *Fetcher) forgetBlock(hash common.Hash) { - if insert := f.queued[hash]; insert != nil { - f.queues[insert.origin]-- - if f.queues[insert.origin] == 0 { - delete(f.queues, insert.origin) - } - delete(f.queued, hash) - } -} diff --git a/exp/fetcher/fetcher_test.go b/exp/fetcher/fetcher_test.go deleted file mode 100644 index 684d9803301ad..0000000000000 --- a/exp/fetcher/fetcher_test.go +++ /dev/null @@ -1,780 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library.
-// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -package fetcher - -import ( - "errors" - "math/big" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/params" -) - -var ( - testdb, _ = ethdb.NewMemDatabase() - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testAddress = crypto.PubkeyToAddress(testKey.PublicKey) - genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000)) - unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil) -) - -// makeChain creates a chain of n blocks starting at and including parent. The -// returned hash chain is ordered head->parent. In addition, every 3rd block -// contains a transaction and every 5th an uncle to allow testing correct block -// reassembly. -func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) { - blocks, _ := core.GenerateChain(nil, parent, testdb, n, func(i int, block *core.BlockGen) { - block.SetCoinbase(common.Address{seed}) - - // If the block number is a multiple of 3, send a bonus transaction to the miner - if parent == genesis && i%3 == 0 { - tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey) - if err != nil { - panic(err) - } - block.AddTx(tx) - } - // If the block number is a multiple of 5, add a bonus uncle to the block - if i%5 == 0 { - block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))}) - } - }) - hashes := make([]common.Hash, n+1) - hashes[len(hashes)-1] = parent.Hash() - blockm := make(map[common.Hash]*types.Block, n+1) - blockm[parent.Hash()] = parent - for i, b := range blocks { - hashes[len(hashes)-i-2] = b.Hash() - blockm[b.Hash()] = b - } - return hashes, blockm -} - -// fetcherTester is a test simulator for mocking out a local block chain. -type fetcherTester struct { - fetcher *Fetcher - - hashes []common.Hash // Hash chain belonging to the tester - blocks map[common.Hash]*types.Block // Blocks belonging to the tester - drops map[string]bool // Map of peers dropped by the fetcher - - lock sync.RWMutex -} - -// newTester creates a new fetcher test mocker.
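
The tester below never subclasses anything: the fetcher takes plain function-typed callbacks, so the mock simply passes its own methods in. The same dependency-injection shape in miniature (all names invented for the sketch):

package main

import "fmt"

type chainHeightFn func() uint64

// engine stands in for a component configured purely through callbacks.
type engine struct{ height chainHeightFn }

// mockChain stands in for the test double backing those callbacks.
type mockChain struct{ head uint64 }

func (m *mockChain) chainHeight() uint64 { return m.head }

func main() {
	mock := &mockChain{head: 42}
	e := &engine{height: mock.chainHeight} // method value used as a callback
	fmt.Println(e.height())                // 42
}
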
-func newTester() *fetcherTester { - tester := &fetcherTester{ - hashes: []common.Hash{genesis.Hash()}, - blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, - drops: make(map[string]bool), - } - tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer) - tester.fetcher.Start() - - return tester -} - -// getBlock retrieves a block from the tester's block chain. -func (f *fetcherTester) getBlock(hash common.Hash) *types.Block { - f.lock.RLock() - defer f.lock.RUnlock() - - return f.blocks[hash] -} - -// verifyBlock is a nop placeholder for the block header verification. -func (f *fetcherTester) verifyBlock(block *types.Block, parent *types.Block) error { - return nil -} - -// broadcastBlock is a nop placeholder for the block broadcasting. -func (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) { -} - -// chainHeight retrieves the current height (block number) of the chain. -func (f *fetcherTester) chainHeight() uint64 { - f.lock.RLock() - defer f.lock.RUnlock() - - return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() -} - -// insertChain injects new blocks into the simulated chain. -func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) { - f.lock.Lock() - defer f.lock.Unlock() - - for i, block := range blocks { - // Make sure the parent is known - if _, ok := f.blocks[block.ParentHash()]; !ok { - return i, errors.New("unknown parent") - } - // Discard any new blocks if the same height already exists - if block.NumberU64() <= f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() { - return i, nil - } - // Otherwise build our current chain - f.hashes = append(f.hashes, block.Hash()) - f.blocks[block.Hash()] = block - } - return 0, nil -} - -// dropPeer is an emulator for the peer removal, simply accumulating the various -// peers dropped by the fetcher. -func (f *fetcherTester) dropPeer(peer string) { - f.lock.Lock() - defer f.lock.Unlock() - - f.drops[peer] = true -} - -// makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer. -func (f *fetcherTester) makeHeaderFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn { - closure := make(map[common.Hash]*types.Block) - for hash, block := range blocks { - closure[hash] = block - } - // Create a function that returns a header from the closure - return func(hash common.Hash) error { - // Gather the blocks to return - headers := make([]*types.Header, 0, 1) - if block, ok := closure[hash]; ok { - headers = append(headers, block.Header()) - } - // Return on a new thread - go f.fetcher.FilterHeaders(headers, time.Now().Add(drift)) - - return nil - } -} - -// makeBodyFetcher retrieves a block body fetcher associated with a simulated peer.
-func (f *fetcherTester) makeBodyFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn { - closure := make(map[common.Hash]*types.Block) - for hash, block := range blocks { - closure[hash] = block - } - // Create a function that returns blocks from the closure - return func(hashes []common.Hash) error { - // Gather the block bodies to return - transactions := make([][]*types.Transaction, 0, len(hashes)) - uncles := make([][]*types.Header, 0, len(hashes)) - - for _, hash := range hashes { - if block, ok := closure[hash]; ok { - transactions = append(transactions, block.Transactions()) - uncles = append(uncles, block.Uncles()) - } - } - // Return on a new thread - go f.fetcher.FilterBodies(transactions, uncles, time.Now().Add(drift)) - - return nil - } -} - -// verifyFetchingEvent verifies that a single event arrives on a fetching channel. -func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) { - if arrive { - select { - case <-fetching: - case <-time.After(time.Second): - t.Fatalf("fetching timeout") - } - } else { - select { - case <-fetching: - t.Fatalf("fetching invoked") - case <-time.After(10 * time.Millisecond): - } - } -} - -// verifyCompletingEvent verifies that a single event arrives on a completing channel. -func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) { - if arrive { - select { - case <-completing: - case <-time.After(time.Second): - t.Fatalf("completing timeout") - } - } else { - select { - case <-completing: - t.Fatalf("completing invoked") - case <-time.After(10 * time.Millisecond): - } - } -} - -// verifyImportEvent verifies that a single event arrives on an import channel. -func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) { - if arrive { - select { - case <-imported: - case <-time.After(time.Second): - t.Fatalf("import timeout") - } - } else { - select { - case <-imported: - t.Fatalf("import invoked") - case <-time.After(10 * time.Millisecond): - } - } -} - -// verifyImportCount verifies that exactly count events arrive on an -// import hook channel. -func verifyImportCount(t *testing.T, imported chan *types.Block, count int) { - for i := 0; i < count; i++ { - select { - case <-imported: - case <-time.After(time.Second): - t.Fatalf("block %d: import timeout", i+1) - } - } - verifyImportDone(t, imported) -} - -// verifyImportDone verifies that no more events are arriving on an import channel. -func verifyImportDone(t *testing.T, imported chan *types.Block) { - select { - case <-imported: - t.Fatalf("extra block imported") - case <-time.After(50 * time.Millisecond): - } -} - -// Tests that a fetcher accepts block announcements and initiates retrievals for -// them, successfully importing into the local chain.
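All of the verify* helpers above share the same select-with-timeout pattern. A minimal self-contained distillation of it (the waitEvent name and the error returns are illustrative; the real helpers call t.Fatalf directly):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitEvent asserts that an event does (or does not) fire within the
// given window, the core of the verify* helpers above.
func waitEvent(ch <-chan struct{}, want bool, timeout time.Duration) error {
	if want {
		select {
		case <-ch:
			return nil
		case <-time.After(timeout):
			return errors.New("event timeout")
		}
	}
	select {
	case <-ch:
		return errors.New("unexpected event")
	case <-time.After(timeout):
		return nil
	}
}

func main() {
	ch := make(chan struct{}, 1)
	ch <- struct{}{}
	fmt.Println(waitEvent(ch, true, time.Second))          // <nil>: event arrived
	fmt.Println(waitEvent(ch, false, 10*time.Millisecond)) // <nil>: silence, as wanted
}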
-func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) } -func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) } -func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) } - -func testSequentialAnnouncements(t *testing.T, protocol int) { - // Create a chain of blocks to import - targetBlocks := 4 * hashLimit - hashes, blocks := makeChain(targetBlocks, 0, genesis) - - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - // Iteratively announce blocks until all are imported - imported := make(chan *types.Block) - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - - for i := len(hashes) - 2; i >= 0; i-- { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) -} - -// Tests that if blocks are announced by multiple peers (or even the same buggy -// peer), they will get downloaded at most once. -func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) } -func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) } -func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) } - -func testConcurrentAnnouncements(t *testing.T, protocol int) { - // Create a chain of blocks to import - targetBlocks := 4 * hashLimit - hashes, blocks := makeChain(targetBlocks, 0, genesis) - - // Assemble a tester with a built in counter for the requests - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - counter := uint32(0) - headerWrapper := func(hash common.Hash) error { - atomic.AddUint32(&counter, 1) - return headerFetcher(hash) - } - // Iteratively announce blocks until all are imported - imported := make(chan *types.Block) - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - - for i := len(hashes) - 2; i >= 0; i-- { - tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher) - tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), headerWrapper, bodyFetcher) - tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), headerWrapper, bodyFetcher) - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) - - // Make sure no blocks were retrieved twice - if int(counter) != targetBlocks { - t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks) - } -} - -// Tests that announcements arriving while a previous one is being fetched still -// result in a valid import.
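The concurrent-announcement test above verifies deduplication by wrapping the header fetcher in an atomic counter. The same instrumentation trick, as a stand-alone sketch (names are illustrative, not the fetcher's API):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// countCalls wraps fn so concurrent invocations can be tallied race-free,
// mirroring headerWrapper in testConcurrentAnnouncements above.
func countCalls(fn func(string) error, counter *uint32) func(string) error {
	return func(hash string) error {
		atomic.AddUint32(counter, 1)
		return fn(hash)
	}
}

func main() {
	var counter uint32
	fetch := countCalls(func(string) error { return nil }, &counter)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); fetch("0xabc") }()
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint32(&counter)) // 10
}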
-func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) } -func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) } -func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) } - -func testOverlappingAnnouncements(t *testing.T, protocol int) { - // Create a chain of blocks to import - targetBlocks := 4 * hashLimit - hashes, blocks := makeChain(targetBlocks, 0, genesis) - - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - // Iteratively announce blocks, but overlap them continuously - overlap := 16 - imported := make(chan *types.Block, len(hashes)-1) - for i := 0; i < overlap; i++ { - imported <- nil - } - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - - for i := len(hashes) - 2; i >= 0; i-- { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - select { - case <-imported: - case <-time.After(time.Second): - t.Fatalf("block %d: import timeout", len(hashes)-i) - } - } - // Wait for all the imports to complete and check count - verifyImportCount(t, imported, overlap) -} - -// Tests that announces already being retrieved will not be duplicated. -func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) } -func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) } -func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) } - -func testPendingDeduplication(t *testing.T, protocol int) { - // Create a hash and corresponding block - hashes, blocks := makeChain(1, 0, genesis) - - // Assemble a tester with a built in counter and delayed fetcher - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - delay := 50 * time.Millisecond - counter := uint32(0) - headerWrapper := func(hash common.Hash) error { - atomic.AddUint32(&counter, 1) - - // Simulate a long running fetch - go func() { - time.Sleep(delay) - headerFetcher(hash) - }() - return nil - } - // Announce the same block many times until it's fetched (wait for any pending ops) - for tester.getBlock(hashes[0]) == nil { - tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher) - time.Sleep(time.Millisecond) - } - time.Sleep(delay) - - // Check that all blocks were imported and none fetched twice - if imported := len(tester.blocks); imported != 2 { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, 2) - } - if int(counter) != 1 { - t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1) - } -} - -// Tests that announcements retrieved in a random order are cached and eventually -// imported when all the gaps are filled in. 
-func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) } -func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) } -func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) } - -func testRandomArrivalImport(t *testing.T, protocol int) { - // Create a chain of blocks to import, and choose one to delay - targetBlocks := maxQueueDist - hashes, blocks := makeChain(targetBlocks, 0, genesis) - skip := targetBlocks / 2 - - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - // Iteratively announce blocks, skipping one entry - imported := make(chan *types.Block, len(hashes)-1) - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - - for i := len(hashes) - 1; i >= 0; i-- { - if i != skip { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - time.Sleep(time.Millisecond) - } - } - // Finally announce the skipped entry and check full import - tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - verifyImportCount(t, imported, len(hashes)-1) -} - -// Tests that direct block enqueues (due to block propagation vs. hash announce) -// are correctly scheduled, filling any import queue gaps. -func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) } -func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) } -func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) } - -func testQueueGapFill(t *testing.T, protocol int) { - // Create a chain of blocks to import, and choose one to not announce at all - targetBlocks := maxQueueDist - hashes, blocks := makeChain(targetBlocks, 0, genesis) - skip := targetBlocks / 2 - - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - // Iteratively announce blocks, skipping one entry - imported := make(chan *types.Block, len(hashes)-1) - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - - for i := len(hashes) - 1; i >= 0; i-- { - if i != skip { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - time.Sleep(time.Millisecond) - } - } - // Fill the missing block directly as if propagated - tester.fetcher.Enqueue("valid", blocks[hashes[skip]]) - verifyImportCount(t, imported, len(hashes)-1) -} - -// Tests that blocks arriving from various sources (multiple propagations, hash -// announces, etc.) do not get scheduled for import multiple times.
-func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) } -func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) } -func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) } - -func testImportDeduplication(t *testing.T, protocol int) { - // Create two blocks to import (one for duplication, the other for stalling) - hashes, blocks := makeChain(2, 0, genesis) - - // Create the tester and wrap the importer with a counter - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - counter := uint32(0) - tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) { - atomic.AddUint32(&counter, uint32(len(blocks))) - return tester.insertChain(blocks) - } - // Instrument the fetching and imported events - fetching := make(chan []common.Hash) - imported := make(chan *types.Block, len(hashes)-1) - tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes } - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - - // Announce the duplicating block, wait for retrieval, and also propagate directly - tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - <-fetching - - tester.fetcher.Enqueue("valid", blocks[hashes[0]]) - tester.fetcher.Enqueue("valid", blocks[hashes[0]]) - tester.fetcher.Enqueue("valid", blocks[hashes[0]]) - - // Fill the missing block directly as if propagated, and check import uniqueness - tester.fetcher.Enqueue("valid", blocks[hashes[1]]) - verifyImportCount(t, imported, 2) - - if counter != 2 { - t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2) - } -} - -// Tests that blocks with numbers much lower or higher than our current head get -// discarded to prevent wasting resources on useless blocks from faulty peers. -func TestDistantPropagationDiscarding(t *testing.T) { - // Create a long chain to import and define the discard boundaries - hashes, blocks := makeChain(3*maxQueueDist, 0, genesis) - head := hashes[len(hashes)/2] - - low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 - - // Create a tester and simulate a head block being the middle of the above chain - tester := newTester() - - tester.lock.Lock() - tester.hashes = []common.Hash{head} - tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} - tester.lock.Unlock() - - // Ensure that a block with a lower number than the threshold is discarded - tester.fetcher.Enqueue("lower", blocks[hashes[low]]) - time.Sleep(10 * time.Millisecond) - if !tester.fetcher.queue.Empty() { - t.Fatalf("fetcher queued stale block") - } - // Ensure that a block with a higher number than the threshold is discarded - tester.fetcher.Enqueue("higher", blocks[hashes[high]]) - time.Sleep(10 * time.Millisecond) - if !tester.fetcher.queue.Empty() { - t.Fatalf("fetcher queued future block") - } -} - -// Tests that announcements with numbers much lower or higher than our current -// head get discarded to prevent wasting resources on useless blocks from faulty -// peers.
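The low/high indices in these discard tests look inverted at first glance; the key is that makeChain returns hashes ordered head->parent, so a larger index means an older block. A worked sketch of the window arithmetic (the constant values are assumptions for illustration; the real ones live in fetcher.go):

package main

import "fmt"

func main() {
	const (
		chainLen     = 3*32 + 1 // 3*maxQueueDist blocks plus the parent
		maxUncleDist = 7        // assumed: oldest ancestor still accepted
		maxQueueDist = 32       // assumed: furthest future block still queued
	)
	head := chainLen / 2 // index of the simulated head in the hash chain

	// hashes[0] is the newest block, so a larger index is an older block:
	low := head + maxUncleDist + 1  // first index whose number is too far BELOW the head
	high := head - maxQueueDist - 1 // first index whose number is too far ABOVE the head
	fmt.Println(low, high)          // 56 15
}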
-func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) } -func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) } -func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) } - -func testDistantAnnouncementDiscarding(t *testing.T, protocol int) { - // Create a long chain to import and define the discard boundaries - hashes, blocks := makeChain(3*maxQueueDist, 0, genesis) - head := hashes[len(hashes)/2] - - low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 - - // Create a tester and simulate a head block being the middle of the above chain - tester := newTester() - - tester.lock.Lock() - tester.hashes = []common.Hash{head} - tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} - tester.lock.Unlock() - - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - fetching := make(chan struct{}, 2) - tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} } - - // Ensure that a block with a lower number than the threshold is discarded - tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - select { - case <-time.After(50 * time.Millisecond): - case <-fetching: - t.Fatalf("fetcher requested stale header") - } - // Ensure that a block with a higher number than the threshold is discarded - tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - select { - case <-time.After(50 * time.Millisecond): - case <-fetching: - t.Fatalf("fetcher requested future header") - } -} - -// Tests that peers announcing blocks with invalid numbers (i.e. not matching -// the headers provided afterwards) get dropped as malicious. -func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) } -func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) } -func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) } - -func testInvalidNumberAnnouncement(t *testing.T, protocol int) { - // Create a single block to import and check numbers against - hashes, blocks := makeChain(1, 0, genesis) - - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - imported := make(chan *types.Block) - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - - // Announce a block with a bad number, check for immediate drop - tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - verifyImportEvent(t, imported, false) - - tester.lock.RLock() - dropped := tester.drops["bad"] - tester.lock.RUnlock() - - if !dropped { - t.Fatalf("peer with invalid numbered announcement not dropped") - } - // Make sure a good announcement passes without a drop - tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - verifyImportEvent(t, imported, true) - - tester.lock.RLock() - dropped = tester.drops["good"] - tester.lock.RUnlock() - - if dropped { - t.Fatalf("peer with valid numbered announcement dropped") - } - verifyImportDone(t, imported) -} - -// Tests that if a block is empty (i.e. 
header only), no body request should be -// made; instead the header should be assembled into a whole block by itself. -func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) } -func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) } -func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) } - -func testEmptyBlockShortCircuit(t *testing.T, protocol int) { - // Create a chain of blocks to import - hashes, blocks := makeChain(32, 0, genesis) - - tester := newTester() - headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher(blocks, 0) - - // Add a monitoring hook for all internal events - fetching := make(chan []common.Hash) - tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes } - - completing := make(chan []common.Hash) - tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes } - - imported := make(chan *types.Block) - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - - // Iteratively announce blocks until all are imported - for i := len(hashes) - 2; i >= 0; i-- { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - - // All announces should fetch the header - verifyFetchingEvent(t, fetching, true) - - // Only blocks with data contents should request bodies - verifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0) - - // Regardless of the construct, import should succeed - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) -} - -// Tests that a peer is unable to use unbounded memory by sending infinite -// block announcements to a node, but that even in the face of such an attack, -// the fetcher remains operational.
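Before the DOS test itself, a self-contained sketch of the allowance scheme it exercises: every pending announce bumps a per-peer counter and is refused past a fixed cap. The announceTable type is illustrative, and hashLimit's real value lives in fetcher.go (256 is only an assumption here):

package main

import "fmt"

const hashLimit = 256 // assumed cap on outstanding announces per peer

type announceTable struct {
	pending map[string]int // outstanding announces per peer
}

func (t *announceTable) notify(peer string) bool {
	if t.pending[peer] >= hashLimit {
		return false // over allowance: drop the announce on the floor
	}
	t.pending[peer]++
	return true
}

func (t *announceTable) done(peer string) {
	if t.pending[peer] > 0 {
		t.pending[peer]-- // fetched or timed out: free a slot
	}
}

func main() {
	tab := &announceTable{pending: make(map[string]int)}
	accepted := 0
	for i := 0; i < 1000; i++ {
		if tab.notify("attacker") {
			accepted++
		}
	}
	fmt.Println(accepted) // 256: the flood is capped, memory stays bounded
}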
-func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) } -func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) } -func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) } - -func testHashMemoryExhaustionAttack(t *testing.T, protocol int) { - // Create a tester with instrumented import hooks - tester := newTester() - - imported, announces := make(chan *types.Block), int32(0) - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) { - if added { - atomic.AddInt32(&announces, 1) - } else { - atomic.AddInt32(&announces, -1) - } - } - // Create a valid chain and an infinite junk chain - targetBlocks := hashLimit + 2*maxQueueDist - hashes, blocks := makeChain(targetBlocks, 0, genesis) - validHeaderFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) - validBodyFetcher := tester.makeBodyFetcher(blocks, 0) - - attack, _ := makeChain(targetBlocks, 0, unknownBlock) - attackerHeaderFetcher := tester.makeHeaderFetcher(nil, -gatherSlack) - attackerBodyFetcher := tester.makeBodyFetcher(nil, 0) - - // Feed the tester a huge hash set from the attacker, and a limited one from the valid peer - for i := 0; i < len(attack); i++ { - if i < maxQueueDist { - tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), validHeaderFetcher, validBodyFetcher) - } - tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher) - } - if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist { - t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist) - } - // Wait for fetches to complete - verifyImportCount(t, imported, maxQueueDist) - - // Feed the remaining valid hashes to ensure DOS protection state remains clean - for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), validHeaderFetcher, validBodyFetcher) - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) -} - -// Tests that blocks sent to the fetcher (either through propagation or via hash -// announces and retrievals) don't pile up indefinitely, exhausting available -// system memory.
-func TestBlockMemoryExhaustionAttack(t *testing.T) { - // Create a tester with instrumented import hooks - tester := newTester() - - imported, enqueued := make(chan *types.Block), int32(0) - tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) { - if added { - atomic.AddInt32(&enqueued, 1) - } else { - atomic.AddInt32(&enqueued, -1) - } - } - // Create a valid chain and a batch of dangling (but in range) blocks - targetBlocks := hashLimit + 2*maxQueueDist - hashes, blocks := makeChain(targetBlocks, 0, genesis) - attack := make(map[common.Hash]*types.Block) - for i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ { - hashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock) - for _, hash := range hashes[:maxQueueDist-2] { - attack[hash] = blocks[hash] - } - } - // Try to feed all the attacker blocks and make sure only a limited batch is accepted - for _, block := range attack { - tester.fetcher.Enqueue("attacker", block) - } - time.Sleep(200 * time.Millisecond) - if queued := atomic.LoadInt32(&enqueued); queued != blockLimit { - t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit) - } - // Queue up a batch of valid blocks, and check that a new peer is allowed to do so - for i := 0; i < maxQueueDist-1; i++ { - tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]]) - } - time.Sleep(100 * time.Millisecond) - if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 { - t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1) - } - // Insert the missing piece (and sanity check the import) - tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2]]) - verifyImportCount(t, imported, maxQueueDist) - - // Insert the remaining blocks in chunks to ensure clean DOS protection - for i := maxQueueDist; i < len(hashes)-1; i++ { - tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]]) - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) -} diff --git a/exp/fetcher/metrics.go b/exp/fetcher/metrics.go deleted file mode 100644 index f521e4f9bbe8c..0000000000000 --- a/exp/fetcher/metrics.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Contains the metrics collected by the fetcher.
- -package fetcher - -import ( - "github.com/expanse-org/go-expanse/metrics" -) - -var ( - propAnnounceInMeter = metrics.NewMeter("eth/fetcher/prop/announces/in") - propAnnounceOutTimer = metrics.NewTimer("eth/fetcher/prop/announces/out") - propAnnounceDropMeter = metrics.NewMeter("eth/fetcher/prop/announces/drop") - propAnnounceDOSMeter = metrics.NewMeter("eth/fetcher/prop/announces/dos") - - propBroadcastInMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/in") - propBroadcastOutTimer = metrics.NewTimer("eth/fetcher/prop/broadcasts/out") - propBroadcastDropMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/drop") - propBroadcastDOSMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/dos") - - headerFetchMeter = metrics.NewMeter("eth/fetcher/fetch/headers") - bodyFetchMeter = metrics.NewMeter("eth/fetcher/fetch/bodies") - - headerFilterInMeter = metrics.NewMeter("eth/fetcher/filter/headers/in") - headerFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/headers/out") - bodyFilterInMeter = metrics.NewMeter("eth/fetcher/filter/bodies/in") - bodyFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/bodies/out") -) diff --git a/exp/filters/api.go b/exp/filters/api.go deleted file mode 100644 index 65691fb412783..0000000000000 --- a/exp/filters/api.go +++ /dev/null @@ -1,668 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package filters - -import ( - "crypto/rand" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "sync" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/rpc" - - "golang.org/x/net/context" -) - -var ( - filterTickerTime = 5 * time.Minute -) - -// byte will be inferred -const ( - unknownFilterTy = iota - blockFilterTy - transactionFilterTy - logFilterTy -) - -// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various -// information related to the Expanse protocol such as blocks, transactions and logs. -type PublicFilterAPI struct { - mux *event.TypeMux - - quit chan struct{} - chainDb ethdb.Database - - filterManager *FilterSystem - - filterMapMu sync.RWMutex - filterMapping map[string]int // maps external filter identifiers to internal ones - - logMu sync.RWMutex - logQueue map[int]*logQueue - - blockMu sync.RWMutex - blockQueue map[int]*hashQueue - - transactionMu sync.RWMutex - transactionQueue map[int]*hashQueue -} - -// NewPublicFilterAPI returns a new PublicFilterAPI instance.
-func NewPublicFilterAPI(chainDb ethdb.Database, mux *event.TypeMux) *PublicFilterAPI { - svc := &PublicFilterAPI{ - mux: mux, - quit: make(chan struct{}), // initialised here so that Stop can close it safely - chainDb: chainDb, - filterManager: NewFilterSystem(mux), - filterMapping: make(map[string]int), - logQueue: make(map[int]*logQueue), - blockQueue: make(map[int]*hashQueue), - transactionQueue: make(map[int]*hashQueue), - } - go svc.start() - return svc -} - -// Stop quits the work loop. -func (s *PublicFilterAPI) Stop() { - close(s.quit) -} - -// start runs the work loop, waiting for and processing events. -func (s *PublicFilterAPI) start() { - timer := time.NewTicker(2 * time.Second) - defer timer.Stop() -done: - for { - select { - case <-timer.C: - s.filterManager.Lock() // lock order like filterLoop() - s.logMu.Lock() - for id, filter := range s.logQueue { - if time.Since(filter.timeout) > filterTickerTime { - s.filterManager.Remove(id) - delete(s.logQueue, id) - } - } - s.logMu.Unlock() - - s.blockMu.Lock() - for id, filter := range s.blockQueue { - if time.Since(filter.timeout) > filterTickerTime { - s.filterManager.Remove(id) - delete(s.blockQueue, id) - } - } - s.blockMu.Unlock() - - s.transactionMu.Lock() - for id, filter := range s.transactionQueue { - if time.Since(filter.timeout) > filterTickerTime { - s.filterManager.Remove(id) - delete(s.transactionQueue, id) - } - } - s.transactionMu.Unlock() - s.filterManager.Unlock() - case <-s.quit: - break done - } - } - -} - -// NewBlockFilter creates a new filter that returns blocks that are included in the canonical chain. -func (s *PublicFilterAPI) NewBlockFilter() (string, error) { - // protect filterManager.Add() and setting of filter fields - s.filterManager.Lock() - defer s.filterManager.Unlock() - - externalId, err := newFilterId() - if err != nil { - return "", err - } - - filter := New(s.chainDb) - id, err := s.filterManager.Add(filter, ChainFilter) - if err != nil { - return "", err - } - - s.blockMu.Lock() - s.blockQueue[id] = &hashQueue{timeout: time.Now()} - s.blockMu.Unlock() - - filter.BlockCallback = func(block *types.Block, logs vm.Logs) { - s.blockMu.Lock() - defer s.blockMu.Unlock() - - if queue := s.blockQueue[id]; queue != nil { - queue.add(block.Hash()) - } - } - - s.filterMapMu.Lock() - s.filterMapping[externalId] = id - s.filterMapMu.Unlock() - - return externalId, nil -} - -// NewPendingTransactionFilter creates a filter that returns new pending transactions. -func (s *PublicFilterAPI) NewPendingTransactionFilter() (string, error) { - // protect filterManager.Add() and setting of filter fields - s.filterManager.Lock() - defer s.filterManager.Unlock() - - externalId, err := newFilterId() - if err != nil { - return "", err - } - - filter := New(s.chainDb) - id, err := s.filterManager.Add(filter, PendingTxFilter) - if err != nil { - return "", err - } - - s.transactionMu.Lock() - s.transactionQueue[id] = &hashQueue{timeout: time.Now()} - s.transactionMu.Unlock() - - filter.TransactionCallback = func(tx *types.Transaction) { - s.transactionMu.Lock() - defer s.transactionMu.Unlock() - - if queue := s.transactionQueue[id]; queue != nil { - queue.add(tx.Hash()) - } - } - - s.filterMapMu.Lock() - s.filterMapping[externalId] = id - s.filterMapMu.Unlock() - - return externalId, nil -} - -// newLogFilter creates a new log filter.
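The eviction pass run by start() above follows a common pattern: on each tick, any filter whose queue has not been polled within the ttl is removed. A minimal sketch of that idea in isolation (the registry type and field names are illustrative only):

package main

import (
	"fmt"
	"sync"
	"time"
)

// registry tracks when each filter was last drained by a client poll.
type registry struct {
	mu       sync.Mutex
	lastPoll map[int]time.Time // filter id -> last time its queue was read
}

// sweep removes every entry that has been idle for longer than ttl,
// mirroring the ticker branch of start() above.
func (r *registry) sweep(ttl time.Duration) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for id, seen := range r.lastPoll {
		if time.Since(seen) > ttl {
			delete(r.lastPoll, id) // stale: uninstall the filter
		}
	}
}

func main() {
	r := &registry{lastPoll: map[int]time.Time{
		1: time.Now(),
		2: time.Now().Add(-10 * time.Minute),
	}}
	r.sweep(5 * time.Minute)
	fmt.Println(len(r.lastPoll)) // 1: only the recently polled filter survives
}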
-func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []common.Address, topics [][]common.Hash, callback func(log *vm.Log, removed bool)) (int, error) { - // protect filterManager.Add() and setting of filter fields - s.filterManager.Lock() - defer s.filterManager.Unlock() - - filter := New(s.chainDb) - id, err := s.filterManager.Add(filter, LogFilter) - if err != nil { - return 0, err - } - - s.logMu.Lock() - s.logQueue[id] = &logQueue{timeout: time.Now()} - s.logMu.Unlock() - - filter.SetBeginBlock(earliest) - filter.SetEndBlock(latest) - filter.SetAddresses(addresses) - filter.SetTopics(topics) - filter.LogCallback = func(log *vm.Log, removed bool) { - if callback != nil { - callback(log, removed) - } else { - s.logMu.Lock() - defer s.logMu.Unlock() - if queue := s.logQueue[id]; queue != nil { - queue.add(vmlog{log, removed}) - } - } - } - - return id, nil -} - -// Logs creates a subscription that fires for all new logs that match the given filter criteria. -func (s *PublicFilterAPI) Logs(ctx context.Context, args NewFilterArgs) (rpc.Subscription, error) { - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return nil, rpc.ErrNotificationsUnsupported - } - - var ( - externalId string - subscription rpc.Subscription - err error - ) - - if externalId, err = newFilterId(); err != nil { - return nil, err - } - - // uninstall filter when subscription is unsubscribed/cancelled - if subscription, err = notifier.NewSubscription(func(string) { - s.UninstallFilter(externalId) - }); err != nil { - return nil, err - } - - notifySubscriber := func(log *vm.Log, removed bool) { - rpcLog := toRPCLogs(vm.Logs{log}, removed) - if err := subscription.Notify(rpcLog); err != nil { - subscription.Cancel() - } - } - - // from and to block numbers are not used since subscriptions don't allow you to travel back in time - var id int - if len(args.Addresses) > 0 { - id, err = s.newLogFilter(-1, -1, args.Addresses, args.Topics, notifySubscriber) - } else { - id, err = s.newLogFilter(-1, -1, nil, args.Topics, notifySubscriber) - } - - if err != nil { - subscription.Cancel() - return nil, err - } - - s.filterMapMu.Lock() - s.filterMapping[externalId] = id - s.filterMapMu.Unlock() - - return subscription, err -} - -// NewFilterArgs represents a request to create a new filter. -type NewFilterArgs struct { - FromBlock rpc.BlockNumber - ToBlock rpc.BlockNumber - Addresses []common.Address - Topics [][]common.Hash -} - -// UnmarshalJSON sets *args fields with given data.
-func (args *NewFilterArgs) UnmarshalJSON(data []byte) error { - type input struct { - From *rpc.BlockNumber `json:"fromBlock"` - ToBlock *rpc.BlockNumber `json:"toBlock"` - Addresses interface{} `json:"address"` - Topics []interface{} `json:"topics"` - } - - var raw input - if err := json.Unmarshal(data, &raw); err != nil { - return err - } - - if raw.From == nil || raw.From.Int64() < 0 { - args.FromBlock = rpc.LatestBlockNumber - } else { - args.FromBlock = *raw.From - } - - if raw.ToBlock == nil || raw.ToBlock.Int64() < 0 { - args.ToBlock = rpc.LatestBlockNumber - } else { - args.ToBlock = *raw.ToBlock - } - - args.Addresses = []common.Address{} - - if raw.Addresses != nil { - // raw.Address can contain a single address or an array of addresses - var addresses []common.Address - if strAddrs, ok := raw.Addresses.([]interface{}); ok { - for i, addr := range strAddrs { - if strAddr, ok := addr.(string); ok { - if len(strAddr) >= 2 && strAddr[0] == '0' && (strAddr[1] == 'x' || strAddr[1] == 'X') { - strAddr = strAddr[2:] - } - if decAddr, err := hex.DecodeString(strAddr); err == nil { - addresses = append(addresses, common.BytesToAddress(decAddr)) - } else { - return fmt.Errorf("invalid address given") - } - } else { - return fmt.Errorf("invalid address on index %d", i) - } - } - } else if singleAddr, ok := raw.Addresses.(string); ok { - if len(singleAddr) >= 2 && singleAddr[0] == '0' && (singleAddr[1] == 'x' || singleAddr[1] == 'X') { - singleAddr = singleAddr[2:] - } - if decAddr, err := hex.DecodeString(singleAddr); err == nil { - addresses = append(addresses, common.BytesToAddress(decAddr)) - } else { - return fmt.Errorf("invalid address given") - } - } else { - return errors.New("invalid address(es) given") - } - args.Addresses = addresses - } - - // helper function which parses a string to a topic hash - topicConverter := func(raw string) (common.Hash, error) { - if len(raw) == 0 { - return common.Hash{}, nil - } - if len(raw) >= 2 && raw[0] == '0' && (raw[1] == 'x' || raw[1] == 'X') { - raw = raw[2:] - } - if len(raw) != 2*common.HashLength { - return common.Hash{}, errors.New("invalid topic(s)") - } - if decAddr, err := hex.DecodeString(raw); err == nil { - return common.BytesToHash(decAddr), nil - } - return common.Hash{}, errors.New("invalid topic(s)") - } - - // topics is an array consisting of strings and/or arrays of strings. - // JSON null values are converted to common.Hash{} and ignored by the filter manager. - if len(raw.Topics) > 0 { - args.Topics = make([][]common.Hash, len(raw.Topics)) - for i, t := range raw.Topics { - if t == nil { // ignore topic when matching logs - args.Topics[i] = []common.Hash{common.Hash{}} - } else if topic, ok := t.(string); ok { // match specific topic - top, err := topicConverter(topic) - if err != nil { - return err - } - args.Topics[i] = []common.Hash{top} - } else if topics, ok := t.([]interface{}); ok { // OR case, e.g. [null, "topic0", "topic1"] - for _, rawTopic := range topics { - if rawTopic == nil { - args.Topics[i] = append(args.Topics[i], common.Hash{}) - } else if topic, ok := rawTopic.(string); ok { - parsed, err := topicConverter(topic) - if err != nil { - return err - } - args.Topics[i] = append(args.Topics[i], parsed) - } else { - return fmt.Errorf("invalid topic(s)") - } - } - } else { - return fmt.Errorf("invalid topic(s)") - } - } - } - - return nil -} - -// NewFilter creates a new filter and returns the filter id. It can be used to retrieve logs.
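The topics array parsed above is a position-indexed AND of OR-groups: a log matches when logs.Topics[i] equals some entry of Topics[i] for every position, with the empty hash acting as a wildcard. A self-contained sketch of that matching rule (plain strings stand in for common.Hash, and "" for the wildcard common.Hash{}):

package main

import "fmt"

// matches mirrors the semantics of the [][]common.Hash built by
// UnmarshalJSON: filter[i] is an OR-group constraining topic position i,
// and all positions are ANDed together.
func matches(logTopics []string, filter [][]string) bool {
	if len(filter) > len(logTopics) {
		return false // the log carries fewer topics than the filter constrains
	}
	for i, group := range filter {
		ok := false
		for _, want := range group {
			if want == "" || want == logTopics[i] {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	log := []string{"A", "B", "C"}
	fmt.Println(matches(log, [][]string{{"A"}, {""}, {"C", "X"}})) // true
	fmt.Println(matches(log, [][]string{{"A"}, {"X"}}))            // false
}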
-func (s *PublicFilterAPI) NewFilter(args NewFilterArgs) (string, error) { - externalId, err := newFilterId() - if err != nil { - return "", err - } - - var id int - if len(args.Addresses) > 0 { - id, err = s.newLogFilter(args.FromBlock.Int64(), args.ToBlock.Int64(), args.Addresses, args.Topics, nil) - } else { - id, err = s.newLogFilter(args.FromBlock.Int64(), args.ToBlock.Int64(), nil, args.Topics, nil) - } - if err != nil { - return "", err - } - - s.filterMapMu.Lock() - s.filterMapping[externalId] = id - s.filterMapMu.Unlock() - - return externalId, nil -} - -// GetLogs returns the logs matching the given argument. -func (s *PublicFilterAPI) GetLogs(args NewFilterArgs) []vmlog { - filter := New(s.chainDb) - filter.SetBeginBlock(args.FromBlock.Int64()) - filter.SetEndBlock(args.ToBlock.Int64()) - filter.SetAddresses(args.Addresses) - filter.SetTopics(args.Topics) - - return toRPCLogs(filter.Find(), false) -} - -// UninstallFilter removes the filter with the given filter id. -func (s *PublicFilterAPI) UninstallFilter(filterId string) bool { - s.filterManager.Lock() - defer s.filterManager.Unlock() - - s.filterMapMu.Lock() - id, ok := s.filterMapping[filterId] - if !ok { - s.filterMapMu.Unlock() - return false - } - delete(s.filterMapping, filterId) - s.filterMapMu.Unlock() - - s.filterManager.Remove(id) - - s.logMu.Lock() - if _, ok := s.logQueue[id]; ok { - delete(s.logQueue, id) - s.logMu.Unlock() - return true - } - s.logMu.Unlock() - - s.blockMu.Lock() - if _, ok := s.blockQueue[id]; ok { - delete(s.blockQueue, id) - s.blockMu.Unlock() - return true - } - s.blockMu.Unlock() - - s.transactionMu.Lock() - if _, ok := s.transactionQueue[id]; ok { - delete(s.transactionQueue, id) - s.transactionMu.Unlock() - return true - } - s.transactionMu.Unlock() - - return false -} - -// getFilterType is a helper utility that determines the type of filter for the given filter id. -func (s *PublicFilterAPI) getFilterType(id int) byte { - if _, ok := s.blockQueue[id]; ok { - return blockFilterTy - } else if _, ok := s.transactionQueue[id]; ok { - return transactionFilterTy - } else if _, ok := s.logQueue[id]; ok { - return logFilterTy - } - - return unknownFilterTy -} - -// blockFilterChanged returns a collection of block hashes for the block filter with the given id. -func (s *PublicFilterAPI) blockFilterChanged(id int) []common.Hash { - s.blockMu.Lock() - defer s.blockMu.Unlock() - - if s.blockQueue[id] != nil { - return s.blockQueue[id].get() - } - return nil -} - -// transactionFilterChanged returns a collection of transaction hashes for the pending -// transaction filter with the given id. -func (s *PublicFilterAPI) transactionFilterChanged(id int) []common.Hash { - s.transactionMu.Lock() // guard the transaction queue with its own mutex, not blockMu - defer s.transactionMu.Unlock() - - if s.transactionQueue[id] != nil { - return s.transactionQueue[id].get() - } - return nil -} - -// logFilterChanged returns a collection of logs for the log filter with the given id. -func (s *PublicFilterAPI) logFilterChanged(id int) []vmlog { - s.logMu.Lock() - defer s.logMu.Unlock() - - if s.logQueue[id] != nil { - return s.logQueue[id].get() - } - return nil -} - -// GetFilterLogs returns the logs for the filter with the given id.
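The external ids handed out by this API come from newFilterId, defined further below: 16 bytes from crypto/rand, hex-encoded with an 0x prefix, so one DApp cannot guess and uninstall another DApp's filters. A condensed sketch (the newID name is ours, not the source's):

package main

import (
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
)

// newID draws 16 random bytes and renders them as an 0x-prefixed hex
// string, the same scheme newFilterId below uses for external filter ids.
func newID() (string, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "", errors.New("unable to generate filter id")
	}
	return "0x" + hex.EncodeToString(b[:]), nil
}

func main() {
	id, err := newID()
	fmt.Println(len(id), err) // 34 <nil>: "0x" plus 32 hex characters
}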
-func (s *PublicFilterAPI) GetFilterLogs(filterId string) []vmlog { - s.filterMapMu.RLock() - id, ok := s.filterMapping[filterId] - s.filterMapMu.RUnlock() - if !ok { - return toRPCLogs(nil, false) - } - - if filter := s.filterManager.Get(id); filter != nil { - return toRPCLogs(filter.Find(), false) - } - - return toRPCLogs(nil, false) -} - -// GetFilterChanges returns the logs for the filter with the given id since the last time it was called. -// This can be used for polling. -func (s *PublicFilterAPI) GetFilterChanges(filterId string) interface{} { - s.filterMapMu.RLock() - id, ok := s.filterMapping[filterId] - s.filterMapMu.RUnlock() - - if !ok { // filter not found - return []interface{}{} - } - - switch s.getFilterType(id) { - case blockFilterTy: - return returnHashes(s.blockFilterChanged(id)) - case transactionFilterTy: - return returnHashes(s.transactionFilterChanged(id)) - case logFilterTy: - return s.logFilterChanged(id) - } - - return []interface{}{} -} - -type vmlog struct { - *vm.Log - Removed bool `json:"removed"` -} - -type logQueue struct { - mu sync.Mutex - - logs []vmlog - timeout time.Time - id int -} - -func (l *logQueue) add(logs ...vmlog) { - l.mu.Lock() - defer l.mu.Unlock() - - l.logs = append(l.logs, logs...) -} - -func (l *logQueue) get() []vmlog { - l.mu.Lock() - defer l.mu.Unlock() - - l.timeout = time.Now() - tmp := l.logs - l.logs = nil - return tmp -} - -type hashQueue struct { - mu sync.Mutex - - hashes []common.Hash - timeout time.Time - id int -} - -func (l *hashQueue) add(hashes ...common.Hash) { - l.mu.Lock() - defer l.mu.Unlock() - - l.hashes = append(l.hashes, hashes...) -} - -func (l *hashQueue) get() []common.Hash { - l.mu.Lock() - defer l.mu.Unlock() - - l.timeout = time.Now() - tmp := l.hashes - l.hashes = nil - return tmp -} - -// newFilterId generates a new random filter identifier that can be exposed to the outer world. By publishing random -// identifiers it is not feasible for DApps to guess the filter ids of other DApps and uninstall or poll them, -// causing the affected DApp to miss data. -func newFilterId() (string, error) { - var subid [16]byte - n, _ := rand.Read(subid[:]) - if n != 16 { - return "", errors.New("unable to generate filter id") - } - return "0x" + hex.EncodeToString(subid[:]), nil -} - -// toRPCLogs is a helper that converts a vm.Logs array into a structure which -// can hold additional information about the logs, such as whether they were -// removed. When nil is given it creates an empty slice instead, as required -// by the RPC specification. -func toRPCLogs(logs vm.Logs, removed bool) []vmlog { - convertedLogs := make([]vmlog, len(logs)) - for i, log := range logs { - convertedLogs[i] = vmlog{Log: log, Removed: removed} - } - return convertedLogs -} - -// returnHashes is a helper that returns an empty hash array in case the given -// hash array is nil, otherwise it returns the given hashes. The RPC interface -// defines that an array is always returned. -func returnHashes(hashes []common.Hash) []common.Hash { - if hashes == nil { - return []common.Hash{} - } - return hashes -} diff --git a/exp/filters/api_test.go b/exp/filters/api_test.go deleted file mode 100644 index 0dcec62dc3653..0000000000000 --- a/exp/filters/api_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library.
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package filters_test - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/exp/filters" - "github.com/expanse-org/go-expanse/rpc" -) - -func TestUnmarshalJSONNewFilterArgs(t *testing.T) { - var ( - fromBlock rpc.BlockNumber = 0x123435 - toBlock rpc.BlockNumber = 0xabcdef - address0 = common.StringToAddress("70c87d191324e6712a591f304b4eedef6ad9bb9d") - address1 = common.StringToAddress("9b2055d370f73ec7d8a03e965129118dc8f5bf83") - topic0 = common.HexToHash("3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1ca") - topic1 = common.HexToHash("9084a792d2f8b16a62b882fd56f7860c07bf5fa91dd8a2ae7e809e5180fef0b3") - topic2 = common.HexToHash("6ccae1c4af4152f460ff510e573399795dfab5dcf1fa60d1f33ac8fdc1e480ce") - nullTopic = common.Hash{} - ) - - // default values - var test0 filters.NewFilterArgs - if err := json.Unmarshal([]byte("{}"), &test0); err != nil { - t.Fatal(err) - } - if test0.FromBlock != rpc.LatestBlockNumber { - t.Fatalf("expected %d, got %d", rpc.LatestBlockNumber, test0.FromBlock) - } - if test0.ToBlock != rpc.LatestBlockNumber { - t.Fatalf("expected %d, got %d", rpc.LatestBlockNumber, test0.ToBlock) - } - if len(test0.Addresses) != 0 { - t.Fatalf("expected 0 addresses, got %d", len(test0.Addresses)) - } - if len(test0.Topics) != 0 { - t.Fatalf("expected 0 topics, got %d topics", len(test0.Topics)) - } - - // from, to block number - var test1 filters.NewFilterArgs - vector := fmt.Sprintf(`{"fromBlock":"0x%x","toBlock":"0x%x"}`, fromBlock, toBlock) - if err := json.Unmarshal([]byte(vector), &test1); err != nil { - t.Fatal(err) - } - if test1.FromBlock != fromBlock { - t.Fatalf("expected FromBlock %d, got %d", fromBlock, test1.FromBlock) - } - if test1.ToBlock != toBlock { - t.Fatalf("expected ToBlock %d, got %d", toBlock, test1.ToBlock) - } - - // single address - var test2 filters.NewFilterArgs - vector = fmt.Sprintf(`{"address": "%s"}`, address0.Hex()) - if err := json.Unmarshal([]byte(vector), &test2); err != nil { - t.Fatal(err) - } - if len(test2.Addresses) != 1 { - t.Fatalf("expected 1 address, got %d address(es)", len(test2.Addresses)) - } - if test2.Addresses[0] != address0 { - t.Fatalf("expected address %x, got %x", address0, test2.Addresses[0]) - } - - // multiple address - var test3 filters.NewFilterArgs - vector = fmt.Sprintf(`{"address": ["%s", "%s"]}`, address0.Hex(), address1.Hex()) - if err := json.Unmarshal([]byte(vector), &test3); err != nil { - t.Fatal(err) - } - if len(test3.Addresses) != 2 { - t.Fatalf("expected 2 addresses, got %d address(es)", len(test3.Addresses)) - } - if test3.Addresses[0] != address0 { - t.Fatalf("expected address %x, got %x", address0, test3.Addresses[0]) - } - if test3.Addresses[1] != address1 { - t.Fatalf("expected address %x, got %x", address1, test3.Addresses[1]) - } - - // 
single topic - var test4 filters.NewFilterArgs - vector = fmt.Sprintf(`{"topics": ["%s"]}`, topic0.Hex()) - if err := json.Unmarshal([]byte(vector), &test4); err != nil { - t.Fatal(err) - } - if len(test4.Topics) != 1 { - t.Fatalf("expected 1 topic, got %d", len(test4.Topics)) - } - if len(test4.Topics[0]) != 1 { - t.Fatalf("expected len(topics[0]) to be 1, got %d", len(test4.Topics[0])) - } - if test4.Topics[0][0] != topic0 { - t.Fatalf("got %x, expected %x", test4.Topics[0][0], topic0) - } - - // test multiple "AND" topics - var test5 filters.NewFilterArgs - vector = fmt.Sprintf(`{"topics": ["%s", "%s"]}`, topic0.Hex(), topic1.Hex()) - if err := json.Unmarshal([]byte(vector), &test5); err != nil { - t.Fatal(err) - } - if len(test5.Topics) != 2 { - t.Fatalf("expected 2 topics, got %d", len(test5.Topics)) - } - if len(test5.Topics[0]) != 1 { - t.Fatalf("expected 1 topic, got %d", len(test5.Topics[0])) - } - if test5.Topics[0][0] != topic0 { - t.Fatalf("got %x, expected %x", test5.Topics[0][0], topic0) - } - if len(test5.Topics[1]) != 1 { - t.Fatalf("expected 1 topic, got %d", len(test5.Topics[1])) - } - if test5.Topics[1][0] != topic1 { - t.Fatalf("got %x, expected %x", test5.Topics[1][0], topic1) - } - - // test optional topic - var test6 filters.NewFilterArgs - vector = fmt.Sprintf(`{"topics": ["%s", null, "%s"]}`, topic0.Hex(), topic2.Hex()) - if err := json.Unmarshal([]byte(vector), &test6); err != nil { - t.Fatal(err) - } - if len(test6.Topics) != 3 { - t.Fatalf("expected 3 topics, got %d", len(test6.Topics)) - } - if len(test6.Topics[0]) != 1 { - t.Fatalf("expected 1 topic, got %d", len(test6.Topics[0])) - } - if test6.Topics[0][0] != topic0 { - t.Fatalf("got %x, expected %x", test6.Topics[0][0], topic0) - } - if len(test6.Topics[1]) != 1 { - t.Fatalf("expected 1 topic, got %d", len(test6.Topics[1])) - } - if test6.Topics[1][0] != nullTopic { - t.Fatalf("got %x, expected empty hash", test6.Topics[1][0]) - } - if len(test6.Topics[2]) != 1 { - t.Fatalf("expected 1 topic, got %d", len(test6.Topics[2])) - } - if test6.Topics[2][0] != topic2 { - t.Fatalf("got %x, expected %x", test6.Topics[2][0], topic2) - } - - // test OR topics - var test7 filters.NewFilterArgs - vector = fmt.Sprintf(`{"topics": [["%s", "%s"], null, ["%s", null]]}`, topic0.Hex(), topic1.Hex(), topic2.Hex()) - if err := json.Unmarshal([]byte(vector), &test7); err != nil { - t.Fatal(err) - } - if len(test7.Topics) != 3 { - t.Fatalf("expected 3 topics, got %d topics", len(test7.Topics)) - } - if len(test7.Topics[0]) != 2 { - t.Fatalf("expected 2 topics, got %d topics", len(test7.Topics[0])) - } - if test7.Topics[0][0] != topic0 || test7.Topics[0][1] != topic1 { - t.Fatalf("invalid topics expected [%x,%x], got [%x,%x]", - topic0, topic1, test7.Topics[0][0], test7.Topics[0][1], - ) - } - if len(test7.Topics[1]) != 1 { - t.Fatalf("expected 1 topic, got %d topics", len(test7.Topics[1])) - } - if test7.Topics[1][0] != nullTopic { - t.Fatalf("expected empty hash, got %x", test7.Topics[1][0]) - } - if len(test7.Topics[2]) != 2 { - t.Fatalf("expected 2 topics, got %d topics", len(test7.Topics[2])) - } - if test7.Topics[2][0] != topic2 || test7.Topics[2][1] != nullTopic { - t.Fatalf("invalid topics expected [%x,%x], got [%x,%x]", - topic2, nullTopic, test7.Topics[2][0], test7.Topics[2][1], - ) - } -} diff --git a/exp/filters/filter.go b/exp/filters/filter.go deleted file mode 100644 index f9f018c4fa087..0000000000000 --- a/exp/filters/filter.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file 
is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package filters - -import ( - "math" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/ethdb" -) - -type AccountChange struct { - Address, StateAddress []byte -} - -// Filtering interface -type Filter struct { - created time.Time - - db ethdb.Database - begin, end int64 - addresses []common.Address - topics [][]common.Hash - - BlockCallback func(*types.Block, vm.Logs) - TransactionCallback func(*types.Transaction) - LogCallback func(*vm.Log, bool) -} - -// Create a new filter which uses a bloom filter on blocks to figure out whether a particular block -// is interesting or not. -func New(db ethdb.Database) *Filter { - return &Filter{db: db} -} - -// Set the earliest and latest block for filtering. -// -1 = latest block (i.e., the current block) -// hash = particular hash from-to -func (self *Filter) SetBeginBlock(begin int64) { - self.begin = begin -} - -func (self *Filter) SetEndBlock(end int64) { - self.end = end -} - -func (self *Filter) SetAddresses(addr []common.Address) { - self.addresses = addr -} - -func (self *Filter) SetTopics(topics [][]common.Hash) { - self.topics = topics -} - -// Run filters logs with the current parameters set -func (self *Filter) Find() vm.Logs { - latestHash := core.GetHeadBlockHash(self.db) - latestBlock := core.GetBlock(self.db, latestHash) - if latestBlock == nil { - return vm.Logs{} - } - var beginBlockNo uint64 = uint64(self.begin) - if self.begin == -1 { - beginBlockNo = latestBlock.NumberU64() - } - var endBlockNo uint64 = uint64(self.end) - if self.end == -1 { - endBlockNo = latestBlock.NumberU64() - } - - // if no addresses are present we can't make use of fast search which - // uses the mipmap bloom filters to check for fast inclusion and uses - // higher range probability in order to ensure at least a false positive - if len(self.addresses) == 0 { - return self.getLogs(beginBlockNo, endBlockNo) - } - return self.mipFind(beginBlockNo, endBlockNo, 0) -} - -func (self *Filter) mipFind(start, end uint64, depth int) (logs vm.Logs) { - level := core.MIPMapLevels[depth] - // normalise numerator so we can work in level specific batches and - // work with the proper range checks - for num := start / level * level; num <= end; num += level { - // find addresses in bloom filters - bloom := core.GetMipmapBloom(self.db, num, level) - for _, addr := range self.addresses { - if bloom.TestBytes(addr[:]) { - // range check normalised values and make sure that - // we're resolving the correct range instead of the - // normalised values. 
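// To make the clamping just below concrete (illustrative numbers, not from
// the source): with level = 1000000, start = 1234567 and end = 2100000, the
// loop visits num = 1000000 and num = 2000000; the max/min below then resolve
// the first batch to [1234567, 1999999] and the second to [2000000, 2100000],
// so only block numbers actually inside the requested range are scanned.
// (core.MIPMapLevels defines the real batch sizes per mipmap depth.)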
- start := uint64(math.Max(float64(num), float64(start))) - end := uint64(math.Min(float64(num+level-1), float64(end))) - if depth+1 == len(core.MIPMapLevels) { - logs = append(logs, self.getLogs(start, end)...) - } else { - logs = append(logs, self.mipFind(start, end, depth+1)...) - } - // break so we don't check the same range for each - // possible address. Checks on multiple addresses - // are handled further down the stack. - break - } - } - } - - return logs -} - -func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) { - for i := start; i <= end; i++ { - var block *types.Block - hash := core.GetCanonicalHash(self.db, i) - if hash != (common.Hash{}) { - block = core.GetBlock(self.db, hash) - } - if block == nil { // block not found/written - return logs - } - - // Use bloom filtering to see if this block is interesting given the - // current parameters - if self.bloomFilter(block) { - // Get the logs of the block - var ( - receipts = core.GetBlockReceipts(self.db, block.Hash()) - unfiltered vm.Logs - ) - for _, receipt := range receipts { - unfiltered = append(unfiltered, receipt.Logs...) - } - logs = append(logs, self.FilterLogs(unfiltered)...) - } - } - - return logs -} - -func includes(addresses []common.Address, a common.Address) bool { - for _, addr := range addresses { - if addr == a { - return true - } - } - - return false -} - -func (self *Filter) FilterLogs(logs vm.Logs) vm.Logs { - var ret vm.Logs - - // Filter the logs for interesting stuff -Logs: - for _, log := range logs { - if len(self.addresses) > 0 && !includes(self.addresses, log.Address) { - continue - } - - logTopics := make([]common.Hash, len(self.topics)) - copy(logTopics, log.Topics) - - // If the number of filtered topics is greater than the number of topics - // in the logs, skip. - if len(self.topics) > len(log.Topics) { - continue Logs - } - - for i, topics := range self.topics { - var match bool - for _, topic := range topics { - // common.Hash{} is a match all (wildcard) - if (topic == common.Hash{}) || log.Topics[i] == topic { - match = true - break - } - } - - if !match { - continue Logs - } - - } - - ret = append(ret, log) - } - - return ret -} - -func (self *Filter) bloomFilter(block *types.Block) bool { - if len(self.addresses) > 0 { - var included bool - for _, addr := range self.addresses { - if types.BloomLookup(block.Bloom(), addr) { - included = true - break - } - } - - if !included { - return false - } - } - - for _, sub := range self.topics { - var included bool - for _, topic := range sub { - if (topic == common.Hash{}) || types.BloomLookup(block.Bloom(), topic) { - included = true - break - } - } - if !included { - return false - } - } - - return true -} diff --git a/exp/filters/filter_system.go b/exp/filters/filter_system.go deleted file mode 100644 index 33c9e9e9abbde..0000000000000 --- a/exp/filters/filter_system.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details.
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// package filters implements an expanse filtering system for block, -// transactions and log events. -package filters - -import ( - "fmt" - "sync" - "time" - - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/event" -) - -// FilterType determines the type of filter and is used to put the filter in to -// the correct bucket when added. -type FilterType byte - -const ( - ChainFilter FilterType = iota // new block events filter - PendingTxFilter // pending transaction filter - LogFilter // new or removed log filter - PendingLogFilter // pending log filter -) - -// FilterSystem manages filters that filter specific events such as -// block, transaction and log events. The Filtering system can be used to listen -// for specific LOG events fired by the EVM (Ethereum Virtual Machine). -type FilterSystem struct { - filterMu sync.RWMutex - filterId int - - chainFilters map[int]*Filter - pendingTxFilters map[int]*Filter - logFilters map[int]*Filter - pendingLogFilters map[int]*Filter - - // generic is an ugly hack for Get - generic map[int]*Filter - - sub event.Subscription -} - -// NewFilterSystem returns a newly allocated filter manager -func NewFilterSystem(mux *event.TypeMux) *FilterSystem { - fs := &FilterSystem{ - chainFilters: make(map[int]*Filter), - pendingTxFilters: make(map[int]*Filter), - logFilters: make(map[int]*Filter), - pendingLogFilters: make(map[int]*Filter), - generic: make(map[int]*Filter), - } - fs.sub = mux.Subscribe( - core.PendingLogsEvent{}, - core.RemovedLogsEvent{}, - core.ChainEvent{}, - core.TxPreEvent{}, - vm.Logs(nil), - ) - go fs.filterLoop() - return fs -} - -// Stop quits the filter loop required for polling events -func (fs *FilterSystem) Stop() { - fs.sub.Unsubscribe() -} - -// Acquire filter system maps lock, required to force lock acquisition -// sequence with filterMu acquired first to avoid deadlocks by callbacks -func (fs *FilterSystem) Lock() { - fs.filterMu.Lock() -} - -// Release filter system maps lock -func (fs *FilterSystem) Unlock() { - fs.filterMu.Unlock() -} - -// Add adds a filter to the filter manager -// Expects filterMu to be locked. -func (fs *FilterSystem) Add(filter *Filter, filterType FilterType) (int, error) { - id := fs.filterId - filter.created = time.Now() - - switch filterType { - case ChainFilter: - fs.chainFilters[id] = filter - case PendingTxFilter: - fs.pendingTxFilters[id] = filter - case LogFilter: - fs.logFilters[id] = filter - case PendingLogFilter: - fs.pendingLogFilters[id] = filter - default: - return 0, fmt.Errorf("unknown filter type %v", filterType) - } - fs.generic[id] = filter - - fs.filterId++ - - return id, nil -} - -// Remove removes a filter by filter id -// Expects filterMu to be locked. -func (fs *FilterSystem) Remove(id int) { - delete(fs.chainFilters, id) - delete(fs.pendingTxFilters, id) - delete(fs.logFilters, id) - delete(fs.pendingLogFilters, id) - delete(fs.generic, id) -} - -func (fs *FilterSystem) Get(id int) *Filter { - fs.filterMu.RLock() - defer fs.filterMu.RUnlock() - - return fs.generic[id] -} - -// filterLoop waits for specific events from expanse and fires their handlers -// when the filter matches the requirements. 
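// End-to-end, the loop below is driven like this (a hedged sketch; the call
// sequence is assumed rather than taken from the original file):
//
//	mux := new(event.TypeMux)
//	fs := NewFilterSystem(mux)      // subscribes and starts filterLoop
//	fs.Lock()                       // Add/Remove expect filterMu to be held
//	id, err := fs.Add(&Filter{ /* callbacks */ }, PendingTxFilter)
//	fs.Unlock()
//	// ... events posted on mux now reach the filter's callbacks ...
//	fs.Lock(); fs.Remove(id); fs.Unlock()
//	fs.Stop()                       // unsubscribes, ending filterLoop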
-func (fs *FilterSystem) filterLoop() { - for event := range fs.sub.Chan() { - switch ev := event.Data.(type) { - case core.ChainEvent: - fs.filterMu.RLock() - for _, filter := range fs.chainFilters { - if filter.BlockCallback != nil && !filter.created.After(event.Time) { - filter.BlockCallback(ev.Block, ev.Logs) - } - } - fs.filterMu.RUnlock() - case core.TxPreEvent: - fs.filterMu.RLock() - for _, filter := range fs.pendingTxFilters { - if filter.TransactionCallback != nil && !filter.created.After(event.Time) { - filter.TransactionCallback(ev.Tx) - } - } - fs.filterMu.RUnlock() - - case vm.Logs: - fs.filterMu.RLock() - for _, filter := range fs.logFilters { - if filter.LogCallback != nil && !filter.created.After(event.Time) { - for _, log := range filter.FilterLogs(ev) { - filter.LogCallback(log, false) - } - } - } - fs.filterMu.RUnlock() - case core.RemovedLogsEvent: - fs.filterMu.RLock() - for _, filter := range fs.logFilters { - if filter.LogCallback != nil && !filter.created.After(event.Time) { - for _, removedLog := range filter.FilterLogs(ev.Logs) { - filter.LogCallback(removedLog, true) - } - } - } - fs.filterMu.RUnlock() - case core.PendingLogsEvent: - fs.filterMu.RLock() - for _, filter := range fs.pendingLogFilters { - if filter.LogCallback != nil && !filter.created.After(event.Time) { - for _, pendingLog := range ev.Logs { - filter.LogCallback(pendingLog, false) - } - } - } - fs.filterMu.RUnlock() - } - } -} diff --git a/exp/filters/filter_system_test.go b/exp/filters/filter_system_test.go deleted file mode 100644 index b14847783712d..0000000000000 --- a/exp/filters/filter_system_test.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
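// Dispatch summary for the loop above (a recap, not original text): ChainEvent
// drives BlockCallback on chain filters, TxPreEvent drives TransactionCallback
// on pending-tx filters, vm.Logs and RemovedLogsEvent both drive LogCallback on
// log filters (with removed = false and true respectively), and
// PendingLogsEvent drives LogCallback on pending-log filters without any
// FilterLogs pre-filtering.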
- -package filters - -import ( - "testing" - "time" - - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/event" -) - -func TestCallbacks(t *testing.T) { - var ( - mux event.TypeMux - fs = NewFilterSystem(&mux) - blockDone = make(chan struct{}) - txDone = make(chan struct{}) - logDone = make(chan struct{}) - removedLogDone = make(chan struct{}) - pendingLogDone = make(chan struct{}) - ) - - blockFilter := &Filter{ - BlockCallback: func(*types.Block, vm.Logs) { - close(blockDone) - }, - } - txFilter := &Filter{ - TransactionCallback: func(*types.Transaction) { - close(txDone) - }, - } - logFilter := &Filter{ - LogCallback: func(l *vm.Log, oob bool) { - if !oob { - close(logDone) - } - }, - } - removedLogFilter := &Filter{ - LogCallback: func(l *vm.Log, oob bool) { - if oob { - close(removedLogDone) - } - }, - } - pendingLogFilter := &Filter{ - LogCallback: func(*vm.Log, bool) { - close(pendingLogDone) - }, - } - - fs.Add(blockFilter, ChainFilter) - fs.Add(txFilter, PendingTxFilter) - fs.Add(logFilter, LogFilter) - fs.Add(removedLogFilter, LogFilter) - fs.Add(pendingLogFilter, PendingLogFilter) - - mux.Post(core.ChainEvent{}) - mux.Post(core.TxPreEvent{}) - mux.Post(vm.Logs{&vm.Log{}}) - mux.Post(core.RemovedLogsEvent{Logs: vm.Logs{&vm.Log{}}}) - mux.Post(core.PendingLogsEvent{Logs: vm.Logs{&vm.Log{}}}) - - const dura = 5 * time.Second - failTimer := time.NewTimer(dura) - select { - case <-blockDone: - case <-failTimer.C: - t.Error("block filter failed to trigger (timeout)") - } - - failTimer.Reset(dura) - select { - case <-txDone: - case <-failTimer.C: - t.Error("transaction filter failed to trigger (timeout)") - } - - failTimer.Reset(dura) - select { - case <-logDone: - case <-failTimer.C: - t.Error("log filter failed to trigger (timeout)") - } - - failTimer.Reset(dura) - select { - case <-removedLogDone: - case <-failTimer.C: - t.Error("removed log filter failed to trigger (timeout)") - } - - failTimer.Reset(dura) - select { - case <-pendingLogDone: - case <-failTimer.C: - t.Error("pending log filter failed to trigger (timeout)") - } -} diff --git a/exp/filters/filter_test.go b/exp/filters/filter_test.go deleted file mode 100644 index dc5274cf3079c..0000000000000 --- a/exp/filters/filter_test.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
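A brief aside on the pattern TestCallbacks uses above: one time.Timer is reused
across successive select blocks, giving each expected callback its own
five-second budget. A minimal self-contained sketch of the same pattern
(hypothetical names, stdlib only, not code from this repository):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// done stands in for one of the *Done channels a filter callback closes.
		done := make(chan struct{})
		go func() {
			time.Sleep(10 * time.Millisecond) // simulated asynchronous callback
			close(done)
		}()

		const budget = 5 * time.Second
		failTimer := time.NewTimer(budget)
		defer failTimer.Stop()

		select {
		case <-done:
			fmt.Println("callback fired")
		case <-failTimer.C:
			fmt.Println("callback failed to trigger (timeout)")
		}
		// For each further expectation the test calls failTimer.Reset(budget)
		// and selects again, exactly as TestCallbacks does above.
	}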
- -package filters - -import ( - "io/ioutil" - "math/big" - "os" - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/ethdb" -) - -func makeReceipt(addr common.Address) *types.Receipt { - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{ - &vm.Log{Address: addr}, - } - receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - return receipt -} - -func BenchmarkMipmaps(b *testing.B) { - dir, err := ioutil.TempDir("", "mipmap") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(dir) - - var ( - db, _ = ethdb.NewLDBDatabase(dir, 0, 0) - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = common.BytesToAddress([]byte("jeff")) - addr3 = common.BytesToAddress([]byte("expanse")) - addr4 = common.BytesToAddress([]byte("random addresses please")) - ) - defer db.Close() - - genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{Address: addr1, Balance: big.NewInt(1000000)}) - chain, receipts := core.GenerateChain(nil, genesis, db, 100010, func(i int, gen *core.BlockGen) { - var receipts types.Receipts - switch i { - case 2403: - receipt := makeReceipt(addr1) - receipts = types.Receipts{receipt} - gen.AddUncheckedReceipt(receipt) - case 1034: - receipt := makeReceipt(addr2) - receipts = types.Receipts{receipt} - gen.AddUncheckedReceipt(receipt) - case 34: - receipt := makeReceipt(addr3) - receipts = types.Receipts{receipt} - gen.AddUncheckedReceipt(receipt) - case 99999: - receipt := makeReceipt(addr4) - receipts = types.Receipts{receipt} - gen.AddUncheckedReceipt(receipt) - - } - - // store the receipts - err := core.WriteReceipts(db, receipts) - if err != nil { - b.Fatal(err) - } - core.WriteMipmapBloom(db, uint64(i+1), receipts) - }) - for i, block := range chain { - core.WriteBlock(db, block) - if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { - b.Fatalf("failed to insert block number: %v", err) - } - if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { - b.Fatalf("failed to insert block number: %v", err) - } - if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil { - b.Fatal("error writing block receipts:", err) - } - } - b.ResetTimer() - - filter := New(db) - filter.SetAddresses([]common.Address{addr1, addr2, addr3, addr4}) - filter.SetBeginBlock(0) - filter.SetEndBlock(-1) - - for i := 0; i < b.N; i++ { - logs := filter.Find() - if len(logs) != 4 { - b.Fatal("expected 4 log, got", len(logs)) - } - } -} - -func TestFilters(t *testing.T) { - dir, err := ioutil.TempDir("", "mipmap") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - var ( - db, _ = ethdb.NewLDBDatabase(dir, 0, 0) - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr = crypto.PubkeyToAddress(key1.PublicKey) - - hash1 = common.BytesToHash([]byte("topic1")) - hash2 = common.BytesToHash([]byte("topic2")) - hash3 = common.BytesToHash([]byte("topic3")) - hash4 = common.BytesToHash([]byte("topic4")) - ) - defer db.Close() - - genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{Address: addr, Balance: big.NewInt(1000000)}) - chain, receipts := core.GenerateChain(nil, genesis, db, 1000, func(i int, gen *core.BlockGen) { 
- var receipts types.Receipts - switch i { - case 1: - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{ - &vm.Log{ - Address: addr, - Topics: []common.Hash{hash1}, - }, - } - gen.AddUncheckedReceipt(receipt) - receipts = types.Receipts{receipt} - case 2: - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{ - &vm.Log{ - Address: addr, - Topics: []common.Hash{hash2}, - }, - } - gen.AddUncheckedReceipt(receipt) - receipts = types.Receipts{receipt} - case 998: - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{ - &vm.Log{ - Address: addr, - Topics: []common.Hash{hash3}, - }, - } - gen.AddUncheckedReceipt(receipt) - receipts = types.Receipts{receipt} - case 999: - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{ - &vm.Log{ - Address: addr, - Topics: []common.Hash{hash4}, - }, - } - gen.AddUncheckedReceipt(receipt) - receipts = types.Receipts{receipt} - } - - // store the receipts - err := core.WriteReceipts(db, receipts) - if err != nil { - t.Fatal(err) - } - // i is used as block number for the writes but since the i - // starts at 0 and block 0 (genesis) is already present increment - // by one - core.WriteMipmapBloom(db, uint64(i+1), receipts) - }) - for i, block := range chain { - core.WriteBlock(db, block) - if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { - t.Fatalf("failed to insert block number: %v", err) - } - if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { - t.Fatalf("failed to insert block number: %v", err) - } - if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil { - t.Fatal("error writing block receipts:", err) - } - } - - filter := New(db) - filter.SetAddresses([]common.Address{addr}) - filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2, hash3, hash4}}) - filter.SetBeginBlock(0) - filter.SetEndBlock(-1) - - logs := filter.Find() - if len(logs) != 4 { - t.Error("expected 4 log, got", len(logs)) - } - - filter = New(db) - filter.SetAddresses([]common.Address{addr}) - filter.SetTopics([][]common.Hash{[]common.Hash{hash3}}) - filter.SetBeginBlock(900) - filter.SetEndBlock(999) - logs = filter.Find() - if len(logs) != 1 { - t.Error("expected 1 log, got", len(logs)) - } - if len(logs) > 0 && logs[0].Topics[0] != hash3 { - t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) - } - - filter = New(db) - filter.SetAddresses([]common.Address{addr}) - filter.SetTopics([][]common.Hash{[]common.Hash{hash3}}) - filter.SetBeginBlock(990) - filter.SetEndBlock(-1) - logs = filter.Find() - if len(logs) != 1 { - t.Error("expected 1 log, got", len(logs)) - } - if len(logs) > 0 && logs[0].Topics[0] != hash3 { - t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) - } - - filter = New(db) - filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2}}) - filter.SetBeginBlock(1) - filter.SetEndBlock(10) - - logs = filter.Find() - if len(logs) != 2 { - t.Error("expected 2 log, got", len(logs)) - } - - failHash := common.BytesToHash([]byte("fail")) - filter = New(db) - filter.SetTopics([][]common.Hash{[]common.Hash{failHash}}) - filter.SetBeginBlock(0) - filter.SetEndBlock(-1) - - logs = filter.Find() - if len(logs) != 0 { - t.Error("expected 0 log, got", len(logs)) - } - - failAddr := common.BytesToAddress([]byte("failmenow")) - filter = New(db) - filter.SetAddresses([]common.Address{failAddr}) - filter.SetBeginBlock(0) - filter.SetEndBlock(-1) - - logs = filter.Find() - 
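// Nothing in the generated chain logs from failAddr, so the mipmap bloom
// lookups in mipFind never match and Find should return no logs here. The
// final case below then exercises FilterLogs' positional topic semantics:
// each inner slice passed to SetTopics is an OR-set for one topic position
// and the positions are ANDed, so {{failHash}, {hash1}} requires failHash at
// position 0 and hash1 at position 1, which no generated log (each carries a
// single topic) can satisfy.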
if len(logs) != 0 { - t.Error("expected 0 log, got", len(logs)) - } - - filter = New(db) - filter.SetTopics([][]common.Hash{[]common.Hash{failHash}, []common.Hash{hash1}}) - filter.SetBeginBlock(0) - filter.SetEndBlock(-1) - - logs = filter.Find() - if len(logs) != 0 { - t.Error("expected 0 log, got", len(logs)) - } -} diff --git a/exp/gasprice.go b/exp/gasprice.go deleted file mode 100644 index 13d82a94da907..0000000000000 --- a/exp/gasprice.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package exp - -import ( - "math/big" - "math/rand" - "sync" - - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" -) - -const ( - gpoProcessPastBlocks = 100 - - // for testing - gpoDefaultBaseCorrectionFactor = 110 - gpoDefaultMinGasPrice = 10000000000000 -) - -type blockPriceInfo struct { - baseGasPrice *big.Int -} - -// GasPriceOracle recommends gas prices based on the content of recent -// blocks. -type GasPriceOracle struct { - exp *Expanse - initOnce sync.Once - minPrice *big.Int - lastBaseMutex sync.Mutex - lastBase *big.Int - - // state of listenLoop - blocks map[uint64]*blockPriceInfo - firstProcessed, lastProcessed uint64 - minBase *big.Int -} - -// NewGasPriceOracle returns a new oracle. 
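// Worked example for the constructor below (illustrative): with the default
// minimum price of 10000000000000 wei and a correction factor of 110 (the
// testing default declared above), minBase = minPrice * 100 / 110, roughly
// 0.909 * minPrice, letting the internal base price dip slightly below the
// configured minimum before SuggestPrice clamps the advertised price back up
// to minPrice.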
-func NewGasPriceOracle(exp *Expanse) *GasPriceOracle { - minprice := exp.GpoMinGasPrice - if minprice == nil { - minprice = big.NewInt(gpoDefaultMinGasPrice) - } - minbase := new(big.Int).Mul(minprice, big.NewInt(100)) - if exp.GpobaseCorrectionFactor > 0 { - minbase = minbase.Div(minbase, big.NewInt(int64(exp.GpobaseCorrectionFactor))) - } - return &GasPriceOracle{ - exp: exp, - blocks: make(map[uint64]*blockPriceInfo), - minBase: minbase, - minPrice: minprice, - lastBase: minprice, - } -} - -func (gpo *GasPriceOracle) init() { - gpo.initOnce.Do(func() { - gpo.processPastBlocks(gpo.exp.BlockChain()) - go gpo.listenLoop() - }) -} - -func (self *GasPriceOracle) processPastBlocks(chain *core.BlockChain) { - last := int64(-1) - cblock := chain.CurrentBlock() - if cblock != nil { - last = int64(cblock.NumberU64()) - } - first := int64(0) - if last > gpoProcessPastBlocks { - first = last - gpoProcessPastBlocks - } - self.firstProcessed = uint64(first) - for i := first; i <= last; i++ { - block := chain.GetBlockByNumber(uint64(i)) - if block != nil { - self.processBlock(block) - } - } - -} - -func (self *GasPriceOracle) listenLoop() { - events := self.exp.EventMux().Subscribe(core.ChainEvent{}, core.ChainSplitEvent{}) - defer events.Unsubscribe() - - for event := range events.Chan() { - switch event := event.Data.(type) { - case core.ChainEvent: - self.processBlock(event.Block) - case core.ChainSplitEvent: - self.processBlock(event.Block) - } - } -} - -func (self *GasPriceOracle) processBlock(block *types.Block) { - i := block.NumberU64() - if i > self.lastProcessed { - self.lastProcessed = i - } - - lastBase := self.minPrice - bpl := self.blocks[i-1] - if bpl != nil { - lastBase = bpl.baseGasPrice - } - if lastBase == nil { - return - } - - var corr int - lp := self.lowestPrice(block) - if lp == nil { - return - } - - if lastBase.Cmp(lp) < 0 { - corr = self.exp.GpobaseStepUp - } else { - corr = -self.exp.GpobaseStepDown - } - - crand := int64(corr * (900 + rand.Intn(201))) - newBase := new(big.Int).Mul(lastBase, big.NewInt(1000000+crand)) - newBase.Div(newBase, big.NewInt(1000000)) - - if newBase.Cmp(self.minBase) < 0 { - newBase = self.minBase - } - - bpi := self.blocks[i] - if bpi == nil { - bpi = &blockPriceInfo{} - self.blocks[i] = bpi - } - bpi.baseGasPrice = newBase - self.lastBaseMutex.Lock() - self.lastBase = newBase - self.lastBaseMutex.Unlock() - - glog.V(logger.Detail).Infof("Processed block #%v, base price is %v\n", block.NumberU64(), newBase.Int64()) -} - -// returns the lowers possible price with which a tx was or could have been included -func (self *GasPriceOracle) lowestPrice(block *types.Block) *big.Int { - gasUsed := big.NewInt(0) - - receipts := core.GetBlockReceipts(self.exp.ChainDb(), block.Hash()) - if len(receipts) > 0 { - if cgu := receipts[len(receipts)-1].CumulativeGasUsed; cgu != nil { - gasUsed = receipts[len(receipts)-1].CumulativeGasUsed - } - } - - if new(big.Int).Mul(gasUsed, big.NewInt(100)).Cmp(new(big.Int).Mul(block.GasLimit(), - big.NewInt(int64(self.exp.GpoFullBlockRatio)))) < 0 { - // block is not full, could have posted a tx with MinGasPrice - return big.NewInt(0) - } - - txs := block.Transactions() - if len(txs) == 0 { - return big.NewInt(0) - } - // block is full, find smallest gasPrice - minPrice := txs[0].GasPrice() - for i := 1; i < len(txs); i++ { - price := txs[i].GasPrice() - if price.Cmp(minPrice) < 0 { - minPrice = price - } - } - return minPrice -} - -// SuggestPrice returns the recommended gas price. 
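// Worked example (illustrative): with lastBase = 10000000000000 wei and a
// correction factor of 110, the suggestion below is
// 10000000000000 * 110 / 100 = 11000000000000 wei, clamped afterwards into
// the configured [minPrice, GpoMaxGasPrice] band.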
-func (self *GasPriceOracle) SuggestPrice() *big.Int { - self.init() - self.lastBaseMutex.Lock() - price := new(big.Int).Set(self.lastBase) - self.lastBaseMutex.Unlock() - - price.Mul(price, big.NewInt(int64(self.exp.GpobaseCorrectionFactor))) - price.Div(price, big.NewInt(100)) - if price.Cmp(self.minPrice) < 0 { - price.Set(self.minPrice) - } else if self.exp.GpoMaxGasPrice != nil && price.Cmp(self.exp.GpoMaxGasPrice) > 0 { - price.Set(self.exp.GpoMaxGasPrice) - } - return price -} diff --git a/exp/gpu_mining.go b/exp/gpu_mining.go deleted file mode 100644 index d884730ac4eb8..0000000000000 --- a/exp/gpu_mining.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// +build opencl - -package exp - -import ( - "fmt" - "math/big" - "strconv" - "strings" - "time" - - "github.com/expanse-org/ethash" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/miner" -) - -func (s *Expanse) StartMining(threads int, gpus string) error { - eb, err := s.Etherbase() - if err != nil { - err = fmt.Errorf("Cannot start mining without etherbase address: %v", err) - glog.V(logger.Error).Infoln(err) - return err - } - - // GPU mining - if gpus != "" { - var ids []int - for _, s := range strings.Split(gpus, ",") { - i, err := strconv.Atoi(s) - if err != nil { - return fmt.Errorf("Invalid GPU id(s): %v", err) - } - if i < 0 { - return fmt.Errorf("Invalid GPU id: %v", i) - } - ids = append(ids, i) - } - - // TODO: re-creating miner is a bit ugly - s.miner = miner.New(s, s.chainConfig, s.EventMux(), ethash.NewCL(ids)) - go s.miner.Start(eb, len(ids)) - return nil - } - - // CPU mining - go s.miner.Start(eb, threads) - return nil -} - -func GPUBench(gpuid uint64) { - e := ethash.NewCL([]int{int(gpuid)}) - - var h common.Hash - bogoHeader := &types.Header{ - ParentHash: h, - Number: big.NewInt(int64(42)), - Difficulty: big.NewInt(int64(999999999999999)), - } - bogoBlock := types.NewBlock(bogoHeader, nil, nil, nil) - - err := ethash.InitCL(bogoBlock.NumberU64(), e) - if err != nil { - fmt.Println("OpenCL init error: ", err) - return - } - - stopChan := make(chan struct{}) - reportHashRate := func() { - for { - time.Sleep(3 * time.Second) - fmt.Printf("hashes/s : %v\n", e.GetHashrate()) - } - } - fmt.Printf("Starting benchmark (%v seconds)\n", 60) - go reportHashRate() - go e.Search(bogoBlock, stopChan, 0) - time.Sleep(60 * time.Second) - fmt.Println("OK.") -} - -func PrintOpenCLDevices() { - ethash.PrintDevices() -} diff --git a/exp/handler.go b/exp/handler.go deleted file mode 100644 index 5d2ecb75cf98c..0000000000000 --- a/exp/handler.go +++ /dev/null @@ -1,783 +0,0 @@ -// Copyright 2015 The 
go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package exp - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "math/big" - "sync" - "sync/atomic" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/exp/downloader" - "github.com/expanse-org/go-expanse/exp/fetcher" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/p2p/discover" - "github.com/expanse-org/go-expanse/pow" - "github.com/expanse-org/go-expanse/rlp" -) - -const ( - softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data. - estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header -) - -var ( - daoChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge -) - -// errIncompatibleConfig is returned if the requested protocols and configs are -// not compatible (low protocol version restrictions and high requirements). -var errIncompatibleConfig = errors.New("incompatible configuration") - -func errResp(code errCode, format string, v ...interface{}) error { - return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) -} - -type ProtocolManager struct { - networkId int - - fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks) - synced uint32 // Flag whether we're considered synchronised (enables transaction processing) - - txpool txPool - blockchain *core.BlockChain - chaindb ethdb.Database - chainconfig *core.ChainConfig - - downloader *downloader.Downloader - fetcher *fetcher.Fetcher - peers *peerSet - - SubProtocols []p2p.Protocol - - eventMux *event.TypeMux - txSub event.Subscription - minedBlockSub event.Subscription - - // channels for fetcher, syncer, txsyncLoop - newPeerCh chan *peer - txsyncCh chan *txsync - quitSync chan struct{} - noMorePeers chan struct{} - - // wait group is used for graceful shutdowns during downloading - // and processing - wg sync.WaitGroup - - badBlockReportingEnabled bool -} - -// NewProtocolManager returns a new expanse sub protocol manager. The Expanse sub protocol manages peers capable -// with the expanse network. 
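// Version gating recap (a summary, not original text): when fast sync is
// requested, the constructor below skips sub-protocol versions older than
// eth63, since only eth63 peers can serve the node-data and receipt messages
// fast sync relies on; a non-empty local chain disables fast sync outright
// before any protocol is registered.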
-func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { - // Create the protocol manager with the base fields - manager := &ProtocolManager{ - networkId: networkId, - eventMux: mux, - txpool: txpool, - blockchain: blockchain, - chaindb: chaindb, - chainconfig: config, - peers: newPeerSet(), - newPeerCh: make(chan *peer), - noMorePeers: make(chan struct{}), - txsyncCh: make(chan *txsync), - quitSync: make(chan struct{}), - } - // Figure out whether to allow fast sync or not - if fastSync && blockchain.CurrentBlock().NumberU64() > 0 { - glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled") - fastSync = false - } - if fastSync { - manager.fastSync = uint32(1) - } - // Initiate a sub-protocol for every implemented version we can handle - - manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions)) - for i, version := range ProtocolVersions { - // Skip protocol version if incompatible with the mode of operation - if fastSync && version < eth63 { - continue - } - // Compatible; initialise the sub-protocol - version := version // Closure for the run - manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{ - - Name: ProtocolName, - Version: version, - Length: ProtocolLengths[i], - Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { - peer := manager.newPeer(int(version), p, rw) - select { - case manager.newPeerCh <- peer: - manager.wg.Add(1) - defer manager.wg.Done() - return manager.handle(peer) - case <-manager.quitSync: - return p2p.DiscQuitting - } - }, - NodeInfo: func() interface{} { - return manager.NodeInfo() - }, - PeerInfo: func(id discover.NodeID) interface{} { - if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil { - return p.Info() - } - return nil - }, - }) - } - if len(manager.SubProtocols) == 0 { - return nil, errIncompatibleConfig - } - // Construct the different synchronisation mechanisms - manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeader, - blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, - blockchain.GetTd, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback, - manager.removePeer) - - validator := func(block *types.Block, parent *types.Block) error { - return core.ValidateHeader(config, pow, block.Header(), parent.Header(), true, false) - } - heighter := func() uint64 { - return blockchain.CurrentBlock().NumberU64() - } - inserter := func(blocks types.Blocks) (int, error) { - atomic.StoreUint32(&manager.synced, 1) // Mark initial sync done on any fetcher import - return manager.insertChain(blocks) - } - manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer) - - if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 { - glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled") - manager.badBlockReportingEnabled = true - } - - return manager, nil -} - -func (pm *ProtocolManager) insertChain(blocks types.Blocks) (i int, err error) { - i, err = pm.blockchain.InsertChain(blocks) - if pm.badBlockReportingEnabled && core.IsValidationErr(err) && i < len(blocks) { - go sendBadBlockReport(blocks[i], err) - } - return i, err -} - -func (pm *ProtocolManager) removePeer(id 
string) { - // Short circuit if the peer was already removed - peer := pm.peers.Peer(id) - if peer == nil { - return - } - glog.V(logger.Debug).Infoln("Removing peer", id) - - // Unregister the peer from the downloader and Expanse peer set - pm.downloader.UnregisterPeer(id) - if err := pm.peers.Unregister(id); err != nil { - glog.V(logger.Error).Infoln("Removal failed:", err) - } - // Hard disconnect at the networking layer - if peer != nil { - peer.Peer.Disconnect(p2p.DiscUselessPeer) - } -} - -func (pm *ProtocolManager) Start() { - // broadcast transactions - pm.txSub = pm.eventMux.Subscribe(core.TxPreEvent{}) - go pm.txBroadcastLoop() - // broadcast mined blocks - pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{}) - go pm.minedBroadcastLoop() - - // start sync handlers - go pm.syncer() - go pm.txsyncLoop() -} - -func (pm *ProtocolManager) Stop() { - glog.V(logger.Info).Infoln("Stopping expanse protocol handler...") - - pm.txSub.Unsubscribe() // quits txBroadcastLoop - pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop - - // Quit the sync loop. - // After this send has completed, no new peers will be accepted. - pm.noMorePeers <- struct{}{} - - // Quit fetcher, txsyncLoop. - close(pm.quitSync) - - // Disconnect existing sessions. - // This also closes the gate for any new registrations on the peer set. - // sessions which are already established but not added to pm.peers yet - // will exit when they try to register. - pm.peers.Close() - - // Wait for all peer handler goroutines and the loops to come down. - pm.wg.Wait() - - glog.V(logger.Info).Infoln("Expanse protocol handler stopped") -} - -func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { - return newPeer(pv, p, newMeteredMsgWriter(rw)) -} - -// handle is the callback invoked to manage the life cycle of an exp peer. When -// this function terminates, the peer is disconnected. -func (pm *ProtocolManager) handle(p *peer) error { - glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name()) - - // Execute the Expanse handshake - td, head, genesis := pm.blockchain.Status() - if err := p.Handshake(pm.networkId, td, head, genesis); err != nil { - glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err) - return err - } - if rw, ok := p.rw.(*meteredMsgReadWriter); ok { - rw.Init(p.version) - } - // Register the peer locally - glog.V(logger.Detail).Infof("%v: adding peer", p) - if err := pm.peers.Register(p); err != nil { - glog.V(logger.Error).Infof("%v: addition failed: %v", p, err) - return err - } - defer pm.removePeer(p.id) - - // Register the peer in the downloader. If the downloader considers it banned, we disconnect - if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head, p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts, p.RequestNodeData); err != nil { - return err - } - // Propagate existing transactions. new transactions appearing - // after this will be sent via broadcasts. 
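// Life-cycle recap (a summary, not original text): handshake, metered writer
// init, local peer-set registration, downloader registration, the initial
// transaction sync on the next line, an optional DAO fork-header challenge,
// and finally the handleMsg loop until the first error, with the deferred
// removePeer handling cleanup throughout.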
- pm.syncTransactions(p) - - // If we're DAO hard-fork aware, validate any remote peer with regard to the hard-fork - if daoBlock := pm.chainconfig.DAOForkBlock; daoBlock != nil { - // Request the peer's DAO fork header for extra-data validation - if err := p.RequestHeadersByNumber(daoBlock.Uint64(), 1, 0, false); err != nil { - return err - } - // Start a timer to disconnect if the peer doesn't reply in time - p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() { - glog.V(logger.Warn).Infof("%v: timed out DAO fork-check, dropping", p) - pm.removePeer(p.id) - }) - // Make sure it's cleaned up if the peer dies off - defer func() { - if p.forkDrop != nil { - p.forkDrop.Stop() - p.forkDrop = nil - } - }() - } - // main loop. handle incoming messages. - for { - if err := pm.handleMsg(p); err != nil { - glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err) - return err - } - } -} - -// handleMsg is invoked whenever an inbound message is received from a remote -// peer. The remote connection is torn down upon returning any error. -func (pm *ProtocolManager) handleMsg(p *peer) error { - // Read the next message from the remote peer, and ensure it's fully consumed - msg, err := p.rw.ReadMsg() - if err != nil { - return err - } - if msg.Size > ProtocolMaxMsgSize { - return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) - } - defer msg.Discard() - - // Handle the message depending on its contents - switch { - case msg.Code == StatusMsg: - // Status messages should never arrive after the handshake - return errResp(ErrExtraStatusMsg, "uncontrolled status message") - - // Block header query, collect the requested headers and reply - case msg.Code == GetBlockHeadersMsg: - // Decode the complex header query - var query getBlockHeadersData - if err := msg.Decode(&query); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - hashMode := query.Origin.Hash != (common.Hash{}) - - // Gather headers until the fetch or network limits is reached - var ( - bytes common.StorageSize - headers []*types.Header - unknown bool - ) - for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch { - // Retrieve the next header satisfying the query - var origin *types.Header - if hashMode { - origin = pm.blockchain.GetHeader(query.Origin.Hash) - } else { - origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number) - } - if origin == nil { - break - } - headers = append(headers, origin) - bytes += estHeaderRlpSize - - // Advance to the next header of the query - switch { - case query.Origin.Hash != (common.Hash{}) && query.Reverse: - // Hash based traversal towards the genesis block - for i := 0; i < int(query.Skip)+1; i++ { - if header := pm.blockchain.GetHeader(query.Origin.Hash); header != nil { - query.Origin.Hash = header.ParentHash - } else { - unknown = true - break - } - } - case query.Origin.Hash != (common.Hash{}) && !query.Reverse: - // Hash based traversal towards the leaf block - var ( - current = origin.Number.Uint64() - next = current + query.Skip + 1 - ) - if next <= current { - infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") - glog.V(logger.Warn).Infof("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos) - unknown = true - } else { - if header := pm.blockchain.GetHeaderByNumber(next); header != nil { - if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { 
- query.Origin.Hash = header.Hash() - } else { - unknown = true - } - } else { - unknown = true - } - } - case query.Reverse: - // Number based traversal towards the genesis block - if query.Origin.Number >= query.Skip+1 { - query.Origin.Number -= (query.Skip + 1) - } else { - unknown = true - } - - case !query.Reverse: - // Number based traversal towards the leaf block - query.Origin.Number += (query.Skip + 1) - } - } - return p.SendBlockHeaders(headers) - - case msg.Code == BlockHeadersMsg: - // A batch of headers arrived to one of our previous requests - var headers []*types.Header - if err := msg.Decode(&headers); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // If no headers were received, but we're expending a DAO fork check, maybe it's that - if len(headers) == 0 && p.forkDrop != nil { - // Possibly an empty reply to the fork header checks, sanity check TDs - verifyDAO := true - - // If we already have a DAO header, we can check the peer's TD against it. If - // the peer's ahead of this, it too must have a reply to the DAO check - if daoHeader := pm.blockchain.GetHeaderByNumber(pm.chainconfig.DAOForkBlock.Uint64()); daoHeader != nil { - if _, td := p.Head(); td.Cmp(pm.blockchain.GetTd(daoHeader.Hash())) >= 0 { - verifyDAO = false - } - } - // If we're seemingly on the same chain, disable the drop timer - if verifyDAO { - glog.V(logger.Debug).Infof("%v: seems to be on the same side of the DAO fork", p) - p.forkDrop.Stop() - p.forkDrop = nil - return nil - } - } - // Filter out any explicitly requested headers, deliver the rest to the downloader - filter := len(headers) == 1 - if filter { - // If it's a potential DAO fork check, validate against the rules - if p.forkDrop != nil && pm.chainconfig.DAOForkBlock.Cmp(headers[0].Number) == 0 { - // Disable the fork drop timer - p.forkDrop.Stop() - p.forkDrop = nil - - // Validate the header and either drop the peer or continue - if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil { - glog.V(logger.Debug).Infof("%v: verified to be on the other side of the DAO fork, dropping", p) - return err - } - glog.V(logger.Debug).Infof("%v: verified to be on the same side of the DAO fork", p) - return nil - } - // Irrelevant of the fork checks, send the header to the fetcher just in case - headers = pm.fetcher.FilterHeaders(headers, time.Now()) - } - if len(headers) > 0 || !filter { - err := pm.downloader.DeliverHeaders(p.id, headers) - if err != nil { - glog.V(logger.Debug).Infoln(err) - } - } - - case msg.Code == GetBlockBodiesMsg: - // Decode the retrieval message - msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) - if _, err := msgStream.List(); err != nil { - return err - } - // Gather blocks until the fetch or network limits is reached - var ( - hash common.Hash - bytes int - bodies []rlp.RawValue - ) - for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch { - // Retrieve the hash of the next block - if err := msgStream.Decode(&hash); err == rlp.EOL { - break - } else if err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Retrieve the requested block body, stopping if enough was found - if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 { - bodies = append(bodies, data) - bytes += len(data) - } - } - return p.SendBlockBodiesRLP(bodies) - - case msg.Code == BlockBodiesMsg: - // A batch of block bodies arrived to one of our previous requests - var request blockBodiesData - if err := msg.Decode(&request); err != nil { - return 
errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Deliver them all to the downloader for queuing - trasactions := make([][]*types.Transaction, len(request)) - uncles := make([][]*types.Header, len(request)) - - for i, body := range request { - trasactions[i] = body.Transactions - uncles[i] = body.Uncles - } - // Filter out any explicitly requested bodies, deliver the rest to the downloader - filter := len(trasactions) > 0 || len(uncles) > 0 - if filter { - trasactions, uncles = pm.fetcher.FilterBodies(trasactions, uncles, time.Now()) - } - if len(trasactions) > 0 || len(uncles) > 0 || !filter { - err := pm.downloader.DeliverBodies(p.id, trasactions, uncles) - if err != nil { - glog.V(logger.Debug).Infoln(err) - } - } - - case p.version >= eth63 && msg.Code == GetNodeDataMsg: - // Decode the retrieval message - msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) - if _, err := msgStream.List(); err != nil { - return err - } - // Gather state data until the fetch or network limits is reached - var ( - hash common.Hash - bytes int - data [][]byte - ) - for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch { - // Retrieve the hash of the next state entry - if err := msgStream.Decode(&hash); err == rlp.EOL { - break - } else if err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Retrieve the requested state entry, stopping if enough was found - if entry, err := pm.chaindb.Get(hash.Bytes()); err == nil { - data = append(data, entry) - bytes += len(entry) - } - } - return p.SendNodeData(data) - - case p.version >= eth63 && msg.Code == NodeDataMsg: - // A batch of node state data arrived to one of our previous requests - var data [][]byte - if err := msg.Decode(&data); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Deliver all to the downloader - if err := pm.downloader.DeliverNodeData(p.id, data); err != nil { - glog.V(logger.Debug).Infof("failed to deliver node state data: %v", err) - } - - case p.version >= eth63 && msg.Code == GetReceiptsMsg: - // Decode the retrieval message - msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) - if _, err := msgStream.List(); err != nil { - return err - } - // Gather state data until the fetch or network limits is reached - var ( - hash common.Hash - bytes int - receipts []rlp.RawValue - ) - for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch { - // Retrieve the hash of the next block - if err := msgStream.Decode(&hash); err == rlp.EOL { - break - } else if err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Retrieve the requested block's receipts, skipping if unknown to us - results := core.GetBlockReceipts(pm.chaindb, hash) - if results == nil { - if header := pm.blockchain.GetHeader(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { - continue - } - } - // If known, encode and queue for response packet - if encoded, err := rlp.EncodeToBytes(results); err != nil { - glog.V(logger.Error).Infof("failed to encode receipt: %v", err) - } else { - receipts = append(receipts, encoded) - bytes += len(encoded) - } - } - return p.SendReceiptsRLP(receipts) - - case p.version >= eth63 && msg.Code == ReceiptsMsg: - // A batch of receipts arrived to one of our previous requests - var receipts [][]*types.Receipt - if err := msg.Decode(&receipts); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Deliver all to the downloader - if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil { - 
glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err) - } - - case msg.Code == NewBlockHashesMsg: - // Retrieve and deserialize the remote new block hashes notification - type announce struct { - Hash common.Hash - Number uint64 - } - var announces = []announce{} - - if p.version < eth62 { - // We're running the old protocol, make block number unknown (0) - var hashes []common.Hash - if err := msg.Decode(&hashes); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - for _, hash := range hashes { - announces = append(announces, announce{hash, 0}) - } - } else { - // Otherwise extract both block hash and number - var request newBlockHashesData - if err := msg.Decode(&request); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - for _, block := range request { - announces = append(announces, announce{block.Hash, block.Number}) - } - } - // Mark the hashes as present at the remote node - for _, block := range announces { - p.MarkBlock(block.Hash) - } - // Schedule all the unknown hashes for retrieval - unknown := make([]announce, 0, len(announces)) - for _, block := range announces { - if !pm.blockchain.HasBlock(block.Hash) { - unknown = append(unknown, block) - } - } - for _, block := range unknown { - pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies) - } - - case msg.Code == NewBlockMsg: - // Retrieve and decode the propagated block - var request newBlockData - if err := msg.Decode(&request); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - if err := request.Block.ValidateFields(); err != nil { - return errResp(ErrDecode, "block validation %v: %v", msg, err) - } - request.Block.ReceivedAt = msg.ReceivedAt - request.Block.ReceivedFrom = p - - // Mark the peer as owning the block and schedule it for import - p.MarkBlock(request.Block.Hash()) - pm.fetcher.Enqueue(p.id, request.Block) - - // Assuming the block is importable by the peer, but possibly not yet done so, - // calculate the head hash and TD that the peer truly must have. - var ( - trueHead = request.Block.ParentHash() - trueTD = new(big.Int).Sub(request.TD, request.Block.Difficulty()) - ) - // Update the peers total difficulty if better than the previous - if _, td := p.Head(); trueTD.Cmp(td) > 0 { - p.SetHead(trueHead, trueTD) - - // Schedule a sync if above ours. Note, this will not fire a sync for a gap of - // a singe block (as the true TD is below the propagated block), however this - // scenario should easily be covered by the fetcher. - currentBlock := pm.blockchain.CurrentBlock() - if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash())) > 0 { - go pm.synchronise(p) - } - } - - case msg.Code == TxMsg: - // Transactions arrived, make sure we have a valid and fresh chain to handle them - if atomic.LoadUint32(&pm.synced) == 0 { - break - } - // Transactions can be processed, parse all of them and deliver to the pool - var txs []*types.Transaction - if err := msg.Decode(&txs); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - for i, tx := range txs { - // Validate and mark the remote transaction - if tx == nil { - return errResp(ErrDecode, "transaction %d is nil", i) - } - p.MarkTransaction(tx.Hash()) - } - pm.txpool.AddBatch(txs) - - default: - return errResp(ErrInvalidMsgCode, "%v", msg.Code) - } - return nil -} - -// BroadcastBlock will either propagate a block to a subset of it's peers, or -// will only announce it's availability (depending what's requested). 
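// Worked example (illustrative): with 16 peers lacking the block, the
// propagate pass below sends the full block to sqrt(16) = 4 of them, and a
// later announce pass offers just the hash/number pair to all 16 once the
// block is in the local chain; minedBroadcastLoop triggers exactly that
// propagate-then-announce sequence.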
-func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { - hash := block.Hash() - peers := pm.peers.PeersWithoutBlock(hash) - - // If propagation is requested, send to a subset of the peer - if propagate { - // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) - var td *big.Int - if parent := pm.blockchain.GetBlock(block.ParentHash()); parent != nil { - td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash())) - } else { - glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]) - return - } - // Send the block to a subset of our peers - transfer := peers[:int(math.Sqrt(float64(len(peers))))] - for _, peer := range transfer { - peer.SendNewBlock(block, td) - } - glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)) - } - // Otherwise if the block is indeed in out own chain, announce it - if pm.blockchain.HasBlock(hash) { - for _, peer := range peers { - peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()}) - } - glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt)) - } -} - -// BroadcastTx will propagate a transaction to all peers which are not known to -// already have the given transaction. -func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) { - // Broadcast transaction to a batch of peers not knowing about it - peers := pm.peers.PeersWithoutTx(hash) - //FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))] - for _, peer := range peers { - peer.SendTransactions(types.Transactions{tx}) - } - glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers") -} - -// Mined broadcast loop -func (self *ProtocolManager) minedBroadcastLoop() { - // automatically stops if unsubscribe - for obj := range self.minedBlockSub.Chan() { - switch ev := obj.Data.(type) { - case core.NewMinedBlockEvent: - self.BroadcastBlock(ev.Block, true) // First propagate block to peers - self.BroadcastBlock(ev.Block, false) // Only then announce to the rest - } - } -} - -func (self *ProtocolManager) txBroadcastLoop() { - // automatically stops if unsubscribe - for obj := range self.txSub.Chan() { - event := obj.Data.(core.TxPreEvent) - self.BroadcastTx(event.Tx.Hash(), event.Tx) - } -} - -// EthNodeInfo represents a short summary of the Expanse sub-protocol metadata known -// about the host peer. -type EthNodeInfo struct { - Network int `json:"network"` // Expanse network ID (0=Olympic, 1=Frontier, 2=Morden) - Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain - Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block - Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block -} - -// NodeInfo retrieves some protocol metadata about the running host node. -func (self *ProtocolManager) NodeInfo() *EthNodeInfo { - return &EthNodeInfo{ - Network: self.networkId, - Difficulty: self.blockchain.GetTd(self.blockchain.CurrentBlock().Hash()), - Genesis: self.blockchain.Genesis().Hash(), - Head: self.blockchain.CurrentBlock().Hash(), - } -} diff --git a/exp/handler_test.go b/exp/handler_test.go deleted file mode 100644 index d3cc4b2262a8e..0000000000000 --- a/exp/handler_test.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . -package exp -import ( - "math" - "math/big" - "math/rand" - "testing" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/eth/downloader" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/params" -) - -// Tests that protocol versions and modes of operations are matched up properly. -func TestProtocolCompatibility(t *testing.T) { - // Define the compatibility chart - tests := []struct { - version uint - fastSync bool - compatible bool - }{ - {61, false, true}, {62, false, true}, {63, false, true}, - {61, true, false}, {62, true, false}, {63, true, true}, - } - // Make sure anything we screw up is restored - backup := ProtocolVersions - defer func() { ProtocolVersions = backup }() - - // Try all available compatibility configs and check for errors - for i, tt := range tests { - ProtocolVersions = []uint{tt.version} - - pm, err := newTestProtocolManager(tt.fastSync, 0, nil, nil) - if pm != nil { - defer pm.Stop() - } - if (err == nil && !tt.compatible) || (err != nil && tt.compatible) { - t.Errorf("test %d: compatibility mismatch: have error %v, want compatibility %v", i, err, tt.compatible) - } - } -} - -// Tests that block headers can be retrieved from a remote chain based on user queries. 
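// The raw message codes used below are assumed to follow the eth/62 wire
// layout: 0x03 for GetBlockHeadersMsg and 0x04 for BlockHeadersMsg, which is
// why p2p.Send writes 0x03 and p2p.ExpectMsg waits on 0x04.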
-func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) } -func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) } - -func testGetBlockHeaders(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil) - peer, _ := newTestPeer("peer", protocol, pm, true) - defer peer.close() - - // Create a "random" unknown hash for testing - var unknown common.Hash - for i, _ := range unknown { - unknown[i] = byte(i) - } - // Create a batch of tests for various scenarios - limit := uint64(downloader.MaxHeaderFetch) - tests := []struct { - query *getBlockHeadersData // The query to execute for header retrieval - expect []common.Hash // The hashes of the block whose headers are expected - }{ - // A single random block should be retrievable by hash and number too - { - &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, - []common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1}, - []common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, - }, - // Multiple headers should be retrievable in both directions - { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(limit / 2).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 + 1).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 + 2).Hash(), - }, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(limit / 2).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 - 1).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 - 2).Hash(), - }, - }, - // Multiple headers with skip lists should be retrievable - { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(limit / 2).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 + 4).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 + 8).Hash(), - }, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(limit / 2).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 - 4).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 - 8).Hash(), - }, - }, - // The chain endpoints should be retrievable - { - &getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1}, - []common.Hash{pm.blockchain.GetBlockByNumber(0).Hash()}, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64()}, Amount: 1}, - []common.Hash{pm.blockchain.CurrentBlock().Hash()}, - }, - // Ensure protocol limits are honored - { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, - pm.blockchain.GetBlockHashesFromHash(pm.blockchain.CurrentBlock().Hash(), limit), - }, - // Check that requesting more than available is handled gracefully - { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(), - pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64()).Hash(), - }, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, - []common.Hash{ - 
pm.blockchain.GetBlockByNumber(4).Hash(), - pm.blockchain.GetBlockByNumber(0).Hash(), - }, - }, - // Check that requesting more than available is handled gracefully, even if mid skip - { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(), - pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 1).Hash(), - }, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(4).Hash(), - pm.blockchain.GetBlockByNumber(1).Hash(), - }, - }, - // Check a corner case where requesting more can iterate past the endpoints - { - &getBlockHeadersData{Origin: hashOrNumber{Number: 2}, Amount: 5, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(2).Hash(), - pm.blockchain.GetBlockByNumber(1).Hash(), - pm.blockchain.GetBlockByNumber(0).Hash(), - }, - }, - // Check a corner case where skipping overflow loops back into the chain start - { - &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(3).Hash(), - }, - }, - // Check a corner case where skipping overflow loops back to the same header - { - &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(1).Hash(), - }, - }, - // Check that non-existing headers aren't returned - { - &getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1}, - []common.Hash{}, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() + 1}, Amount: 1}, - []common.Hash{}, - }, - } - // Run each of the tests and verify the results against the chain - for i, tt := range tests { - // Collect the headers to expect in the response - headers := []*types.Header{} - for _, hash := range tt.expect { - headers = append(headers, pm.blockchain.GetBlock(hash).Header()) - } - // Send the hash request and verify the response - p2p.Send(peer.app, 0x03, tt.query) - if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil { - t.Errorf("test %d: headers mismatch: %v", i, err) - } - // If the test used number origins, repeat with hashes as well - if tt.query.Origin.Hash == (common.Hash{}) { - if origin := pm.blockchain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { - tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 - - p2p.Send(peer.app, 0x03, tt.query) - if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil { - t.Errorf("test %d: headers mismatch: %v", i, err) - } - } - } - } -}
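The query table above pins down the GetBlockHeaders semantics: an origin (hash or number), a result cap, a skip distance between consecutive headers, a direction, and the uint64 overflow corner cases. As a rough sketch of what the test expects, not the actual handler, the block numbers a conforming responder would return can be computed like this (the 192-block MaxHeaderFetch value is an assumption about this codebase's downloader constants):

```go
package main

import (
	"fmt"
	"math"
)

// expectedNumbers mirrors the query semantics exercised above: start at
// origin, step skip+1 blocks per hop, return at most amount results, stay
// inside [0, head], and stop rather than wrap on uint64 overflow.
func expectedNumbers(origin, amount, skip, head uint64, reverse bool) []uint64 {
	step := skip + 1 // note: skip == MaxUint64 wraps step to 0
	nums := []uint64{}
	for n := origin; uint64(len(nums)) < amount && n <= head; {
		nums = append(nums, n)
		if reverse {
			if n < step || step == 0 {
				break // next hop would underflow (or repeat forever)
			}
			n -= step
		} else {
			if n > math.MaxUint64-step || step == 0 {
				break // next hop would overflow past any possible head
			}
			n += step
		}
	}
	return nums
}

func main() {
	head := uint64(192 + 15) // assumed MaxHeaderFetch+15 chain, as in the test setup
	fmt.Println(expectedNumbers(96, 3, 3, head, false)) // [96 100 104]
	fmt.Println(expectedNumbers(2, 5, 0, head, true))   // [2 1 0]
	fmt.Println(expectedNumbers(1, 2, math.MaxUint64, head, false)) // [1]: skip overflow returns only the origin
}
```

The overflow rows of the table fall out of the same arithmetic: a skip of MaxUint64-1 or MaxUint64 makes the second hop impossible, so only the origin header comes back.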
- -// Tests that block contents can be retrieved from a remote chain based on their hashes. -func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) } -func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) } - -func testGetBlockBodies(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, false, downloader.MaxBlockFetch+15, nil, nil) - peer, _ := newTestPeer("peer", protocol, pm, true) - defer peer.close() - - // Create a batch of tests for various scenarios - limit := downloader.MaxBlockFetch - tests := []struct { - random int // Number of blocks to fetch randomly from the chain - explicit []common.Hash // Explicitly requested blocks - available []bool // Availability of explicitly requested blocks - expected int // Total number of existing blocks to expect - }{ - {1, nil, nil, 1}, // A single random block should be retrievable - {10, nil, nil, 10}, // Multiple random blocks should be retrievable - {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable - {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned - {0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable - {0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable - {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non-existent block should not be returned - - // Existing and non-existing blocks interleaved should not cause problems - {0, []common.Hash{ - common.Hash{}, - pm.blockchain.GetBlockByNumber(1).Hash(), - common.Hash{}, - pm.blockchain.GetBlockByNumber(10).Hash(), - common.Hash{}, - pm.blockchain.GetBlockByNumber(100).Hash(), - common.Hash{}, - }, []bool{false, true, false, true, false, true, false}, 3}, - } - // Run each of the tests and verify the results against the chain - for i, tt := range tests { - // Collect the hashes to request, and the response to expect - hashes, seen := []common.Hash{}, make(map[int64]bool) - bodies := []*blockBody{} - - for j := 0; j < tt.random; j++ { - for { - num := rand.Int63n(int64(pm.blockchain.CurrentBlock().NumberU64())) - if !seen[num] { - seen[num] = true - - block := pm.blockchain.GetBlockByNumber(uint64(num)) - hashes = append(hashes, block.Hash()) - if len(bodies) < tt.expected { - bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) - } - break - } - } - } - for j, hash := range tt.explicit { - hashes = append(hashes, hash) - if tt.available[j] && len(bodies) < tt.expected { - block := pm.blockchain.GetBlock(hash) - bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) - } - } - // Send the hash request and verify the response - p2p.Send(peer.app, 0x05, hashes) - if err := p2p.ExpectMsg(peer.app, 0x06, bodies); err != nil { - t.Errorf("test %d: bodies mismatch: %v", i, err) - } - } -}
- -// Tests that the node state database can be retrieved based on hashes. -func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) } - -func testGetNodeData(t *testing.T, protocol int) { - // Define three accounts to simulate transactions with - acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) - - // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test) - generator := func(i int, block *core.BlockGen) { - switch i { - case 0: - // In block 1, the test bank sends account #1 some ether. - tx, _ := types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey) - block.AddTx(tx) - case 1: - // In block 2, the test bank sends some more ether to account #1. - // acc1Addr passes it on to account #2. - tx1, _ := types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey) - tx2, _ := types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key) - block.AddTx(tx1) - block.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by account #2. - block.SetCoinbase(acc2Addr) - block.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := block.PrevBlock(1).Header() - b2.Extra = []byte("foo") - block.AddUncle(b2) - b3 := block.PrevBlock(2).Header() - b3.Extra = []byte("foo") - block.AddUncle(b3) - } - } - // Assemble the test environment - pm := newTestProtocolManagerMust(t, false, 4, generator, nil) - peer, _ := newTestPeer("peer", protocol, pm, true) - defer peer.close() - - // For now, fetch the entire chain db - hashes := []common.Hash{} - for _, key := range pm.chaindb.(*ethdb.MemDatabase).Keys() { - if len(key) == len(common.Hash{}) { - hashes = append(hashes, common.BytesToHash(key)) - } - } - p2p.Send(peer.app, 0x0d, hashes) - msg, err := peer.app.ReadMsg() - if err != nil { - t.Fatalf("failed to read node data response: %v", err) - } - if msg.Code != 0x0e { - t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, 0x0e) - } - var data [][]byte - if err := msg.Decode(&data); err != nil { - t.Fatalf("failed to decode response node data: %v", err) - } - // Verify that all hashes correspond to the requested data, and reconstruct a state tree - for i, want := range hashes { - if hash := crypto.Keccak256Hash(data[i]); hash != want { - t.Errorf("data hash mismatch: have %x, want %x", hash, want) - } - } - statedb, _ := ethdb.NewMemDatabase() - for i := 0; i < len(data); i++ { - statedb.Put(hashes[i].Bytes(), data[i]) - } - accounts := []common.Address{testBank.Address, acc1Addr, acc2Addr} - for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { - trie, _ := state.New(pm.blockchain.GetBlockByNumber(i).Root(), statedb) - - for j, acc := range accounts { - state, _ := pm.blockchain.State() - bw := state.GetBalance(acc) - bh := trie.GetBalance(acc) - - if (bw != nil && bh == nil) || (bw == nil && bh != nil) { - t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - if bw != nil && bh != nil && bw.Cmp(bh) != 0 { - t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - } - } -}
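The node data test leans on the state database being content-addressed: every returned value must hash (Keccak-256) back to the key it was requested under, so the response can be verified and re-inserted into a fresh database without trusting the peer at all. A minimal sketch of that check, assuming the golang.org/x/crypto/sha3 package for legacy Keccak-256:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3" // assumed dependency for legacy Keccak-256
)

// verifyNodeData re-hashes each returned value, checks it against the hash
// it was requested under, and stores verified pairs in a fresh database,
// mirroring the reconstruction loop in the test above.
func verifyNodeData(hashes, data [][]byte) (map[string][]byte, error) {
	if len(hashes) != len(data) {
		return nil, fmt.Errorf("item count mismatch: %d hashes, %d values", len(hashes), len(data))
	}
	db := make(map[string][]byte, len(data))
	for i, want := range hashes {
		h := sha3.NewLegacyKeccak256()
		h.Write(data[i])
		if have := h.Sum(nil); !bytes.Equal(have, want) {
			return nil, fmt.Errorf("item %d: hash mismatch: have %x, want %x", i, have, want)
		}
		db[string(want)] = data[i]
	}
	return db, nil
}

func main() {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte("node"))
	db, err := verifyNodeData([][]byte{h.Sum(nil)}, [][]byte{[]byte("node")})
	fmt.Println(len(db), err) // 1 <nil>
}
```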
- -// Tests that the transaction receipts can be retrieved based on hashes. -func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) } - -func testGetReceipt(t *testing.T, protocol int) { - // Define three accounts to simulate transactions with - acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) - - // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test) - generator := func(i int, block *core.BlockGen) { - switch i { - case 0: - // In block 1, the test bank sends account #1 some ether. - tx, _ := types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey) - block.AddTx(tx) - case 1: - // In block 2, the test bank sends some more ether to account #1. - // acc1Addr passes it on to account #2. - tx1, _ := types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey) - tx2, _ := types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key) - block.AddTx(tx1) - block.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by account #2. - block.SetCoinbase(acc2Addr) - block.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := block.PrevBlock(1).Header() - b2.Extra = []byte("foo") - block.AddUncle(b2) - b3 := block.PrevBlock(2).Header() - b3.Extra = []byte("foo") - block.AddUncle(b3) - } - } - // Assemble the test environment - pm := newTestProtocolManagerMust(t, false, 4, generator, nil) - peer, _ := newTestPeer("peer", protocol, pm, true) - defer peer.close() - - // Collect the hashes to request, and the response to expect - hashes, receipts := []common.Hash{}, []types.Receipts{} - for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { - block := pm.blockchain.GetBlockByNumber(i) - - hashes = append(hashes, block.Hash()) - receipts = append(receipts, core.GetBlockReceipts(pm.chaindb, block.Hash())) - } - // Send the hash request and verify the response - p2p.Send(peer.app, 0x0f, hashes) - if err := p2p.ExpectMsg(peer.app, 0x10, receipts); err != nil { - t.Errorf("receipts mismatch: %v", err) - } -} - -// Tests that post eth protocol handshake, DAO fork-enabled clients also execute -// a DAO "challenge" verifying each others' DAO fork headers to ensure they're on -// compatible chains.
-func TestDAOChallengeNoVsNo(t *testing.T) { testDAOChallenge(t, false, false, false) } -func TestDAOChallengeNoVsPro(t *testing.T) { testDAOChallenge(t, false, true, false) } -func TestDAOChallengeProVsNo(t *testing.T) { testDAOChallenge(t, true, false, false) } -func TestDAOChallengeProVsPro(t *testing.T) { testDAOChallenge(t, true, true, false) } -func TestDAOChallengeNoVsTimeout(t *testing.T) { testDAOChallenge(t, false, false, true) } -func TestDAOChallengeProVsTimeout(t *testing.T) { testDAOChallenge(t, true, true, true) } - -func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool) { - // Reduce the DAO handshake challenge timeout - if timeout { - defer func(old time.Duration) { daoChallengeTimeout = old }(daoChallengeTimeout) - daoChallengeTimeout = 500 * time.Millisecond - } - // Create a DAO aware protocol manager - var ( - evmux = new(event.TypeMux) - pow = new(core.FakePow) - db, _ = ethdb.NewMemDatabase() - genesis = core.WriteGenesisBlockForTesting(db) - config = &core.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked} - blockchain, _ = core.NewBlockChain(db, config, pow, evmux) - ) - pm, err := NewProtocolManager(config, false, NetworkId, evmux, new(testTxPool), pow, blockchain, db) - if err != nil { - t.Fatalf("failed to start test protocol manager: %v", err) - } - pm.Start() - defer pm.Stop() - - // Connect a new peer and check that we receive the DAO challenge - peer, _ := newTestPeer("peer", eth63, pm, true) - defer peer.close() - - challenge := &getBlockHeadersData{ - Origin: hashOrNumber{Number: config.DAOForkBlock.Uint64()}, - Amount: 1, - Skip: 0, - Reverse: false, - } - if err := p2p.ExpectMsg(peer.app, GetBlockHeadersMsg, challenge); err != nil { - t.Fatalf("challenge mismatch: %v", err) - } - // Create a block to reply to the challenge if no timeout is simulated - if !timeout { - blocks, _ := core.GenerateChain(nil, genesis, db, 1, func(i int, block *core.BlockGen) { - if remoteForked { - block.SetExtra(params.DAOForkBlockExtra) - } - }) - if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{blocks[0].Header()}); err != nil { - t.Fatalf("failed to answer challenge: %v", err) - } - time.Sleep(100 * time.Millisecond) // Sleep to avoid the verification racing with the drops - } else { - // Otherwise wait until the test timeout passes - time.Sleep(daoChallengeTimeout + 500*time.Millisecond) - } - // Verify that depending on fork side, the remote peer is maintained or dropped - if localForked == remoteForked && !timeout { - if peers := pm.peers.Len(); peers != 1 { - t.Fatalf("peer count mismatch: have %d, want %d", peers, 1) - } - } else { - if peers := pm.peers.Len(); peers != 0 { - t.Fatalf("peer count mismatch: have %d, want %d", peers, 0) - } - }
-}
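The challenge works because pro-fork and no-fork clients are required to publish different extra-data in the fork block's header, so once the remote peer answers with that header, validation is a byte comparison plus a drop deadline. A hedged sketch of the receiving side (daoForkBlockExtra's value and the validateDAOExtra helper are illustrative stand-ins, not the handler's real structure):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"time"
)

// daoForkBlockExtra stands in for params.DAOForkBlockExtra: the fixed
// extra-data pro-fork clients place in the fork block's header.
var daoForkBlockExtra = []byte("dao-hard-fork")

// validateDAOExtra compares a remote fork-block header's extra-data against
// our own fork stance: matching stances keep the peer, mismatches drop it.
func validateDAOExtra(remoteExtra []byte, localForked bool) error {
	remoteForked := bytes.Equal(remoteExtra, daoForkBlockExtra)
	if remoteForked != localForked {
		return errors.New("DAO fork stance mismatch, dropping peer")
	}
	return nil
}

func main() {
	// Arm a drop timer when the challenge is sent; disarm it on a valid
	// reply, the same role forkDrop plays in the peer struct below.
	timeout := time.AfterFunc(500*time.Millisecond, func() {
		fmt.Println("challenge timed out, dropping peer")
	})
	if err := validateDAOExtra(daoForkBlockExtra, true); err == nil {
		timeout.Stop() // pro-fork peer answered a pro-fork node in time
		fmt.Println("peer kept")
	}
}
```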
diff --git a/exp/helper_test.go b/exp/helper_test.go deleted file mode 100644 index 53efef7d93757..0000000000000 --- a/exp/helper_test.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// This file contains some shared testing functionality, common to multiple -// different files and modules being tested. - -package exp - -import ( - "crypto/ecdsa" - "crypto/rand" - "math/big" - "sort" - "sync" - "testing" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/ethdb" - "github.com/expanse-org/go-expanse/event" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/p2p/discover" -) - -var ( - testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testBank = core.GenesisAccount{ - Address: crypto.PubkeyToAddress(testBankKey.PublicKey), - Balance: big.NewInt(1000000), - } -) - -// newTestProtocolManager creates a new protocol manager for testing purposes, -// with the given number of blocks already known, and potential notification -// channels for different events. -func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) { - var ( - evmux = new(event.TypeMux) - pow = new(core.FakePow) - db, _ = ethdb.NewMemDatabase() - genesis = core.WriteGenesisBlockForTesting(db, testBank) - chainConfig = &core.ChainConfig{HomesteadBlock: big.NewInt(0)} // homestead set to 0 because of chain maker - blockchain, _ = core.NewBlockChain(db, chainConfig, pow, evmux) - ) - chain, _ := core.GenerateChain(nil, genesis, db, blocks, generator) - if _, err := blockchain.InsertChain(chain); err != nil { - panic(err) - } - - pm, err := NewProtocolManager(chainConfig, fastSync, NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) - if err != nil { - return nil, err - } - pm.Start() - return pm, nil -} - -// newTestProtocolManagerMust creates a new protocol manager for testing purposes, -// with the given number of blocks already known, and potential notification -// channels for different events. In case of an error, the constructor force- -// fails the test. -func newTestProtocolManagerMust(t *testing.T, fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { - pm, err := newTestProtocolManager(fastSync, blocks, generator, newtx) - if err != nil { - t.Fatalf("Failed to create protocol manager: %v", err) - } - return pm -} - -// testTxPool is a fake, helper transaction pool for testing purposes -type testTxPool struct { - pool []*types.Transaction // Collection of all transactions - added chan<- []*types.Transaction // Notification channel for new transactions - - lock sync.RWMutex // Protects the transaction pool -} - -// AddBatch appends a batch of transactions to the pool, and notifies any -// listeners if the addition channel is non-nil -func (p *testTxPool) AddBatch(txs []*types.Transaction) { - p.lock.Lock() - defer p.lock.Unlock() - - p.pool = append(p.pool, txs...)
- if p.added != nil { - p.added <- txs - } -} - -// Pending returns all the transactions known to the pool -func (p *testTxPool) Pending() map[common.Address]types.Transactions { - p.lock.RLock() - defer p.lock.RUnlock() - - batches := make(map[common.Address]types.Transactions) - for _, tx := range p.pool { - from, _ := tx.From() - batches[from] = append(batches[from], tx) - } - for _, batch := range batches { - sort.Sort(types.TxByNonce(batch)) - } - return batches -} - -// newTestTransaction creates a new dummy transaction. -func newTestTransaction(from *ecdsa.PrivateKey, nonce uint64, datasize int) *types.Transaction { - tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), make([]byte, datasize)) - tx, _ = tx.SignECDSA(from) - return tx -} - -// testPeer is a simulated peer to allow testing direct network calls. -type testPeer struct { - net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging - app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side - *peer -} - -// newTestPeer creates a new peer registered at the given protocol manager. -func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*testPeer, <-chan error) { - // Create a message pipe to communicate through - app, net := p2p.MsgPipe() - - // Generate a random id and create the peer - var id discover.NodeID - rand.Read(id[:]) - - peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net) - - // Start the peer on a new thread - errc := make(chan error, 1) - go func() { - select { - case pm.newPeerCh <- peer: - errc <- pm.handle(peer) - case <-pm.quitSync: - errc <- p2p.DiscQuitting - } - }() - tp := &testPeer{app: app, net: net, peer: peer} - // Execute any implicitly requested handshakes and return - if shake { - td, head, genesis := pm.blockchain.Status() - tp.handshake(nil, td, head, genesis) - } - return tp, errc -} - -// handshake simulates a trivial handshake that expects the same state from the -// remote side as we are simulating locally. -func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash) { - msg := &statusData{ - ProtocolVersion: uint32(p.version), - NetworkId: uint32(NetworkId), - TD: td, - CurrentBlock: head, - GenesisBlock: genesis, - } - if err := p2p.ExpectMsg(p.app, StatusMsg, msg); err != nil { - t.Fatalf("status recv: %v", err) - } - if err := p2p.Send(p.app, StatusMsg, msg); err != nil { - t.Fatalf("status send: %v", err) - } -} - -// close terminates the local side of the peer, notifying the remote protocol -// manager of termination. -func (p *testPeer) close() { - p.app.Close() -}
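Pending above turns the flat pool into per-sender batches and nonce-sorts each batch with types.TxByNonce under a read lock. The same shape with plain types, as a sketch (Tx and byNonce are illustrative stand-ins, not the library's types):

```go
package main

import (
	"fmt"
	"sort"
)

// Tx is a stand-in for *types.Transaction with only what the grouping
// needs: a recovered sender and a nonce.
type Tx struct {
	From  string
	Nonce uint64
}

// byNonce plays the role of types.TxByNonce.
type byNonce []Tx

func (s byNonce) Len() int           { return len(s) }
func (s byNonce) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byNonce) Less(i, j int) bool { return s[i].Nonce < s[j].Nonce }

// pending groups a flat pool by sender and sorts each group by nonce,
// mirroring testTxPool.Pending above.
func pending(pool []Tx) map[string][]Tx {
	batches := make(map[string][]Tx)
	for _, tx := range pool {
		batches[tx.From] = append(batches[tx.From], tx)
	}
	for _, batch := range batches {
		sort.Sort(byNonce(batch))
	}
	return batches
}

func main() {
	pool := []Tx{{"a", 2}, {"b", 0}, {"a", 0}, {"a", 1}}
	fmt.Println(pending(pool)) // map[a:[{a 0} {a 1} {a 2}] b:[{b 0}]]
}
```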
diff --git a/exp/metrics.go b/exp/metrics.go deleted file mode 100644 index d42f372f31d87..0000000000000 --- a/exp/metrics.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package exp - -import ( - "github.com/expanse-org/go-expanse/metrics" - "github.com/expanse-org/go-expanse/p2p" -) - -var ( - propTxnInPacketsMeter = metrics.NewMeter("eth/prop/txns/in/packets") - propTxnInTrafficMeter = metrics.NewMeter("eth/prop/txns/in/traffic") - propTxnOutPacketsMeter = metrics.NewMeter("eth/prop/txns/out/packets") - propTxnOutTrafficMeter = metrics.NewMeter("eth/prop/txns/out/traffic") - propHashInPacketsMeter = metrics.NewMeter("eth/prop/hashes/in/packets") - propHashInTrafficMeter = metrics.NewMeter("eth/prop/hashes/in/traffic") - propHashOutPacketsMeter = metrics.NewMeter("eth/prop/hashes/out/packets") - propHashOutTrafficMeter = metrics.NewMeter("eth/prop/hashes/out/traffic") - propBlockInPacketsMeter = metrics.NewMeter("eth/prop/blocks/in/packets") - propBlockInTrafficMeter = metrics.NewMeter("eth/prop/blocks/in/traffic") - propBlockOutPacketsMeter = metrics.NewMeter("eth/prop/blocks/out/packets") - propBlockOutTrafficMeter = metrics.NewMeter("eth/prop/blocks/out/traffic") - reqHeaderInPacketsMeter = metrics.NewMeter("eth/req/headers/in/packets") - reqHeaderInTrafficMeter = metrics.NewMeter("eth/req/headers/in/traffic") - reqHeaderOutPacketsMeter = metrics.NewMeter("eth/req/headers/out/packets") - reqHeaderOutTrafficMeter = metrics.NewMeter("eth/req/headers/out/traffic") - reqBodyInPacketsMeter = metrics.NewMeter("eth/req/bodies/in/packets") - reqBodyInTrafficMeter = metrics.NewMeter("eth/req/bodies/in/traffic") - reqBodyOutPacketsMeter = metrics.NewMeter("eth/req/bodies/out/packets") - reqBodyOutTrafficMeter = metrics.NewMeter("eth/req/bodies/out/traffic") - reqStateInPacketsMeter = metrics.NewMeter("eth/req/states/in/packets") - reqStateInTrafficMeter = metrics.NewMeter("eth/req/states/in/traffic") - reqStateOutPacketsMeter = metrics.NewMeter("eth/req/states/out/packets") - reqStateOutTrafficMeter = metrics.NewMeter("eth/req/states/out/traffic") - reqReceiptInPacketsMeter = metrics.NewMeter("eth/req/receipts/in/packets") - reqReceiptInTrafficMeter = metrics.NewMeter("eth/req/receipts/in/traffic") - reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets") - reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic") - miscInPacketsMeter = metrics.NewMeter("eth/misc/in/packets") - miscInTrafficMeter = metrics.NewMeter("eth/misc/in/traffic") - miscOutPacketsMeter = metrics.NewMeter("eth/misc/out/packets") - miscOutTrafficMeter = metrics.NewMeter("eth/misc/out/traffic") -) - -// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of -// accumulating the above defined metrics based on the data stream contents. -type meteredMsgReadWriter struct { - p2p.MsgReadWriter // Wrapped message stream to meter - version int // Protocol version to select correct meters -} - -// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the -// metrics system is disabled, this function returns the original object. -func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter { - if !metrics.Enabled { - return rw - } - return &meteredMsgReadWriter{MsgReadWriter: rw} -} - -// Init sets the protocol version used by the stream to know which meters to -// increment in case of overlapping message ids between protocol versions.
-func (rw *meteredMsgReadWriter) Init(version int) { - rw.version = version -} - -func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) { - // Read the message and short circuit in case of an error - msg, err := rw.MsgReadWriter.ReadMsg() - if err != nil { - return msg, err - } - // Account for the data traffic - packets, traffic := miscInPacketsMeter, miscInTrafficMeter - switch { - case msg.Code == BlockHeadersMsg: - packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter - case msg.Code == BlockBodiesMsg: - packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter - - case rw.version >= eth63 && msg.Code == NodeDataMsg: - packets, traffic = reqStateInPacketsMeter, reqStateInTrafficMeter - case rw.version >= eth63 && msg.Code == ReceiptsMsg: - packets, traffic = reqReceiptInPacketsMeter, reqReceiptInTrafficMeter - - case msg.Code == NewBlockHashesMsg: - packets, traffic = propHashInPacketsMeter, propHashInTrafficMeter - case msg.Code == NewBlockMsg: - packets, traffic = propBlockInPacketsMeter, propBlockInTrafficMeter - case msg.Code == TxMsg: - packets, traffic = propTxnInPacketsMeter, propTxnInTrafficMeter - } - packets.Mark(1) - traffic.Mark(int64(msg.Size)) - - return msg, err -} - -func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error { - // Account for the data traffic - packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter - switch { - case msg.Code == BlockHeadersMsg: - packets, traffic = reqHeaderOutPacketsMeter, reqHeaderOutTrafficMeter - case msg.Code == BlockBodiesMsg: - packets, traffic = reqBodyOutPacketsMeter, reqBodyOutTrafficMeter - - case rw.version >= eth63 && msg.Code == NodeDataMsg: - packets, traffic = reqStateOutPacketsMeter, reqStateOutTrafficMeter - case rw.version >= eth63 && msg.Code == ReceiptsMsg: - packets, traffic = reqReceiptOutPacketsMeter, reqReceiptOutTrafficMeter - - case msg.Code == NewBlockHashesMsg: - packets, traffic = propHashOutPacketsMeter, propHashOutTrafficMeter - case msg.Code == NewBlockMsg: - packets, traffic = propBlockOutPacketsMeter, propBlockOutTrafficMeter - case msg.Code == TxMsg: - packets, traffic = propTxnOutPacketsMeter, propTxnOutTrafficMeter - } - packets.Mark(1) - traffic.Mark(int64(msg.Size)) - - // Send the packet to the p2p layer - return rw.MsgReadWriter.WriteMsg(msg) -}
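metrics.go is a pure decorator: the wrapped p2p.MsgReadWriter is embedded, so every method passes through untouched, and only ReadMsg/WriteMsg are overridden to mark packet and byte meters before delegating. The same pattern applied to io.Reader, as a minimal standalone sketch:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// meteredReader wraps an io.Reader the way meteredMsgReadWriter wraps a
// p2p.MsgReadWriter: embed the interface, override only what needs metering.
type meteredReader struct {
	io.Reader                 // wrapped stream; all other methods pass through
	reads, bytes atomic.Int64 // accumulated counters (meters in the original)
}

// Read delegates to the wrapped stream, then accounts for the traffic.
func (r *meteredReader) Read(p []byte) (int, error) {
	n, err := r.Reader.Read(p)
	r.reads.Add(1)
	r.bytes.Add(int64(n))
	return n, err
}

func main() {
	r := &meteredReader{Reader: strings.NewReader("hello world")}
	io.Copy(io.Discard, r)
	fmt.Printf("reads=%d bytes=%d\n", r.reads.Load(), r.bytes.Load())
}
```

Returning the original object when metering is disabled, as newMeteredMsgWriter does, keeps the zero-overhead path free of an extra indirection.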
diff --git a/exp/peer.go b/exp/peer.go deleted file mode 100644 index 97d6826e8592d..0000000000000 --- a/exp/peer.go +++ /dev/null @@ -1,419 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package exp - -import ( - "errors" - "fmt" - "math/big" - "sync" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/rlp" - "gopkg.in/fatih/set.v0" -) - -var ( - errClosed = errors.New("peer set is closed") - errAlreadyRegistered = errors.New("peer is already registered") - errNotRegistered = errors.New("peer is not registered") -) - -const ( - maxKnownTxs = 32768 // Maximum transaction hashes to keep in the known list (prevent DOS) - maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS) - handshakeTimeout = 5 * time.Second -) - -// PeerInfo represents a short summary of the Expanse sub-protocol metadata known -// about a connected peer. -type PeerInfo struct { - Version int `json:"version"` // Expanse protocol version negotiated - Difficulty *big.Int `json:"difficulty"` // Total difficulty of the peer's blockchain - Head string `json:"head"` // SHA3 hash of the peer's best owned block -} - -type peer struct { - id string - - *p2p.Peer - rw p2p.MsgReadWriter - - version int // Protocol version negotiated - forkDrop *time.Timer // Timed connection dropper if forks aren't validated in time - - head common.Hash - td *big.Int - lock sync.RWMutex - - knownTxs *set.Set // Set of transaction hashes known to be known by this peer - knownBlocks *set.Set // Set of block hashes known to be known by this peer -} - -func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { - id := p.ID() - - return &peer{ - Peer: p, - rw: rw, - version: version, - id: fmt.Sprintf("%x", id[:8]), - knownTxs: set.New(), - knownBlocks: set.New(), - } -} - -// Info gathers and returns a collection of metadata known about a peer. -func (p *peer) Info() *PeerInfo { - hash, td := p.Head() - - return &PeerInfo{ - Version: p.version, - Difficulty: td, - Head: hash.Hex(), - } -} - -// Head retrieves a copy of the current head hash and total difficulty of the -// peer. -func (p *peer) Head() (hash common.Hash, td *big.Int) { - p.lock.RLock() - defer p.lock.RUnlock() - - copy(hash[:], p.head[:]) - return hash, new(big.Int).Set(p.td) -} - -// SetHead updates the head hash and total difficulty of the peer. -func (p *peer) SetHead(hash common.Hash, td *big.Int) { - p.lock.Lock() - defer p.lock.Unlock() - - copy(p.head[:], hash[:]) - p.td.Set(td) -} - -// MarkBlock marks a block as known for the peer, ensuring that the block will -// never be propagated to this particular peer. -func (p *peer) MarkBlock(hash common.Hash) { - // If we reached the memory allowance, drop a previously known block hash - for p.knownBlocks.Size() >= maxKnownBlocks { - p.knownBlocks.Pop() - } - p.knownBlocks.Add(hash) -} - -// MarkTransaction marks a transaction as known for the peer, ensuring that it -// will never be propagated to this particular peer. -func (p *peer) MarkTransaction(hash common.Hash) { - // If we reached the memory allowance, drop a previously known transaction hash - for p.knownTxs.Size() >= maxKnownTxs { - p.knownTxs.Pop() - } - p.knownTxs.Add(hash) -} - -// SendTransactions sends transactions to the peer and includes the hashes -// in its transaction hash set for future reference.
-func (p *peer) SendTransactions(txs types.Transactions) error { - for _, tx := range txs { - p.knownTxs.Add(tx.Hash()) - } - return p2p.Send(p.rw, TxMsg, txs) -} - -// SendNewBlockHashes announces the availability of a number of blocks through -// a hash notification. -func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error { - for _, hash := range hashes { - p.knownBlocks.Add(hash) - } - request := make(newBlockHashesData, len(hashes)) - for i := 0; i < len(hashes); i++ { - request[i].Hash = hashes[i] - request[i].Number = numbers[i] - } - return p2p.Send(p.rw, NewBlockHashesMsg, request) -} - -// SendNewBlock propagates an entire block to a remote peer. -func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error { - p.knownBlocks.Add(block.Hash()) - return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td}) -} - -// SendBlockHeaders sends a batch of block headers to the remote peer. -func (p *peer) SendBlockHeaders(headers []*types.Header) error { - return p2p.Send(p.rw, BlockHeadersMsg, headers) -} - -// SendBlockBodies sends a batch of block contents to the remote peer. -func (p *peer) SendBlockBodies(bodies []*blockBody) error { - return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies)) -} - -// SendBlockBodiesRLP sends a batch of block contents to the remote peer from -// an already RLP encoded format. -func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error { - return p2p.Send(p.rw, BlockBodiesMsg, bodies) -} - -// SendNodeData sends a batch of arbitrary internal data, corresponding to the -// hashes requested. -func (p *peer) SendNodeData(data [][]byte) error { - return p2p.Send(p.rw, NodeDataMsg, data) -} - -// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the -// ones requested from an already RLP encoded format. -func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error { - return p2p.Send(p.rw, ReceiptsMsg, receipts) -} - -// RequestOneHeader is a wrapper around the header query functions to fetch a -// single header. It is used solely by the fetcher. -func (p *peer) RequestOneHeader(hash common.Hash) error { - glog.V(logger.Debug).Infof("%v fetching a single header: %x", p, hash) - return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false}) -} - -// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the -// specified header query, based on the hash of an origin block. -func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { - glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse) - return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) -} - -// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the -// specified header query, based on the number of an origin block. -func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { - glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse) - return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) -} - -// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes -// specified.
-func (p *peer) RequestBodies(hashes []common.Hash) error { - glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes)) - return p2p.Send(p.rw, GetBlockBodiesMsg, hashes) -} - -// RequestNodeData fetches a batch of arbitrary data from a node's known state -// data, corresponding to the specified hashes. -func (p *peer) RequestNodeData(hashes []common.Hash) error { - glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(hashes)) - return p2p.Send(p.rw, GetNodeDataMsg, hashes) -} - -// RequestReceipts fetches a batch of transaction receipts from a remote node. -func (p *peer) RequestReceipts(hashes []common.Hash) error { - glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes)) - return p2p.Send(p.rw, GetReceiptsMsg, hashes) -} - -// Handshake executes the eth protocol handshake, negotiating version number, -// network IDs, difficulties, head and genesis blocks. -func (p *peer) Handshake(network int, td *big.Int, head common.Hash, genesis common.Hash) error { - // Send our own handshake in a new thread - errc := make(chan error, 2) - var status statusData // safe to read after two values have been received from errc - - go func() { - errc <- p2p.Send(p.rw, StatusMsg, &statusData{ - ProtocolVersion: uint32(p.version), - NetworkId: uint32(network), - TD: td, - CurrentBlock: head, - GenesisBlock: genesis, - }) - }() - go func() { - errc <- p.readStatus(network, &status, genesis) - }() - timeout := time.NewTimer(handshakeTimeout) - defer timeout.Stop() - for i := 0; i < 2; i++ { - select { - case err := <-errc: - if err != nil { - return err - } - case <-timeout.C: - return p2p.DiscReadTimeout - } - } - p.td, p.head = status.TD, status.CurrentBlock - return nil -} - -func (p *peer) readStatus(network int, status *statusData, genesis common.Hash) (err error) { - msg, err := p.rw.ReadMsg() - if err != nil { - return err - } - if msg.Code != StatusMsg { - return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg) - } - if msg.Size > ProtocolMaxMsgSize { - return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) - } - // Decode the handshake and make sure everything matches - if err := msg.Decode(&status); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - if status.GenesisBlock != genesis { - return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock, genesis) - } - if int(status.NetworkId) != network { - return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network) - } - if int(status.ProtocolVersion) != p.version { - return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version) - } - return nil -} - -// String implements fmt.Stringer. -func (p *peer) String() string { - return fmt.Sprintf("Peer %s [%s]", p.id, - fmt.Sprintf("exp/%2d", p.version), - ) -} - -// peerSet represents the collection of active peers currently participating in -// the Expanse sub-protocol. -type peerSet struct { - peers map[string]*peer - lock sync.RWMutex - closed bool -} - -// newPeerSet creates a new peer set to track the active participants. -func newPeerSet() *peerSet { - return &peerSet{ - peers: make(map[string]*peer), - }
-}
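Handshake above runs the status send and the status read in two goroutines that both report into one buffered channel, then a single loop collects exactly two results or gives up on a shared timer. That concurrency pattern in isolation, as a sketch:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// exchange runs send and recv concurrently and waits for both to finish,
// failing fast on the first error or when the deadline passes, mirroring
// the structure of peer.Handshake above.
func exchange(send, recv func() error, deadline time.Duration) error {
	errc := make(chan error, 2) // buffered: neither goroutine can leak blocked
	go func() { errc <- send() }()
	go func() { errc <- recv() }()

	timeout := time.NewTimer(deadline)
	defer timeout.Stop()
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				return err
			}
		case <-timeout.C:
			return errors.New("handshake timed out")
		}
	}
	return nil
}

func main() {
	err := exchange(
		func() error { return nil }, // send our status
		func() error { time.Sleep(10 * time.Millisecond); return nil }, // read theirs
		time.Second,
	)
	fmt.Println(err) // <nil>
}
```

The channel capacity of two matters: even if the deadline fires first, both goroutines can still complete their sends and terminate instead of blocking forever.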
- -// Register injects a new peer into the working set, or returns an error if the -// peer is already known. -func (ps *peerSet) Register(p *peer) error { - ps.lock.Lock() - defer ps.lock.Unlock() - - if ps.closed { - return errClosed - } - if _, ok := ps.peers[p.id]; ok { - return errAlreadyRegistered - } - ps.peers[p.id] = p - return nil -} - -// Unregister removes a remote peer from the active set, disabling any further -// actions to/from that particular entity. -func (ps *peerSet) Unregister(id string) error { - ps.lock.Lock() - defer ps.lock.Unlock() - - if _, ok := ps.peers[id]; !ok { - return errNotRegistered - } - delete(ps.peers, id) - return nil -} - -// Peer retrieves the registered peer with the given id. -func (ps *peerSet) Peer(id string) *peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - return ps.peers[id] -} - -// Len returns the current number of peers in the set. -func (ps *peerSet) Len() int { - ps.lock.RLock() - defer ps.lock.RUnlock() - - return len(ps.peers) -} - -// PeersWithoutBlock retrieves a list of peers that do not have a given block in -// their set of known hashes. -func (ps *peerSet) PeersWithoutBlock(hash common.Hash) []*peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - list := make([]*peer, 0, len(ps.peers)) - for _, p := range ps.peers { - if !p.knownBlocks.Has(hash) { - list = append(list, p) - } - } - return list -} - -// PeersWithoutTx retrieves a list of peers that do not have a given transaction -// in their set of known hashes. -func (ps *peerSet) PeersWithoutTx(hash common.Hash) []*peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - list := make([]*peer, 0, len(ps.peers)) - for _, p := range ps.peers { - if !p.knownTxs.Has(hash) { - list = append(list, p) - } - } - return list -} - -// BestPeer retrieves the known peer with the currently highest total difficulty. -func (ps *peerSet) BestPeer() *peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - var ( - bestPeer *peer - bestTd *big.Int - ) - for _, p := range ps.peers { - if _, td := p.Head(); bestPeer == nil || td.Cmp(bestTd) > 0 { - bestPeer, bestTd = p, td - } - } - return bestPeer -} - -// Close disconnects all peers. -// No new peers can be registered after Close has returned. -func (ps *peerSet) Close() { - ps.lock.Lock() - defer ps.lock.Unlock() - - for _, p := range ps.peers { - p.Disconnect(p2p.DiscQuitting) - } - ps.closed = true -} diff --git a/exp/protocol.go b/exp/protocol.go deleted file mode 100644 index 4853f5f4a3689..0000000000000 --- a/exp/protocol.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see .
- -package exp - -import ( - "fmt" - "io" - "math/big" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/rlp" -) - -// Constants to match up protocol versions and messages -const ( - eth62 = 62 - eth63 = 63 -) - -// Official short name of the protocol used during capability negotiation. -var ProtocolName = "exp" - -// Supported versions of the eth protocol (first is primary). -var ProtocolVersions = []uint{eth63, eth62} - -// Number of implemented messages corresponding to different protocol versions. -var ProtocolLengths = []uint64{17, 8} - -const ( - NetworkId = 1 - ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message -) - -// exp protocol message codes -const ( - // Protocol messages belonging to eth/62 - StatusMsg = 0x00 - NewBlockHashesMsg = 0x01 - TxMsg = 0x02 - GetBlockHeadersMsg = 0x03 - BlockHeadersMsg = 0x04 - GetBlockBodiesMsg = 0x05 - BlockBodiesMsg = 0x06 - NewBlockMsg = 0x07 - - // Protocol messages belonging to eth/63 - GetNodeDataMsg = 0x0d - NodeDataMsg = 0x0e - GetReceiptsMsg = 0x0f - ReceiptsMsg = 0x10 -) - -type errCode int - -const ( - ErrMsgTooLarge = iota - ErrDecode - ErrInvalidMsgCode - ErrProtocolVersionMismatch - ErrNetworkIdMismatch - ErrGenesisBlockMismatch - ErrNoStatusMsg - ErrExtraStatusMsg - ErrSuspendedPeer -) - -func (e errCode) String() string { - return errorToString[int(e)] -} - -// XXX change once legacy code is out -var errorToString = map[int]string{ - ErrMsgTooLarge: "Message too long", - ErrDecode: "Invalid message", - ErrInvalidMsgCode: "Invalid message code", - ErrProtocolVersionMismatch: "Protocol version mismatch", - ErrNetworkIdMismatch: "NetworkId mismatch", - ErrGenesisBlockMismatch: "Genesis block mismatch", - ErrNoStatusMsg: "No status message", - ErrExtraStatusMsg: "Extra status message", - ErrSuspendedPeer: "Suspended peer", -} - -type txPool interface { - // AddBatch should add the given transactions to the pool. - AddBatch([]*types.Transaction) - - // Pending should return pending transactions. - // The slice should be modifiable by the caller. - Pending() map[common.Address]types.Transactions -} - -// statusData is the network packet for the status message. -type statusData struct { - ProtocolVersion uint32 - NetworkId uint32 - TD *big.Int - CurrentBlock common.Hash - GenesisBlock common.Hash -} - -// newBlockHashesData is the network packet for the block announcements. -type newBlockHashesData []struct { - Hash common.Hash // Hash of one particular block being announced - Number uint64 // Number of one particular block being announced -} - -// getBlockHeadersData represents a block header query. -type getBlockHeadersData struct { - Origin hashOrNumber // Block from which to retrieve headers - Amount uint64 // Maximum number of headers to retrieve - Skip uint64 // Blocks to skip between consecutive headers - Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) -} - -// hashOrNumber is a combined field for specifying an origin block. -type hashOrNumber struct { - Hash common.Hash // Block hash from which to retrieve headers (excludes Number) - Number uint64 // Block number from which to retrieve headers (excludes Hash) -} - -// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the -// two contained union fields.
-func (hn *hashOrNumber) EncodeRLP(w io.Writer) error { - if hn.Hash == (common.Hash{}) { - return rlp.Encode(w, hn.Number) - } - if hn.Number != 0 { - return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number) - } - return rlp.Encode(w, hn.Hash) -} - -// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents -// into either a block hash or a block number. -func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error { - _, size, _ := s.Kind() - origin, err := s.Raw() - if err == nil { - switch { - case size == 32: - err = rlp.DecodeBytes(origin, &hn.Hash) - case size <= 8: - err = rlp.DecodeBytes(origin, &hn.Number) - default: - err = fmt.Errorf("invalid input size %d for origin", size) - } - } - return err -} - -// newBlockData is the network packet for the block propagation message. -type newBlockData struct { - Block *types.Block - TD *big.Int -} - -// blockBody represents the data content of a single block. -type blockBody struct { - Transactions []*types.Transaction // Transactions contained within a block - Uncles []*types.Header // Uncles contained within a block -} - -// blockBodiesData is the network packet for block content distribution. -type blockBodiesData []*blockBody diff --git a/exp/protocol_test.go b/exp/protocol_test.go deleted file mode 100644 index 4c7666f1e106e..0000000000000 --- a/exp/protocol_test.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package exp - -import ( - "fmt" - "sync" - "testing" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/rlp" -) - -func init() { - // glog.SetToStderr(true) - // glog.SetV(6) -} - -var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - -// Tests that handshake failures are detected and reported correctly. 
-func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) } -func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) } - -func testStatusMsgErrors(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, false, 0, nil, nil) - td, currentBlock, genesis := pm.blockchain.Status() - defer pm.Stop() - - tests := []struct { - code uint64 - data interface{} - wantError error - }{ - { - code: TxMsg, data: []interface{}{}, - wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"), - }, - { - code: StatusMsg, data: statusData{10, NetworkId, td, currentBlock, genesis}, - wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", protocol), - }, - { - code: StatusMsg, data: statusData{uint32(protocol), 999, td, currentBlock, genesis}, - wantError: errResp(ErrNetworkIdMismatch, "999 (!= 1)"), - }, - { - code: StatusMsg, data: statusData{uint32(protocol), NetworkId, td, currentBlock, common.Hash{3}}, - wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis), - }, - } - - for i, test := range tests { - p, errc := newTestPeer("peer", protocol, pm, false) - // The send call might hang until reset because - // the protocol might not read the payload. - go p2p.Send(p.app, test.code, test.data) - - select { - case err := <-errc: - if err == nil { - t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError) - } else if err.Error() != test.wantError.Error() { - t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError) - } - case <-time.After(2 * time.Second): - t.Errorf("protocol did not shut down within 2 seconds") - } - p.close() - } -} - -// This test checks that received transactions are added to the local pool. -func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) } -func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) } - -func testRecvTransactions(t *testing.T, protocol int) { - txAdded := make(chan []*types.Transaction) - pm := newTestProtocolManagerMust(t, false, 0, nil, txAdded) - pm.synced = 1 // mark synced to accept transactions - p, _ := newTestPeer("peer", protocol, pm, true) - defer pm.Stop() - defer p.close() - - tx := newTestTransaction(testAccount, 0, 0) - if err := p2p.Send(p.app, TxMsg, []interface{}{tx}); err != nil { - t.Fatalf("send error: %v", err) - } - select { - case added := <-txAdded: - if len(added) != 1 { - t.Errorf("wrong number of added transactions: got %d, want 1", len(added)) - } else if added[0].Hash() != tx.Hash() { - t.Errorf("added wrong tx hash: got %v, want %v", added[0].Hash(), tx.Hash()) - } - case <-time.After(2 * time.Second): - t.Errorf("no TxPreEvent received within 2 seconds") - } -} - -// This test checks that pending transactions are sent. -func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) } -func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) } - -func testSendTransactions(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, false, 0, nil, nil) - defer pm.Stop() - - // Fill the pool with big transactions. - const txsize = txsyncPackSize / 10 - alltxs := make([]*types.Transaction, 100) - for nonce := range alltxs { - alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize) - } - pm.txpool.AddBatch(alltxs) - - // Connect several peers. They should all receive the pending transactions.
- var wg sync.WaitGroup - checktxs := func(p *testPeer) { - defer wg.Done() - defer p.close() - seen := make(map[common.Hash]bool) - for _, tx := range alltxs { - seen[tx.Hash()] = false - } - for n := 0; n < len(alltxs) && !t.Failed(); { - var txs []*types.Transaction - msg, err := p.app.ReadMsg() - if err != nil { - t.Errorf("%v: read error: %v", p.Peer, err) - } else if msg.Code != TxMsg { - t.Errorf("%v: got code %d, want TxMsg", p.Peer, msg.Code) - } - if err := msg.Decode(&txs); err != nil { - t.Errorf("%v: %v", p.Peer, err) - } - for _, tx := range txs { - hash := tx.Hash() - seentx, want := seen[hash] - if seentx { - t.Errorf("%v: got tx more than once: %x", p.Peer, hash) - } - if !want { - t.Errorf("%v: got unexpected tx: %x", p.Peer, hash) - } - seen[hash] = true - n++ - } - } - } - for i := 0; i < 3; i++ { - p, _ := newTestPeer(fmt.Sprintf("peer #%d", i), protocol, pm, true) - wg.Add(1) - go checktxs(p) - } - wg.Wait() -} - -// Tests that the custom union field encoder and decoder works correctly. -func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { - // Create a "random" hash for testing - var hash common.Hash - for i := range hash { - hash[i] = byte(i) - } - // Assemble some table driven tests - tests := []struct { - packet *getBlockHeadersData - fail bool - }{ - // Providing the origin as either a hash or a number should both work - {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}}}, - {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}}}, - - // Providing arbitrary query fields should also work - {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, - {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, - - // Providing both the origin hash and origin number must fail - {fail: true, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash, Number: 314}}}, - } - // Iterate over each of the tests and try to encode and then decode - for i, tt := range tests { - bytes, err := rlp.EncodeToBytes(tt.packet) - if err != nil && !tt.fail { - t.Fatalf("test %d: failed to encode packet: %v", i, err) - } else if err == nil && tt.fail { - t.Fatalf("test %d: encode should have failed", i) - } - if !tt.fail { - packet := new(getBlockHeadersData) - if err := rlp.DecodeBytes(bytes, packet); err != nil { - t.Fatalf("test %d: failed to decode packet: %v", i, err) - } - if packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount || - packet.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse { - t.Fatalf("test %d: encode decode mismatch: have %+v, want %+v", i, packet, tt.packet) - } - } - } -}
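The hashOrNumber codec and the round-trip test above both rest on one property: the two union arms are distinguishable by size on the wire, since a hash is exactly 32 bytes while a block number encodes to at most 8. A standalone sketch of the same size-discriminated union, using plain byte slices instead of RLP:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// hashOrNum mirrors the union above: exactly one arm may be set.
type hashOrNum struct {
	Hash [32]byte // set => Num must be zero
	Num  uint64   // set => Hash must be empty
}

// encode picks the arm by content, like hashOrNumber.EncodeRLP: an empty
// hash means the number is the origin, and setting both is an error.
func (hn *hashOrNum) encode() ([]byte, error) {
	if hn.Hash == ([32]byte{}) {
		buf := make([]byte, 8)
		binary.BigEndian.PutUint64(buf, hn.Num)
		return buf, nil
	}
	if hn.Num != 0 {
		return nil, errors.New("both hash and number set")
	}
	return hn.Hash[:], nil
}

// decode discriminates by payload size, like hashOrNumber.DecodeRLP:
// 32 bytes is a hash, up to 8 bytes is a number, anything else is invalid.
func (hn *hashOrNum) decode(b []byte) error {
	switch {
	case len(b) == 32:
		copy(hn.Hash[:], b)
	case len(b) <= 8:
		var buf [8]byte
		copy(buf[8-len(b):], b) // right-align short big-endian numbers
		hn.Num = binary.BigEndian.Uint64(buf[:])
	default:
		return fmt.Errorf("invalid input size %d", len(b))
	}
	return nil
}

func main() {
	in := hashOrNum{Num: 314}
	b, _ := in.encode()
	var out hashOrNum
	fmt.Println(out.decode(b), out.Num == in.Num) // <nil> true
}
```

Like the original, this inherits one quirk: an all-zero hash is indistinguishable from a number origin, which is why the tests build a "random" non-zero hash.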
diff --git a/exp/sync.go b/exp/sync.go deleted file mode 100644 index 845f98dc71d07..0000000000000 --- a/exp/sync.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package exp - -import ( - "math/rand" - "sync/atomic" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/exp/downloader" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/p2p/discover" -) - -const ( - forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available - minDesiredPeerCount = 5 // Amount of peers desired to start syncing - - // This is the target size for the packs of transactions sent by txsyncLoop. - // A pack can get larger than this if a single transactions exceeds this size. - txsyncPackSize = 100 * 1024 -) - -type txsync struct { - p *peer - txs []*types.Transaction -} - -// syncTransactions starts sending all currently pending transactions to the given peer. -func (pm *ProtocolManager) syncTransactions(p *peer) { - var txs types.Transactions - for _, batch := range pm.txpool.Pending() { - txs = append(txs, batch...) - } - if len(txs) == 0 { - return - } - select { - case pm.txsyncCh <- &txsync{p, txs}: - case <-pm.quitSync: - } -} - -// txsyncLoop takes care of the initial transaction sync for each new -// connection. When a new peer appears, we relay all currently pending -// transactions. In order to minimise egress bandwidth usage, we send -// the transactions in small packs to one peer at a time. -func (pm *ProtocolManager) txsyncLoop() { - var ( - pending = make(map[discover.NodeID]*txsync) - sending = false // whether a send is active - pack = new(txsync) // the pack that is being sent - done = make(chan error, 1) // result of the send - ) - - // send starts sending a pack of transactions from the sync. - send := func(s *txsync) { - // Fill pack with transactions up to the target size. - size := common.StorageSize(0) - pack.p = s.p - pack.txs = pack.txs[:0] - for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ { - pack.txs = append(pack.txs, s.txs[i]) - size += s.txs[i].Size() - } - // Remove the transactions that will be sent. - s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])] - if len(s.txs) == 0 { - delete(pending, s.p.ID()) - } - // Send the pack in the background. - glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size) - sending = true - go func() { done <- pack.p.SendTransactions(pack.txs) }() - } - - // pick chooses the next pending sync. - pick := func() *txsync { - if len(pending) == 0 { - return nil - } - n := rand.Intn(len(pending)) + 1 - for _, s := range pending { - if n--; n == 0 { - return s - } - } - return nil - } - - for { - select { - case s := <-pm.txsyncCh: - pending[s.p.ID()] = s - if !sending { - send(s) - } - case err := <-done: - sending = false - // Stop tracking peers that cause send failures. - if err != nil { - glog.V(logger.Debug).Infof("%v: tx send failed: %v", pack.p.Peer, err) - delete(pending, pack.p.ID()) - } - // Schedule the next send. - if s := pick(); s != nil { - send(s) - } - case <-pm.quitSync: - return - } - }
-}
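txsyncLoop throttles the initial transaction sync by carving each peer's queue into roughly 100KB packs, sending to one peer at a time, and compacting the remainder in place with the copy idiom. A sketch of just the packing step (sizeOf stands in for tx.Size()):

```go
package main

import "fmt"

// nextPack carves items off the front of queue until target bytes are
// reached, returning the pack and the compacted remainder, mirroring
// txsyncLoop's send() above. A pack can exceed target if a single item does.
func nextPack(queue []string, target int, sizeOf func(string) int) (pack, rest []string) {
	size := 0
	for i := 0; i < len(queue) && size < target; i++ {
		pack = append(pack, queue[i])
		size += sizeOf(queue[i])
	}
	// Compact the remainder in place, as s.txs = s.txs[:copy(...)] does above.
	rest = queue[:copy(queue, queue[len(pack):])]
	return pack, rest
}

func main() {
	queue := []string{"aaaa", "bb", "cccccc", "d"}
	sizeOf := func(s string) int { return len(s) }
	pack, rest := nextPack(queue, 5, sizeOf)
	fmt.Println(pack, rest) // [aaaa bb] [cccccc d]
}
```

Randomly picking which pending sync to serve next, as pick() does, keeps one slow peer from starving the others.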
-func (pm *ProtocolManager) syncer() { - // Start and ensure cleanup of sync mechanisms - pm.fetcher.Start() - defer pm.fetcher.Stop() - defer pm.downloader.Terminate() - - // Wait for different events to fire synchronisation operations - forceSync := time.Tick(forceSyncCycle) - for { - select { - case <-pm.newPeerCh: - // Make sure we have peers to select from, then sync - if pm.peers.Len() < minDesiredPeerCount { - break - } - go pm.synchronise(pm.peers.BestPeer()) - - case <-forceSync: - // Force a sync even if not enough peers are present - go pm.synchronise(pm.peers.BestPeer()) - - case <-pm.noMorePeers: - return - } - } -} - -// synchronise tries to sync up our local block chain with a remote peer. -func (pm *ProtocolManager) synchronise(peer *peer) { - // Short circuit if no peers are available - if peer == nil { - return - } - // Make sure the peer's TD is higher than our own - currentBlock := pm.blockchain.CurrentBlock() - td := pm.blockchain.GetTd(currentBlock.Hash()) - - pHead, pTd := peer.Head() - if pTd.Cmp(td) <= 0 { - return - } - // Otherwise try to sync with the downloader - mode := downloader.FullSync - if atomic.LoadUint32(&pm.fastSync) == 1 { - mode = downloader.FastSync - } - if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil { - return - } - atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done - - // If fast sync was enabled, and we synced up, disable it - if atomic.LoadUint32(&pm.fastSync) == 1 { - // Disable fast sync if we indeed have something in our chain - if pm.blockchain.CurrentBlock().NumberU64() > 0 { - glog.V(logger.Info).Infof("fast sync complete, auto disabling") - atomic.StoreUint32(&pm.fastSync, 0) - } - } -} diff --git a/exp/sync_test.go b/exp/sync_test.go deleted file mode 100644 index 1ff0aa8752edc..0000000000000 --- a/exp/sync_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package exp - -import ( - "sync/atomic" - "testing" - "time" - - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/p2p/discover" -) - -// Tests that fast sync gets disabled as soon as a real block is successfully -// imported into the blockchain. 
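synchronise above bails out unless the remote peer advertises strictly higher total difficulty than the local head, and only then picks full or fast sync from the atomic fastSync flag. A hedged sketch of the total-difficulty guard with math/big; the concrete values are illustrative:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	localTD := big.NewInt(1000000) // total difficulty of our chain head
	peerTD := big.NewInt(1000001)  // total difficulty the peer advertised

	// Mirrors the pTd.Cmp(td) <= 0 early return: equal or lower
	// difficulty means the peer has nothing we lack.
	if peerTD.Cmp(localTD) <= 0 {
		fmt.Println("peer not ahead, skipping sync")
		return
	}
	fmt.Println("peer ahead, handing off to the downloader")
}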
-func TestFastSyncDisabling(t *testing.T) { - // Create a pristine protocol manager, check that fast sync is left enabled - pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil) - if atomic.LoadUint32(&pmEmpty.fastSync) == 0 { - t.Fatalf("fast sync disabled on pristine blockchain") - } - // Create a full protocol manager, check that fast sync gets disabled - pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil) - if atomic.LoadUint32(&pmFull.fastSync) == 1 { - t.Fatalf("fast sync not disabled on non-empty blockchain") - } - // Sync up the two peers - io1, io2 := p2p.MsgPipe() - - go pmFull.handle(pmFull.newPeer(63, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2)) - go pmEmpty.handle(pmEmpty.newPeer(63, p2p.NewPeer(discover.NodeID{}, "full", nil), io1)) - - time.Sleep(250 * time.Millisecond) - pmEmpty.synchronise(pmEmpty.peers.BestPeer()) - - // Check that fast sync was disabled - if atomic.LoadUint32(&pmEmpty.fastSync) == 1 { - t.Fatalf("fast sync not disabled after successful synchronisation") - } -} diff --git a/fdtrack/fdtrack.go b/fdtrack/fdtrack.go deleted file mode 100644 index 7c75460e5ca6c..0000000000000 --- a/fdtrack/fdtrack.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -// Package fdtrack logs statistics about open file descriptors. 
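A hedged usage sketch for the fdtrack package defined below: wrap a listener so every accepted connection is counted under a label, then start the 15-second reporter. The label and listen address are illustrative.

package main

import (
	"net"

	"github.com/expanse-org/go-expanse/fdtrack"
)

func main() {
	fdtrack.Start() // begin logging fd usage every 15 seconds

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	// Accepted connections inherit the "p2p" label; closing the
	// listener or a connection decrements the tracked count.
	ln = fdtrack.WrapListener("p2p", ln)
	defer ln.Close()
}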
-package fdtrack - -import ( - "fmt" - "net" - "sort" - "sync" - "time" - - "github.com/expanse-org/go-expanse/logger/glog" -) - -var ( - mutex sync.Mutex - all = make(map[string]int) -) - -func Open(desc string) { - mutex.Lock() - all[desc] += 1 - mutex.Unlock() -} - -func Close(desc string) { - mutex.Lock() - defer mutex.Unlock() - if c, ok := all[desc]; ok { - if c == 1 { - delete(all, desc) - } else { - all[desc]-- - } - } -} - -func WrapListener(desc string, l net.Listener) net.Listener { - Open(desc) - return &wrappedListener{l, desc} -} - -type wrappedListener struct { - net.Listener - desc string -} - -func (w *wrappedListener) Accept() (net.Conn, error) { - c, err := w.Listener.Accept() - if err == nil { - c = WrapConn(w.desc, c) - } - return c, err -} - -func (w *wrappedListener) Close() error { - err := w.Listener.Close() - if err == nil { - Close(w.desc) - } - return err -} - -func WrapConn(desc string, conn net.Conn) net.Conn { - Open(desc) - return &wrappedConn{conn, desc} -} - -type wrappedConn struct { - net.Conn - desc string -} - -func (w *wrappedConn) Close() error { - err := w.Conn.Close() - if err == nil { - Close(w.desc) - } - return err -} - -func Start() { - go func() { - for range time.Tick(15 * time.Second) { - mutex.Lock() - var sum, tracked = 0, []string{} - for what, n := range all { - sum += n - tracked = append(tracked, fmt.Sprintf("%s:%d", what, n)) - } - mutex.Unlock() - used, _ := fdusage() - sort.Strings(tracked) - glog.Infof("fd usage %d/%d, tracked %d %v", used, fdlimit(), sum, tracked) - } - }() -} diff --git a/logger/glog/glog.go b/logger/glog/glog.go deleted file mode 100644 index e4b3f80a1b437..0000000000000 --- a/logger/glog/glog.go +++ /dev/null @@ -1,1223 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. 
-// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name or "glob" pattern matching -// and N is a V level. For instance, -// -// -vmodule=gopher.go=3 -// sets the V level to 3 in all Go files named "gopher.go". -// -// -vmodule=foo=3 -// sets V to 3 in all files of any packages whose import path ends in "foo". -// -// -vmodule=foo/*=3 -// sets V to 3 in all files of any packages whose import path contains "foo". -package glog - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - stdLog "log" - "os" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// these path prefixes are trimmed for display, but not when -// matching vmodule filters. -var trimPrefixes = []string{ - "/github.com/expanse-org/go-expanse", - "/github.com/ethereum/ethash", -} - -func trimToImportPath(file string) string { - if root := strings.LastIndex(file, "src/"); root != 0 { - file = file[root+3:] - } - return file -} - -// SetV sets the global verbosity level -func SetV(v int) { - logging.verbosity.set(Level(v)) -} - -// SetToStderr sets the global output style -func SetToStderr(toStderr bool) { - logging.mu.Lock() - logging.toStderr = toStderr - logging.mu.Unlock() -} - -// GetTraceLocation returns the global TraceLocation flag. -func GetTraceLocation() *TraceLocation { - return &logging.traceLocation -} - -// GetVModule returns the global verbosity pattern flag. -func GetVModule() *moduleSpec { - return &logging.vmodule -} - -// GetVerbosity returns the global verbosity level flag. -func GetVerbosity() *Level { - return &logging.verbosity -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. 
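In this fork the severity and verbosity plumbing above is normally driven through the exported setters rather than flags (the flag registrations are commented out in init further down). A hedged sketch combining the global level with a vmodule override; the pattern and levels are illustrative:

package main

import "github.com/expanse-org/go-expanse/logger/glog"

func main() {
	glog.SetToStderr(true)
	glog.SetV(2) // global V threshold

	// Raise verbosity to 4, but only for files of packages whose
	// import path ends in "downloader".
	if err := glog.GetVModule().Set("downloader=4"); err != nil {
		panic(err)
	}

	glog.V(2).Infoln("visible everywhere")
	glog.V(4).Infoln("visible only where the vmodule pattern matches")
}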
-func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern *regexp.Regexp - level Level -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. 
It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - re, _ := compileModulePattern(pattern) - filter = append(filter, modulePat{re, Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// compiles a vmodule pattern to a regular expression. -func compileModulePattern(pat string) (*regexp.Regexp, error) { - re := ".*" - for _, comp := range strings.Split(pat, "/") { - if comp == "*" { - re += "(/.*)?" - } else if comp != "" { - // TODO: maybe return error if comp contains * - re += "/" + regexp.QuoteMeta(comp) - } - } - if !strings.HasSuffix(pat, ".go") { - re += "/[^/]+\\.go" - } - return regexp.Compile(re + "$") -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type TraceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *TraceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *TraceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *TraceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *TraceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *TraceLocation) Set(value string) error { - if value == "" { - // Unset. 
- logging.mu.Lock() - t.line = 0 - t.file = "" - logging.mu.Unlock() - return nil - } - - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - //flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - //flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - //flag.Var(&logging.verbosity, "v", "log level for V logs") - //flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - //flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - //flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - logging.setVState(3, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation TraceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. 
- next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - file = trimToImportPath(file) - for _, p := range trimPrefixes { - if strings.HasPrefix(file, p) { - file = file[len(p):] - break - } - } - file = file[1:] // drop '/' - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' 
- buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.Write(buf.tmp[:22]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printfmt(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. 
- if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. 
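The severity switch in output above leans on fallthrough so one record cascades into every lower-severity sink, which is why an ERROR line also shows up in the WARNING and INFO logs. A minimal sketch of that cascade shape, with println sinks standing in for the log files:

package main

import "fmt"

const (
	infoLog = iota
	warningLog
	errorLog
	fatalLog
)

// writeCascade mirrors the fallthrough switch in output: a record at
// severity s is written to s and to every sink below it.
func writeCascade(s int, sinks [4]func(string), msg string) {
	switch s {
	case fatalLog:
		sinks[fatalLog](msg)
		fallthrough
	case errorLog:
		sinks[errorLog](msg)
		fallthrough
	case warningLog:
		sinks[warningLog](msg)
		fallthrough
	case infoLog:
		sinks[infoLog](msg)
	}
}

func main() {
	names := [4]string{"INFO", "WARNING", "ERROR", "FATAL"}
	var sinks [4]func(string)
	for i := range sinks {
		i := i // capture a per-iteration copy
		sinks[i] = func(msg string) { fmt.Println(names[i], msg) }
	}
	writeCascade(errorLog, sinks, "disk almost full")
	// Prints ERROR, WARNING and INFO lines for the same message.
}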
-type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. 
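A hedged usage sketch for CopyStandardLogTo, whose implementation follows: once bridged, lines emitted through the standard library logger are re-parsed for file and line and re-logged at the chosen severity.

package main

import (
	stdlog "log"

	"github.com/expanse-org/go-expanse/logger/glog"
)

func main() {
	glog.SetToStderr(true)

	// Route the stdlib logger into glog's INFO log. The name must be
	// INFO, WARNING, ERROR or FATAL; anything else panics.
	glog.CopyStandardLogTo("INFO")

	stdlog.Println("legacy message") // now carries a glog-style header
	glog.Flush()
}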
-func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - file = trimToImportPath(file) - for _, filter := range l.vmodule.filter { - if filter.pattern.MatchString(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. 
- logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printfmt(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.print(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printfmt(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printfmt(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. 
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printfmt(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printfmt(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printfmt(fatalLog, format, args...) -} diff --git a/logger/log.go b/logger/log.go deleted file mode 100644 index 58a8e218b98c3..0000000000000 --- a/logger/log.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
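The Fatal and Exit families above differ only in teardown: Fatal dumps every goroutine's stack and exits with status 255, while Exit suppresses the dump via fatalNoStacks and exits with status 1. A hedged sketch of picking between them; the error values are illustrative:

package main

import (
	"errors"

	"github.com/expanse-org/go-expanse/logger/glog"
)

func main() {
	err := errors.New("config file unreadable")

	// Operator-facing failure: a goroutine dump adds nothing, exit 1.
	glog.Exitf("startup aborted: %v", err)

	// Programming error: a full stack dump helps, exit 255.
	glog.Fatalf("invariant violated: %v", err) // unreachable after Exitf
}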
- -package logger - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/expanse-org/go-expanse/common" -) - -func openLogFile(datadir string, filename string) *os.File { - path := common.AbsolutePath(datadir, filename) - file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - if err != nil { - panic(fmt.Sprintf("error opening log file '%s': %v", filename, err)) - } - return file -} - -func New(datadir string, logFile string, logLevel int) LogSystem { - var writer io.Writer - if logFile == "" { - writer = os.Stdout - } else { - writer = openLogFile(datadir, logFile) - } - - var sys LogSystem - sys = NewStdLogSystem(writer, log.LstdFlags, LogLevel(logLevel)) - AddLogSystem(sys) - - return sys -} - -func NewJSONsystem(datadir string, logFile string) LogSystem { - var writer io.Writer - if logFile == "-" { - writer = os.Stdout - } else { - writer = openLogFile(datadir, logFile) - } - - var sys LogSystem - sys = NewJsonLogSystem(writer) - AddLogSystem(sys) - - return sys -} diff --git a/p2p/nat/nat.go b/p2p/nat/nat.go index 2d0fbc9702fec..23275d24be0aa 100644 --- a/p2p/nat/nat.go +++ b/p2p/nat/nat.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "github.com/expanse-org/go-expanse/log" "github.com/jackpal/go-nat-pmp" ) diff --git a/pow/block.go b/pow/block.go deleted file mode 100644 index 1f7585c444da9..0000000000000 --- a/pow/block.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package pow - -import ( - "math/big" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" -) - -type Block interface { - Difficulty() *big.Int - HashNoNonce() common.Hash - Nonce() uint64 - MixDigest() common.Hash - NumberU64() uint64 -} - -type ChainManager interface { - GetBlockByNumber(uint64) *types.Block - CurrentBlock() *types.Block -} diff --git a/pow/dagger/dagger.go b/pow/dagger/dagger.go deleted file mode 100644 index 507cda95a0f56..0000000000000 --- a/pow/dagger/dagger.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package dagger - -import ( - "hash" - "math/big" - "math/rand" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto/sha3" - "github.com/expanse-org/go-expanse/logger" -) - -var powlogger = logger.NewLogger("POW") - -type Dagger struct { - hash *big.Int - xn *big.Int -} - -var Found bool - -func (dag *Dagger) Find(obj *big.Int, resChan chan int64) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for i := 0; i < 1000; i++ { - rnd := r.Int63() - - res := dag.Eval(big.NewInt(rnd)) - powlogger.Infof("rnd %v\nres %v\nobj %v\n", rnd, res, obj) - if res.Cmp(obj) < 0 { - // Post back result on the channel - resChan <- rnd - // Notify other threads we've found a valid nonce - Found = true - } - - // Break out if found - if Found { - break - } - } - - resChan <- 0 -} - -func (dag *Dagger) Search(hash, diff *big.Int) (uint64, []byte) { - // TODO fix multi threading. Somehow it results in the wrong nonce - amountOfRoutines := 1 - - dag.hash = hash - - obj := common.BigPow(2, 256) - obj = obj.Div(obj, diff) - - Found = false - resChan := make(chan int64, 3) - var res int64 - - for k := 0; k < amountOfRoutines; k++ { - go dag.Find(obj, resChan) - - // Wait for each go routine to finish - } - for k := 0; k < amountOfRoutines; k++ { - // Get the result from the channel. 0 = quit - if r := <-resChan; r != 0 { - res = r - } - } - - return uint64(res), nil -} - -func (dag *Dagger) Verify(hash, diff, nonce *big.Int) bool { - dag.hash = hash - - obj := common.BigPow(2, 256) - obj = obj.Div(obj, diff) - - return dag.Eval(nonce).Cmp(obj) < 0 -} - -func DaggerVerify(hash, diff, nonce *big.Int) bool { - dagger := &Dagger{} - dagger.hash = hash - - obj := common.BigPow(2, 256) - obj = obj.Div(obj, diff) - - return dagger.Eval(nonce).Cmp(obj) < 0 -} - -func (dag *Dagger) Node(L uint64, i uint64) *big.Int { - if L == i { - return dag.hash - } - - var m *big.Int - if L == 9 { - m = big.NewInt(16) - } else { - m = big.NewInt(3) - } - - sha := sha3.NewKeccak256() - sha.Reset() - d := sha3.NewKeccak256() - b := new(big.Int) - ret := new(big.Int) - - for k := 0; k < int(m.Uint64()); k++ { - d.Reset() - d.Write(dag.hash.Bytes()) - d.Write(dag.xn.Bytes()) - d.Write(big.NewInt(int64(L)).Bytes()) - d.Write(big.NewInt(int64(i)).Bytes()) - d.Write(big.NewInt(int64(k)).Bytes()) - - b.SetBytes(Sum(d)) - pk := b.Uint64() & ((1 << ((L - 1) * 3)) - 1) - sha.Write(dag.Node(L-1, pk).Bytes()) - } - - ret.SetBytes(Sum(sha)) - - return ret -} - -func Sum(sha hash.Hash) []byte { - //in := make([]byte, 32) - return sha.Sum(nil) -} - -func (dag *Dagger) Eval(N *big.Int) *big.Int { - pow := common.BigPow(2, 26) - dag.xn = pow.Div(N, pow) - - sha := sha3.NewKeccak256() - sha.Reset() - ret := new(big.Int) - - for k := 0; k < 4; k++ { - d := sha3.NewKeccak256() - b := new(big.Int) - - d.Reset() - d.Write(dag.hash.Bytes()) - d.Write(dag.xn.Bytes()) - d.Write(N.Bytes()) - d.Write(big.NewInt(int64(k)).Bytes()) - - b.SetBytes(Sum(d)) - pk := (b.Uint64() & 0x1ffffff) - - sha.Write(dag.Node(9, pk).Bytes()) - } - - return ret.SetBytes(Sum(sha)) -} diff --git a/pow/dagger/dagger_test.go b/pow/dagger/dagger_test.go deleted file mode 100644 index a1465e1351837..0000000000000 --- a/pow/dagger/dagger_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. 
-// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package dagger - -import ( - "math/big" - "testing" - - "github.com/expanse-org/go-expanse/common" -) - -func BenchmarkDaggerSearch(b *testing.B) { - hash := big.NewInt(0) - diff := common.BigPow(2, 36) - o := big.NewInt(0) // nonce doesn't matter. We're only testing against speed, not validity - - // Reset timer so the big generation isn't included in the benchmark - b.ResetTimer() - // Validate - DaggerVerify(hash, diff, o) -} diff --git a/pow/ezp/pow.go b/pow/ezp/pow.go deleted file mode 100644 index 53cf6601698fe..0000000000000 --- a/pow/ezp/pow.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
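The verify function in the ezp package below accepts a nonce when the Keccak-256 digest, read as an integer, is at most 2^256 divided by the difficulty. A worked sketch of just that target arithmetic; the difficulty and digest values are illustrative:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	diff := big.NewInt(4096)

	// target = 2^256 / difficulty: doubling the difficulty halves
	// the share of digests that pass.
	target := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
	target.Div(target, diff)

	digest, _ := new(big.Int).SetString("7f", 16) // stand-in digest value
	fmt.Println("nonce accepted:", digest.Cmp(target) <= 0)
}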
- -package ezp - -import ( - "encoding/binary" - "math/big" - "math/rand" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto/sha3" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/pow" -) - -var powlogger = logger.NewLogger("POW") - -type EasyPow struct { - hash *big.Int - HashRate int64 - turbo bool -} - -func New() *EasyPow { - return &EasyPow{turbo: false} -} - -func (pow *EasyPow) GetHashrate() int64 { - return pow.HashRate -} - -func (pow *EasyPow) Turbo(on bool) { - pow.turbo = on -} - -func (pow *EasyPow) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - hash := block.HashNoNonce() - diff := block.Difficulty() - //i := int64(0) - // TODO fix offset - i := rand.Int63() - starti := i - start := time.Now().UnixNano() - - defer func() { pow.HashRate = 0 }() - - // Make sure stop is empty -empty: - for { - select { - case <-stop: - default: - break empty - } - } - - for { - select { - case <-stop: - return 0, nil - default: - i++ - - elapsed := time.Now().UnixNano() - start - hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000 - pow.HashRate = int64(hashes) - - sha := uint64(r.Int63()) - if verify(hash, diff, sha) { - return sha, nil - } - } - - if !pow.turbo { - time.Sleep(20 * time.Microsecond) - } - } -} - -func (pow *EasyPow) Verify(block pow.Block) bool { - return Verify(block) -} - -func verify(hash common.Hash, diff *big.Int, nonce uint64) bool { - sha := sha3.NewKeccak256() - n := make([]byte, 8) - binary.PutUvarint(n, nonce) - sha.Write(n) - sha.Write(hash[:]) - verification := new(big.Int).Div(common.BigPow(2, 256), diff) - res := common.BigD(sha.Sum(nil)) - return res.Cmp(verification) <= 0 -} - -func Verify(block pow.Block) bool { - return verify(block.HashNoNonce(), block.Difficulty(), block.Nonce()) -} diff --git a/release/contract.go b/release/contract.go deleted file mode 100644 index e127b1c785ec9..0000000000000 --- a/release/contract.go +++ /dev/null @@ -1,432 +0,0 @@ -// This file is an automatically generated Go binding. Do not modify as any -// change will likely be lost upon the next re-generation! - -package release - -import ( - "math/big" - "strings" - - "github.com/expanse-org/go-expanse/accounts/abi" - "github.com/expanse-org/go-expanse/accounts/abi/bind" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core/types" -) - -// ReleaseOracleABI is the input ABI used to generate the binding from. 
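Generated bindings like the one below are built from the ABI JSON string. A hedged sketch of parsing such a string by hand, assuming accounts/abi in this tree exposes the upstream-style abi.JSON and Pack helpers; the one-method ABI here is a cut-down stand-in for ReleaseOracleABI:

package main

import (
	"fmt"
	"strings"

	"github.com/expanse-org/go-expanse/accounts/abi"
)

// A single-method ABI in the same JSON shape as ReleaseOracleABI.
const demoABI = `[{"constant":true,"inputs":[],"name":"signers","outputs":[{"name":"","type":"address[]"}],"type":"function"}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(demoABI))
	if err != nil {
		panic(err)
	}
	// Pack the calldata for signers(): just the 4-byte selector here.
	data, err := parsed.Pack("signers")
	if err != nil {
		panic(err)
	}
	fmt.Printf("calldata: %x\n", data)
}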
-const ReleaseOracleABI = `[{"constant":true,"inputs":[],"name":"currentVersion","outputs":[{"name":"major","type":"uint32"},{"name":"minor","type":"uint32"},{"name":"patch","type":"uint32"},{"name":"commit","type":"bytes20"},{"name":"time","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"proposedVersion","outputs":[{"name":"major","type":"uint32"},{"name":"minor","type":"uint32"},{"name":"patch","type":"uint32"},{"name":"commit","type":"bytes20"},{"name":"pass","type":"address[]"},{"name":"fail","type":"address[]"}],"type":"function"},{"constant":true,"inputs":[],"name":"signers","outputs":[{"name":"","type":"address[]"}],"type":"function"},{"constant":true,"inputs":[],"name":"authProposals","outputs":[{"name":"","type":"address[]"}],"type":"function"},{"constant":true,"inputs":[{"name":"user","type":"address"}],"name":"authVotes","outputs":[{"name":"promote","type":"address[]"},{"name":"demote","type":"address[]"}],"type":"function"},{"constant":false,"inputs":[{"name":"user","type":"address"}],"name":"promote","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"user","type":"address"}],"name":"demote","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"major","type":"uint32"},{"name":"minor","type":"uint32"},{"name":"patch","type":"uint32"},{"name":"commit","type":"bytes20"}],"name":"release","outputs":[],"type":"function"},{"constant":false,"inputs":[],"name":"nuke","outputs":[],"type":"function"},{"inputs":[{"name":"signers","type":"address[]"}],"type":"constructor"}]` - -// ReleaseOracleBin is the compiled bytecode used for deploying new contracts. -const ReleaseOracleBin = `0x60606040526040516114033803806114038339810160405280510160008151600014156100ce57600160a060020a0333168152602081905260408120805460ff191660019081179091558054808201808355828183801582901161015e5781836000526020600020918201910161015e91905b8082111561019957600081558401610072565b505050919090600052602060002090016000848481518110156100025790602001906020020151909190916101000a815481600160a060020a0302191690830217905550506001015b815181101561018957600160006000506000848481518110156100025790602001906020020151600160a060020a0316815260200190815260200160002060006101000a81548160ff0219169083021790555060016000508054806001018281815481835581811511610085576000839052610085906000805160206113e3833981519152908101908301610072565b5050506000929092526000805160206113e3833981519152018054600160a060020a03191633179055505b50506112458061019e6000396000f35b50905600606060405236156100775760e060020a600035046326db7648811461007957806346f0975a1461019e5780635c3d005d1461020a57806364ed31fe146102935780639d888e861461038d578063bc8fbbf8146103b2578063bf8ecf9c146103fb578063d0e0813a14610467578063d67cbec914610478575b005b610495604080516020818101835260008083528351808301855281815260045460068054875181870281018701909852808852939687968796879691959463ffffffff818116956401000000008304821695604060020a840490921694606060020a938490049093029390926007929184919083018282801561012657602002820191906000526020600020905b8154600160a060020a0316815260019190910190602001808311610107575b505050505091508080548060200260200160405190810160405280929190818152602001828054801561018357602002820191906000526020600020905b8154600160a060020a0316815260019190910190602001808311610164575b50505050509050955095509550955095509550909192939495565b6040805160208181018352600082526001805484518184028101840190955280855261054894928301828280156101ff57602002820191906000526020600020905b8154600160a060020a03168152600191909101906020018083116101e0575b505050505090505b90565b61007760043
56106d78160005b600160a060020a033316600090815260208190526040812054819060ff16156106df57600160a060020a038416815260026020526040812091505b81548110156106e7578154600160a060020a033316908390839081101561000257600091825260209091200154600160a060020a03161415610732576106df565b6105926004356040805160208181018352600080835283518083018552818152600160a060020a038616825260028352908490208054855181850281018501909652808652939491939092600184019291849183018282801561032057602002820191906000526020600020905b8154600160a060020a0316815260019190910190602001808311610301575b505050505091508080548060200260200160405190810160405280929190818152602001828054801561037d57602002820191906000526020600020905b8154600160a060020a031681526001919091019060200180831161035e575b5050505050905091509150915091565b6106176000600060006000600060006008600050805490506000141561064e576106cf565b6100776106e56000808080805b600160a060020a033316600090815260208190526040812054819060ff161561121c57821580156103f1575060065481145b15610ca55761121c565b6040805160208181018352600082526003805484518184028101840190955280855261054894928301828280156101ff57602002820191906000526020600020908154600160a060020a03168152600191909101906020018083116101e0575b50505050509050610207565b6100776004356106d7816001610217565b6100776004356024356044356064356106df8484848460016103bf565b604051808763ffffffff1681526020018663ffffffff1681526020018563ffffffff16815260200184815260200180602001806020018381038352858181518152602001915080519060200190602002808383829060006004602084601f0104600f02600301f1509050018381038252848181518152602001915080519060200190602002808383829060006004602084601f0104600f02600301f1509050019850505050505050505060405180910390f35b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600f02600301f1509050019250505060405180910390f35b6040518080602001806020018381038352858181518152602001915080519060200190602002808383829060006004602084601f0104600f02600301f1509050018381038252848181518152602001915080519060200190602002808383829060006004602084601f0104600f02600301f15090500194505050505060405180910390f35b6040805163ffffffff9687168152948616602086015292909416838301526060830152608082019290925290519081900360a00190f35b600880546000198101908110156100025760009182526004027ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30190508054600182015463ffffffff8281169950640100000000830481169850604060020a8304169650606060020a91829004909102945067ffffffffffffffff16925090505b509091929394565b50565b505050505b50505050565b565b5060005b600182015481101561073a5733600160a060020a03168260010160005082815481101561000257600091825260209091200154600160a060020a03161415610784576106df565b600101610252565b8154600014801561074f575060018201546000145b156107ac576003805460018101808355828183801582901161078c5781836000526020600020918201910161078c9190610834565b6001016106eb565b5050506000928352506020909120018054600160a060020a031916851790555b821561084c578154600181018084558391908281838015829011610881578183600052602060002091820191016108819190610834565b5050506000928352506020909120018054600160a060020a031916851790555b600160a060020a038416600090815260026020908152604082208054838255818452918320909291610b5c91908101905b808211156108485760008155600101610834565b5090565b816001016000508054806001018281815481835581811511610933578183600052602060002091820191016109339190610834565b5050506000928352506020909120018054600160a060020a031916331790556001548254600290910490116108b5576106df565b8280156108db5750600160a060020a03841660009081526020819052604090205460ff16155b1561096a57600160a060020a0384166000908152602081905260409020805460ff1916600190
8117909155805480820180835582818380158290116107e3578183600052602060002091820191016107e39190610834565b5050506000928352506020909120018054600160a060020a031916331790556001805490830154600290910490116108b5576106df565b821580156109905750600160a060020a03841660009081526020819052604090205460ff165b156108035750600160a060020a0383166000908152602081905260408120805460ff191690555b6001548110156108035783600160a060020a03166001600050828154811015610002576000919091527fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf60154600160a060020a03161415610ad057600180546000198101908110156100025760009182527fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf601909054906101000a9004600160a060020a03166001600050828154811015610002577fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6018054600160a060020a03191690921790915580546000198101808355909190828015829011610ad857818360005260206000209182019101610ad89190610834565b6001016109b7565b5050600060048181556005805467ffffffffffffffff19169055600680548382558184529194509192508290610b32907ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f90810190610834565b5060018201805460008083559182526020909120610b5291810190610834565b5050505050610803565b5060018201805460008083559182526020909120610b7c91810190610834565b506000925050505b6003548110156106df5783600160a060020a03166003600050828154811015610002576000919091527fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b0154600160a060020a03161415610c9d57600380546000198101908110156100025760009182527fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b01909054906101000a9004600160a060020a03166003600050828154811015610002577fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b018054600160a060020a031916909217909155805460001981018083559091908280158290116106da578183600052602060002091820191016106da9190610834565b600101610b84565b60065460001415610d03576004805463ffffffff1916881767ffffffff0000000019166401000000008802176bffffffff00000000000000001916604060020a8702176bffffffffffffffffffffffff16606060020a808704021790555b828015610d6d575060045463ffffffff8881169116141580610d395750600454640100000000900463ffffffff90811690871614155b80610d56575060045463ffffffff868116604060020a9092041614155b80610d6d5750600454606060020a90819004028414155b15610d775761121c565b506006905060005b8154811015610dc0578154600160a060020a033316908390839081101561000257600091825260209091200154600160a060020a03161415610e0b5761121c565b5060005b6001820154811015610e135733600160a060020a03168260010160005082815481101561000257600091825260209091200154600160a060020a03161415610e485761121c565b600101610d7f565b8215610e50578154600181018084558391908281838015829011610e8557600083815260209020610e85918101908301610834565b600101610dc4565b816001016000508054806001018281815481835581811511610f0857818360005260206000209182019101610f089190610834565b5050506000928352506020909120018054600160a060020a03191633179055600154825460029091049011610eb95761121c565b8215610f3f576005805467ffffffffffffffff19164217905560088054600181018083558281838015829011610f9457600402816004028360005260206000209182019101610f9491906110ae565b5050506000928352506020909120018054600160a060020a03191633179055600180549083015460029091049011610eb95761121c565b600060048181556005805467ffffffffffffffff19169055600680548382558184529192918290611225907ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f90810190610834565b5050509190906000526020600020906004020160005060048054825463ffffffff191663ffffffff9182161780845582546401000000009081900483160267ffffffff000000001991909116178084558254604060020a908190049092169
091026bffffffff00000000000000001991909116178083558154606060020a908190048102819004026bffffffffffffffffffffffff9190911617825560055460018301805467ffffffffffffffff191667ffffffffffffffff9290921691909117905560068054600284018054828255600082815260209020949594919283929182019185821561110d5760005260206000209182015b8281111561110d57825482559160010191906001019061108b565b505050506004015b8082111561084857600080825560018201805467ffffffffffffffff191690556002820180548282558183526020832083916110ed9190810190610834565b50600182018054600080835591825260209091206110a691810190610834565b506111339291505b80821115610848578054600160a060020a0319168155600101611115565b50506001818101805491840180548083556000838152602090209293830192909182156111815760005260206000209182015b82811115611181578254825591600101919060010190611166565b5061118d929150611115565b5050600060048181556005805467ffffffffffffffff19169055600680548382558184529197509195509093508492506111ec91507ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f90810190610834565b506001820180546000808355918252602090912061120c91810190610834565b505050505061121c565b50505050505b50505050505050565b50600182018054600080835591825260209091206112169181019061083456b10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6` - -// DeployReleaseOracle deploys a new Expanse contract, binding an instance of ReleaseOracle to it. -func DeployReleaseOracle(auth *bind.TransactOpts, backend bind.ContractBackend, signers []common.Address) (common.Address, *types.Transaction, *ReleaseOracle, error) { - parsed, err := abi.JSON(strings.NewReader(ReleaseOracleABI)) - if err != nil { - return common.Address{}, nil, nil, err - } - address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ReleaseOracleBin), backend, signers) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &ReleaseOracle{ReleaseOracleCaller: ReleaseOracleCaller{contract: contract}, ReleaseOracleTransactor: ReleaseOracleTransactor{contract: contract}}, nil -} - -// ReleaseOracle is an auto generated Go binding around an Expanse contract. -type ReleaseOracle struct { - ReleaseOracleCaller // Read-only binding to the contract - ReleaseOracleTransactor // Write-only binding to the contract -} - -// ReleaseOracleCaller is an auto generated read-only Go binding around an Expanse contract. -type ReleaseOracleCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// ReleaseOracleTransactor is an auto generated write-only Go binding around an Expanse contract. -type ReleaseOracleTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// ReleaseOracleSession is an auto generated Go binding around an Expanse contract, -// with pre-set call and transact options. -type ReleaseOracleSession struct { - Contract *ReleaseOracle // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// ReleaseOracleCallerSession is an auto generated read-only Go binding around an Expanse contract, -// with pre-set call options. 
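The caller, transactor and session wrappers here follow the usual abigen layout: read-only calls, state-changing transactions, and pre-baked options each get their own type. A hedged sketch of how such a binding is typically consumed (the backend, address and key parameters are illustrative assumptions, not part of the deleted file):

// Assumes the deleted package's imports (bind, common, crypto/ecdsa).
func exampleOracleUsage(backend bind.ContractBackend, addr common.Address, key *ecdsa.PrivateKey) error {
	oracle, err := NewReleaseOracle(addr, backend)
	if err != nil {
		return err
	}
	// Read-only path through ReleaseOracleCaller; a nil CallOpts uses defaults.
	if _, err := oracle.Signers(nil); err != nil {
		return err
	}
	// State-changing path through ReleaseOracleTransactor; needs signing options.
	auth := bind.NewKeyedTransactor(key)
	_, err = oracle.Promote(auth, common.Address{}) // illustrative vote for the zero address
	return err
}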
-type ReleaseOracleCallerSession struct { - Contract *ReleaseOracleCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// ReleaseOracleTransactorSession is an auto generated write-only Go binding around an Expanse contract, -// with pre-set transact options. -type ReleaseOracleTransactorSession struct { - Contract *ReleaseOracleTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// ReleaseOracleRaw is an auto generated low-level Go binding around an Expanse contract. -type ReleaseOracleRaw struct { - Contract *ReleaseOracle // Generic contract binding to access the raw methods on -} - -// ReleaseOracleCallerRaw is an auto generated low-level read-only Go binding around an Expanse contract. -type ReleaseOracleCallerRaw struct { - Contract *ReleaseOracleCaller // Generic read-only contract binding to access the raw methods on -} - -// ReleaseOracleTransactorRaw is an auto generated low-level write-only Go binding around an Expanse contract. -type ReleaseOracleTransactorRaw struct { - Contract *ReleaseOracleTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewReleaseOracle creates a new instance of ReleaseOracle, bound to a specific deployed contract. -func NewReleaseOracle(address common.Address, backend bind.ContractBackend) (*ReleaseOracle, error) { - contract, err := bindReleaseOracle(address, backend.(bind.ContractCaller), backend.(bind.ContractTransactor)) - if err != nil { - return nil, err - } - return &ReleaseOracle{ReleaseOracleCaller: ReleaseOracleCaller{contract: contract}, ReleaseOracleTransactor: ReleaseOracleTransactor{contract: contract}}, nil -} - -// NewReleaseOracleCaller creates a new read-only instance of ReleaseOracle, bound to a specific deployed contract. -func NewReleaseOracleCaller(address common.Address, caller bind.ContractCaller) (*ReleaseOracleCaller, error) { - contract, err := bindReleaseOracle(address, caller, nil) - if err != nil { - return nil, err - } - return &ReleaseOracleCaller{contract: contract}, nil -} - -// NewReleaseOracleTransactor creates a new write-only instance of ReleaseOracle, bound to a specific deployed contract. -func NewReleaseOracleTransactor(address common.Address, transactor bind.ContractTransactor) (*ReleaseOracleTransactor, error) { - contract, err := bindReleaseOracle(address, nil, transactor) - if err != nil { - return nil, err - } - return &ReleaseOracleTransactor{contract: contract}, nil -} - -// bindReleaseOracle binds a generic wrapper to an already deployed contract. -func bindReleaseOracle(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(ReleaseOracleABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_ReleaseOracle *ReleaseOracleRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { - return _ReleaseOracle.Contract.ReleaseOracleCaller.contract.Call(opts, result, method, params...) 
-} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_ReleaseOracle *ReleaseOracleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _ReleaseOracle.Contract.ReleaseOracleTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_ReleaseOracle *ReleaseOracleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _ReleaseOracle.Contract.ReleaseOracleTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_ReleaseOracle *ReleaseOracleCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { - return _ReleaseOracle.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_ReleaseOracle *ReleaseOracleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _ReleaseOracle.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_ReleaseOracle *ReleaseOracleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _ReleaseOracle.Contract.contract.Transact(opts, method, params...) -} - -// AuthProposals is a free data retrieval call binding the contract method 0xbf8ecf9c. -// -// Solidity: function authProposals() constant returns(address[]) -func (_ReleaseOracle *ReleaseOracleCaller) AuthProposals(opts *bind.CallOpts) ([]common.Address, error) { - var ( - ret0 = new([]common.Address) - ) - out := ret0 - err := _ReleaseOracle.contract.Call(opts, out, "authProposals") - return *ret0, err -} - -// AuthProposals is a free data retrieval call binding the contract method 0xbf8ecf9c. -// -// Solidity: function authProposals() constant returns(address[]) -func (_ReleaseOracle *ReleaseOracleSession) AuthProposals() ([]common.Address, error) { - return _ReleaseOracle.Contract.AuthProposals(&_ReleaseOracle.CallOpts) -} - -// AuthProposals is a free data retrieval call binding the contract method 0xbf8ecf9c. -// -// Solidity: function authProposals() constant returns(address[]) -func (_ReleaseOracle *ReleaseOracleCallerSession) AuthProposals() ([]common.Address, error) { - return _ReleaseOracle.Contract.AuthProposals(&_ReleaseOracle.CallOpts) -} - -// AuthVotes is a free data retrieval call binding the contract method 0x64ed31fe. -// -// Solidity: function authVotes(user address) constant returns(promote address[], demote address[]) -func (_ReleaseOracle *ReleaseOracleCaller) AuthVotes(opts *bind.CallOpts, user common.Address) (struct { - Promote []common.Address - Demote []common.Address -}, error) { - ret := new(struct { - Promote []common.Address - Demote []common.Address - }) - out := ret - err := _ReleaseOracle.contract.Call(opts, out, "authVotes", user) - return *ret, err -} - -// AuthVotes is a free data retrieval call binding the contract method 0x64ed31fe. 
-// -// Solidity: function authVotes(user address) constant returns(promote address[], demote address[]) -func (_ReleaseOracle *ReleaseOracleSession) AuthVotes(user common.Address) (struct { - Promote []common.Address - Demote []common.Address -}, error) { - return _ReleaseOracle.Contract.AuthVotes(&_ReleaseOracle.CallOpts, user) -} - -// AuthVotes is a free data retrieval call binding the contract method 0x64ed31fe. -// -// Solidity: function authVotes(user address) constant returns(promote address[], demote address[]) -func (_ReleaseOracle *ReleaseOracleCallerSession) AuthVotes(user common.Address) (struct { - Promote []common.Address - Demote []common.Address -}, error) { - return _ReleaseOracle.Contract.AuthVotes(&_ReleaseOracle.CallOpts, user) -} - -// CurrentVersion is a free data retrieval call binding the contract method 0x9d888e86. -// -// Solidity: function currentVersion() constant returns(major uint32, minor uint32, patch uint32, commit bytes20, time uint256) -func (_ReleaseOracle *ReleaseOracleCaller) CurrentVersion(opts *bind.CallOpts) (struct { - Major uint32 - Minor uint32 - Patch uint32 - Commit [20]byte - Time *big.Int -}, error) { - ret := new(struct { - Major uint32 - Minor uint32 - Patch uint32 - Commit [20]byte - Time *big.Int - }) - out := ret - err := _ReleaseOracle.contract.Call(opts, out, "currentVersion") - return *ret, err -} - -// CurrentVersion is a free data retrieval call binding the contract method 0x9d888e86. -// -// Solidity: function currentVersion() constant returns(major uint32, minor uint32, patch uint32, commit bytes20, time uint256) -func (_ReleaseOracle *ReleaseOracleSession) CurrentVersion() (struct { - Major uint32 - Minor uint32 - Patch uint32 - Commit [20]byte - Time *big.Int -}, error) { - return _ReleaseOracle.Contract.CurrentVersion(&_ReleaseOracle.CallOpts) -} - -// CurrentVersion is a free data retrieval call binding the contract method 0x9d888e86. -// -// Solidity: function currentVersion() constant returns(major uint32, minor uint32, patch uint32, commit bytes20, time uint256) -func (_ReleaseOracle *ReleaseOracleCallerSession) CurrentVersion() (struct { - Major uint32 - Minor uint32 - Patch uint32 - Commit [20]byte - Time *big.Int -}, error) { - return _ReleaseOracle.Contract.CurrentVersion(&_ReleaseOracle.CallOpts) -} - -// ProposedVersion is a free data retrieval call binding the contract method 0x26db7648. -// -// Solidity: function proposedVersion() constant returns(major uint32, minor uint32, patch uint32, commit bytes20, pass address[], fail address[]) -func (_ReleaseOracle *ReleaseOracleCaller) ProposedVersion(opts *bind.CallOpts) (struct { - Major uint32 - Minor uint32 - Patch uint32 - Commit [20]byte - Pass []common.Address - Fail []common.Address -}, error) { - ret := new(struct { - Major uint32 - Minor uint32 - Patch uint32 - Commit [20]byte - Pass []common.Address - Fail []common.Address - }) - out := ret - err := _ReleaseOracle.contract.Call(opts, out, "proposedVersion") - return *ret, err -} - -// ProposedVersion is a free data retrieval call binding the contract method 0x26db7648. 
-// -// Solidity: function proposedVersion() constant returns(major uint32, minor uint32, patch uint32, commit bytes20, pass address[], fail address[]) -func (_ReleaseOracle *ReleaseOracleSession) ProposedVersion() (struct { - Major uint32 - Minor uint32 - Patch uint32 - Commit [20]byte - Pass []common.Address - Fail []common.Address -}, error) { - return _ReleaseOracle.Contract.ProposedVersion(&_ReleaseOracle.CallOpts) -} - -// ProposedVersion is a free data retrieval call binding the contract method 0x26db7648. -// -// Solidity: function proposedVersion() constant returns(major uint32, minor uint32, patch uint32, commit bytes20, pass address[], fail address[]) -func (_ReleaseOracle *ReleaseOracleCallerSession) ProposedVersion() (struct { - Major uint32 - Minor uint32 - Patch uint32 - Commit [20]byte - Pass []common.Address - Fail []common.Address -}, error) { - return _ReleaseOracle.Contract.ProposedVersion(&_ReleaseOracle.CallOpts) -} - -// Signers is a free data retrieval call binding the contract method 0x46f0975a. -// -// Solidity: function signers() constant returns(address[]) -func (_ReleaseOracle *ReleaseOracleCaller) Signers(opts *bind.CallOpts) ([]common.Address, error) { - var ( - ret0 = new([]common.Address) - ) - out := ret0 - err := _ReleaseOracle.contract.Call(opts, out, "signers") - return *ret0, err -} - -// Signers is a free data retrieval call binding the contract method 0x46f0975a. -// -// Solidity: function signers() constant returns(address[]) -func (_ReleaseOracle *ReleaseOracleSession) Signers() ([]common.Address, error) { - return _ReleaseOracle.Contract.Signers(&_ReleaseOracle.CallOpts) -} - -// Signers is a free data retrieval call binding the contract method 0x46f0975a. -// -// Solidity: function signers() constant returns(address[]) -func (_ReleaseOracle *ReleaseOracleCallerSession) Signers() ([]common.Address, error) { - return _ReleaseOracle.Contract.Signers(&_ReleaseOracle.CallOpts) -} - -// Demote is a paid mutator transaction binding the contract method 0x5c3d005d. -// -// Solidity: function demote(user address) returns() -func (_ReleaseOracle *ReleaseOracleTransactor) Demote(opts *bind.TransactOpts, user common.Address) (*types.Transaction, error) { - return _ReleaseOracle.contract.Transact(opts, "demote", user) -} - -// Demote is a paid mutator transaction binding the contract method 0x5c3d005d. -// -// Solidity: function demote(user address) returns() -func (_ReleaseOracle *ReleaseOracleSession) Demote(user common.Address) (*types.Transaction, error) { - return _ReleaseOracle.Contract.Demote(&_ReleaseOracle.TransactOpts, user) -} - -// Demote is a paid mutator transaction binding the contract method 0x5c3d005d. -// -// Solidity: function demote(user address) returns() -func (_ReleaseOracle *ReleaseOracleTransactorSession) Demote(user common.Address) (*types.Transaction, error) { - return _ReleaseOracle.Contract.Demote(&_ReleaseOracle.TransactOpts, user) -} - -// Nuke is a paid mutator transaction binding the contract method 0xbc8fbbf8. -// -// Solidity: function nuke() returns() -func (_ReleaseOracle *ReleaseOracleTransactor) Nuke(opts *bind.TransactOpts) (*types.Transaction, error) { - return _ReleaseOracle.contract.Transact(opts, "nuke") -} - -// Nuke is a paid mutator transaction binding the contract method 0xbc8fbbf8. 
-// -// Solidity: function nuke() returns() -func (_ReleaseOracle *ReleaseOracleSession) Nuke() (*types.Transaction, error) { - return _ReleaseOracle.Contract.Nuke(&_ReleaseOracle.TransactOpts) -} - -// Nuke is a paid mutator transaction binding the contract method 0xbc8fbbf8. -// -// Solidity: function nuke() returns() -func (_ReleaseOracle *ReleaseOracleTransactorSession) Nuke() (*types.Transaction, error) { - return _ReleaseOracle.Contract.Nuke(&_ReleaseOracle.TransactOpts) -} - -// Promote is a paid mutator transaction binding the contract method 0xd0e0813a. -// -// Solidity: function promote(user address) returns() -func (_ReleaseOracle *ReleaseOracleTransactor) Promote(opts *bind.TransactOpts, user common.Address) (*types.Transaction, error) { - return _ReleaseOracle.contract.Transact(opts, "promote", user) -} - -// Promote is a paid mutator transaction binding the contract method 0xd0e0813a. -// -// Solidity: function promote(user address) returns() -func (_ReleaseOracle *ReleaseOracleSession) Promote(user common.Address) (*types.Transaction, error) { - return _ReleaseOracle.Contract.Promote(&_ReleaseOracle.TransactOpts, user) -} - -// Promote is a paid mutator transaction binding the contract method 0xd0e0813a. -// -// Solidity: function promote(user address) returns() -func (_ReleaseOracle *ReleaseOracleTransactorSession) Promote(user common.Address) (*types.Transaction, error) { - return _ReleaseOracle.Contract.Promote(&_ReleaseOracle.TransactOpts, user) -} - -// Release is a paid mutator transaction binding the contract method 0xd67cbec9. -// -// Solidity: function release(major uint32, minor uint32, patch uint32, commit bytes20) returns() -func (_ReleaseOracle *ReleaseOracleTransactor) Release(opts *bind.TransactOpts, major uint32, minor uint32, patch uint32, commit [20]byte) (*types.Transaction, error) { - return _ReleaseOracle.contract.Transact(opts, "release", major, minor, patch, commit) -} - -// Release is a paid mutator transaction binding the contract method 0xd67cbec9. -// -// Solidity: function release(major uint32, minor uint32, patch uint32, commit bytes20) returns() -func (_ReleaseOracle *ReleaseOracleSession) Release(major uint32, minor uint32, patch uint32, commit [20]byte) (*types.Transaction, error) { - return _ReleaseOracle.Contract.Release(&_ReleaseOracle.TransactOpts, major, minor, patch, commit) -} - -// Release is a paid mutator transaction binding the contract method 0xd67cbec9. -// -// Solidity: function release(major uint32, minor uint32, patch uint32, commit bytes20) returns() -func (_ReleaseOracle *ReleaseOracleTransactorSession) Release(major uint32, minor uint32, patch uint32, commit [20]byte) (*types.Transaction, error) { - return _ReleaseOracle.Contract.Release(&_ReleaseOracle.TransactOpts, major, minor, patch, commit) -} diff --git a/release/contract_test.go b/release/contract_test.go deleted file mode 100644 index 6349a25817cbd..0000000000000 --- a/release/contract_test.go +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package release - -import ( - "crypto/ecdsa" - "math/big" - "testing" - - "github.com/expanse-org/go-expanse/accounts/abi/bind" - "github.com/expanse-org/go-expanse/accounts/abi/bind/backends" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/crypto" -) - -// setupReleaseTest creates a blockchain simulator and deploys a version oracle -// contract for testing. -func setupReleaseTest(t *testing.T, prefund ...*ecdsa.PrivateKey) (*ecdsa.PrivateKey, *ReleaseOracle, *backends.SimulatedBackend) { - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) - - accounts := []core.GenesisAccount{{Address: auth.From, Balance: big.NewInt(10000000000)}} - for _, key := range prefund { - accounts = append(accounts, core.GenesisAccount{Address: crypto.PubkeyToAddress(key.PublicKey), Balance: big.NewInt(10000000000)}) - } - sim := backends.NewSimulatedBackend(accounts...) - - // Deploy a version oracle contract, commit and return - _, _, oracle, err := DeployReleaseOracle(auth, sim, []common.Address{auth.From}) - if err != nil { - t.Fatalf("Failed to deploy version contract: %v", err) - } - sim.Commit() - - return key, oracle, sim -} - -// Tests that the version contract can be deployed and the creator is assigned as -// the sole authorized signer. -func TestContractCreation(t *testing.T) { - key, oracle, _ := setupReleaseTest(t) - - owner := crypto.PubkeyToAddress(key.PublicKey) - signers, err := oracle.Signers(nil) - if err != nil { - t.Fatalf("Failed to retrieve list of signers: %v", err) - } - if len(signers) != 1 || signers[0] != owner { - t.Fatalf("Initial signer mismatch: have %v, want %v", signers, owner) - } -} - -// Tests that subsequent signers can be promoted, each requiring half plus one -// votes for it to pass through. -func TestSignerPromotion(t *testing.T) { - // Prefund a few accounts to authorize with and create the oracle - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - key, oracle, sim := setupReleaseTest(t, keys...) - - // Gradually promote the keys, until all are authorized - keys = append([]*ecdsa.PrivateKey{key}, keys...)
- for i := 1; i < len(keys); i++ { - // Check that no votes are accepted from the not yet authed user - if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[i]), common.Address{}); err != nil { - t.Fatalf("Iter #%d: failed invalid promotion attempt: %v", i, err) - } - sim.Commit() - - pend, err := oracle.AuthProposals(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve active proposals: %v", i, err) - } - if len(pend) != 0 { - t.Fatalf("Iter #%d: proposal count mismatch: have %d, want 0", i, len(pend)) - } - // Promote with half - 1 voters and check that the user's not yet authorized - for j := 0; j < i/2; j++ { - if _, err = oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err) - } - } - sim.Commit() - - signers, err := oracle.Signers(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve list of signers: %v", i, err) - } - if len(signers) != i { - t.Fatalf("Iter #%d: signer count mismatch: have %v, want %v", i, len(signers), i) - } - // Promote with the last one needed to pass the promotion - if _, err = oracle.Promote(bind.NewKeyedTransactor(keys[i/2]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid promotion completion attempt: %v", i, err) - } - sim.Commit() - - signers, err = oracle.Signers(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve list of signers: %v", i, err) - } - if len(signers) != i+1 { - t.Fatalf("Iter #%d: signer count mismatch: have %v, want %v", i, len(signers), i+1) - } - } -} - -// Tests that subsequent signers can be demoted, each requiring half plus one -// votes for it to pass through. -func TestSignerDemotion(t *testing.T) { - // Prefund a few accounts to authorize with and create the oracle - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - key, oracle, sim := setupReleaseTest(t, keys...) - - // Authorize all the keys as valid signers and verify cardinality - keys = append([]*ecdsa.PrivateKey{key}, keys...) 
- for i := 1; i < len(keys); i++ { - for j := 0; j <= i/2; j++ { - if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err) - } - } - sim.Commit() - } - signers, err := oracle.Signers(nil) - if err != nil { - t.Fatalf("Failed to retrieve list of signers: %v", err) - } - if len(signers) != len(keys) { - t.Fatalf("Signer count mismatch: have %v, want %v", len(signers), len(keys)) - } - // Gradually demote users until we run out of signers - for i := len(keys) - 1; i >= 0; i-- { - // Demote with half - 1 voters and check that the user's not yet dropped - for j := 0; j < (i+1)/2; j++ { - if _, err = oracle.Demote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid demotion attempt: %v", len(keys)-i, err) - } - } - sim.Commit() - - signers, err := oracle.Signers(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve list of signers: %v", len(keys)-i, err) - } - if len(signers) != i+1 { - t.Fatalf("Iter #%d: signer count mismatch: have %v, want %v", len(keys)-i, len(signers), i+1) - } - // Demote with the last one needed to pass the demotion - if _, err = oracle.Demote(bind.NewKeyedTransactor(keys[(i+1)/2]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid demotion completion attempt: %v", i, err) - } - sim.Commit() - - signers, err = oracle.Signers(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve list of signers: %v", len(keys)-i, err) - } - if len(signers) != i { - t.Fatalf("Iter #%d: signer count mismatch: have %v, want %v", len(keys)-i, len(signers), i) - } - // Check that no votes are accepted from the already demoted users - if _, err = oracle.Promote(bind.NewKeyedTransactor(keys[i]), common.Address{}); err != nil { - t.Fatalf("Iter #%d: failed invalid promotion attempt: %v", i, err) - } - sim.Commit() - - pend, err := oracle.AuthProposals(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve active proposals: %v", i, err) - } - if len(pend) != 0 { - t.Fatalf("Iter #%d: proposal count mismatch: have %d, want 0", i, len(pend)) - } - } -} - -// Tests that new versions can be released, honouring both voting rights as well -// as the minimum required vote count. -func TestVersionRelease(t *testing.T) { - // Prefund a few accounts to authorize with and create the oracle - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - key, oracle, sim := setupReleaseTest(t, keys...) - - // Track the "current release" - var ( - verMajor = uint32(0) - verMinor = uint32(0) - verPatch = uint32(0) - verCommit = [20]byte{} - ) - // Gradually push releases, always requiring more signers than previously - keys = append([]*ecdsa.PrivateKey{key}, keys...) 
- for i := 1; i < len(keys); i++ { - // Check that no votes are accepted from the not yet authed user - if _, err := oracle.Release(bind.NewKeyedTransactor(keys[i]), 0, 0, 0, [20]byte{0}); err != nil { - t.Fatalf("Iter #%d: failed invalid release attempt: %v", i, err) - } - sim.Commit() - - prop, err := oracle.ProposedVersion(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve active proposal: %v", i, err) - } - if len(prop.Pass) != 0 { - t.Fatalf("Iter #%d: proposal vote count mismatch: have %d, want 0", i, len(prop.Pass)) - } - // Authorize the user to make releases - for j := 0; j <= i/2; j++ { - if _, err = oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err) - } - } - sim.Commit() - - // Propose release with half voters and check that the release does not yet go through - for j := 0; j < (i+1)/2; j++ { - if _, err = oracle.Release(bind.NewKeyedTransactor(keys[j]), uint32(i), uint32(i+1), uint32(i+2), [20]byte{byte(i + 3)}); err != nil { - t.Fatalf("Iter #%d: failed valid release attempt: %v", i, err) - } - } - sim.Commit() - - ver, err := oracle.CurrentVersion(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve current version: %v", i, err) - } - if ver.Major != verMajor || ver.Minor != verMinor || ver.Patch != verPatch || ver.Commit != verCommit { - t.Fatalf("Iter #%d: version mismatch: have %d.%d.%d-%x, want %d.%d.%d-%x", i, ver.Major, ver.Minor, ver.Patch, ver.Commit, verMajor, verMinor, verPatch, verCommit) - } - - // Pass the release and check that it became the next version - verMajor, verMinor, verPatch, verCommit = uint32(i), uint32(i+1), uint32(i+2), [20]byte{byte(i + 3)} - if _, err = oracle.Release(bind.NewKeyedTransactor(keys[(i+1)/2]), uint32(i), uint32(i+1), uint32(i+2), [20]byte{byte(i + 3)}); err != nil { - t.Fatalf("Iter #%d: failed valid release completion attempt: %v", i, err) - } - sim.Commit() - - ver, err = oracle.CurrentVersion(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve current version: %v", i, err) - } - if ver.Major != verMajor || ver.Minor != verMinor || ver.Patch != verPatch || ver.Commit != verCommit { - t.Fatalf("Iter #%d: version mismatch: have %d.%d.%d-%x, want %d.%d.%d-%x", i, ver.Major, ver.Minor, ver.Patch, ver.Commit, verMajor, verMinor, verPatch, verCommit) - } - } -} - -// Tests that proposed versions can be nuked out of existence. -func TestVersionNuking(t *testing.T) { - // Prefund a few accounts to authorize with and create the oracle - keys := make([]*ecdsa.PrivateKey, 9) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - key, oracle, sim := setupReleaseTest(t, keys...) - - // Authorize all the keys as valid signers - keys = append([]*ecdsa.PrivateKey{key}, keys...) 
- for i := 1; i < len(keys); i++ { - for j := 0; j <= i/2; j++ { - if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err) - } - } - sim.Commit() - } - // Propose releases with more and more keys, always retaining enough users to nuke the proposals - for i := 1; i < (len(keys)+1)/2; i++ { - // Propose release with an initial set of signers - for j := 0; j < i; j++ { - if _, err := oracle.Release(bind.NewKeyedTransactor(keys[j]), uint32(i), uint32(i+1), uint32(i+2), [20]byte{byte(i + 3)}); err != nil { - t.Fatalf("Iter #%d: failed valid proposal attempt: %v", i, err) - } - } - sim.Commit() - - prop, err := oracle.ProposedVersion(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve active proposal: %v", i, err) - } - if len(prop.Pass) != i { - t.Fatalf("Iter #%d: proposal vote count mismatch: have %d, want %d", i, len(prop.Pass), i) - } - // Nuke the release with half+1 voters - for j := i; j <= i+(len(keys)+1)/2; j++ { - if _, err := oracle.Nuke(bind.NewKeyedTransactor(keys[j])); err != nil { - t.Fatalf("Iter #%d: failed valid nuke attempt: %v", i, err) - } - } - sim.Commit() - - prop, err = oracle.ProposedVersion(nil) - if err != nil { - t.Fatalf("Iter #%d: failed to retrieve active proposal: %v", i, err) - } - if len(prop.Pass) != 0 || len(prop.Fail) != 0 { - t.Fatalf("Iter #%d: proposal vote count mismatch: have %d/%d pass/fail, want 0/0", i, len(prop.Pass), len(prop.Fail)) - } - } -} - -// Tests that demoting a signer will auto-nuke the currently pending release. -func TestVersionAutoNuke(t *testing.T) { - // Prefund a few accounts to authorize with and create the oracle - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - key, oracle, sim := setupReleaseTest(t, keys...) - - // Authorize all the keys as valid signers - keys = append([]*ecdsa.PrivateKey{key}, keys...) - for i := 1; i < len(keys); i++ { - for j := 0; j <= i/2; j++ { - if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[j]), crypto.PubkeyToAddress(keys[i].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid promotion attempt: %v", i, err) - } - } - sim.Commit() - } - // Make a release proposal and check its existence - if _, err := oracle.Release(bind.NewKeyedTransactor(keys[0]), 1, 2, 3, [20]byte{4}); err != nil { - t.Fatalf("Failed valid proposal attempt: %v", err) - } - sim.Commit() - - prop, err := oracle.ProposedVersion(nil) - if err != nil { - t.Fatalf("Failed to retrieve active proposal: %v", err) - } - if len(prop.Pass) != 1 { - t.Fatalf("Proposal vote count mismatch: have %d, want 1", len(prop.Pass)) - } - // Demote a signer and check release proposal deletion - for i := 0; i <= len(keys)/2; i++ { - if _, err := oracle.Demote(bind.NewKeyedTransactor(keys[i]), crypto.PubkeyToAddress(keys[len(keys)-1].PublicKey)); err != nil { - t.Fatalf("Iter #%d: failed valid demotion attempt: %v", i, err) - } - } - sim.Commit() - - prop, err = oracle.ProposedVersion(nil) - if err != nil { - t.Fatalf("Failed to retrieve active proposal: %v", err) - } - if len(prop.Pass) != 0 { - t.Fatalf("Proposal vote count mismatch: have %d, want 0", len(prop.Pass)) - } -} diff --git a/release/release.go b/release/release.go deleted file mode 100644 index df73944d02475..0000000000000 --- a/release/release.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library.
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// Package release contains the node service that tracks client releases. -package release - -import ( - "fmt" - "strings" - "time" - - "github.com/expanse-org/go-expanse/accounts/abi/bind" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/exp" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/node" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/rpc" -) - -// Interval to check for new releases -const releaseRecheckInterval = time.Hour - -// Config contains the configuration of the release service. -type Config struct { - Oracle common.Address // Expanse address of the release oracle - Major uint32 // Major version component of the release - Minor uint32 // Minor version component of the release - Patch uint32 // Patch version component of the release - Commit [20]byte // Git SHA1 commit hash of the release -} - -// ReleaseService is a node service that periodically checks the blockchain for -// newly released versions of the client being run and issues a warning to the -// user about it. -type ReleaseService struct { - config Config // Current version to check releases against - oracle *ReleaseOracle // Native binding to the release oracle contract - quit chan chan error // Quit channel to terminate the version checker -} - -// NewReleaseService creates a new service to periodically check for new client -// releases and notify the user of such. -func NewReleaseService(ctx *node.ServiceContext, config Config) (node.Service, error) { - // Retrieve the Expanse service dependency to access the blockchain - var expanse *exp.Expanse - if err := ctx.Service(&expanse); err != nil { - return nil, err - } - // Construct the release service - contract, err := NewReleaseOracle(config.Oracle, exp.NewContractBackend(expanse)) - if err != nil { - return nil, err - } - return &ReleaseService{ - config: config, - oracle: contract, - quit: make(chan chan error), - }, nil -} - -// Protocols returns an empty list of P2P protocols as the release service does -// not have a networking component. -func (r *ReleaseService) Protocols() []p2p.Protocol { return nil } - -// APIs returns an empty list of RPC descriptors as the release service does not -// expose any functionality to the outside world. -func (r *ReleaseService) APIs() []rpc.API { return nil } - -// Start spawns the periodic version checker goroutine. -func (r *ReleaseService) Start(server *p2p.Server) error { - go r.checker() - return nil -} - -// Stop terminates all goroutines belonging to the service, blocking until they -// are all terminated.
-func (r *ReleaseService) Stop() error { - errc := make(chan error) - r.quit <- errc - return <-errc -} - -// checker runs indefinitely in the background, periodically checking for new -// client releases. -func (r *ReleaseService) checker() { - // Set up the timers to periodically check for releases - timer := time.NewTimer(0) // Immediately fire a version check - defer timer.Stop() - - for { - select { - // If the time arrived, check for a new release - case <-timer.C: - // Reschedule the timer before continuing - timer.Reset(releaseRecheckInterval) - - // Retrieve the current version, and handle missing contracts gracefully - version, err := r.oracle.CurrentVersion(nil) - if err != nil { - if err == bind.ErrNoCode { - glog.V(logger.Debug).Infof("Release oracle not found at %x", r.config.Oracle) - continue - } - glog.V(logger.Error).Infof("Failed to retrieve current release: %v", err) - continue - } - // Version was successfully retrieved, notify if newer than ours - if version.Major > r.config.Major || - (version.Major == r.config.Major && version.Minor > r.config.Minor) || - (version.Major == r.config.Major && version.Minor == r.config.Minor && version.Patch > r.config.Patch) { - - warning := fmt.Sprintf("Client v%d.%d.%d-%x seems older than the latest upstream release v%d.%d.%d-%x", - r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4]) - howtofix := fmt.Sprintf("Please check https://github.com/expanse-org/go-expanse/releases for new releases") - separator := strings.Repeat("-", len(warning)) - - glog.V(logger.Warn).Info(separator) - glog.V(logger.Warn).Info(warning) - glog.V(logger.Warn).Info(howtofix) - glog.V(logger.Warn).Info(separator) - } else { - glog.V(logger.Debug).Infof("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x", - r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4]) - } - - // If termination was requested, return - case errc := <-r.quit: - errc <- nil - return - } - } -} diff --git a/rpc/notification.go b/rpc/notification.go deleted file mode 100644 index 5f69104922f69..0000000000000 --- a/rpc/notification.go +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
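The notification plumbing deleted below is consumed from RPC method implementations. A hedged sketch of that flow (exampleAPI and the ticking event source are illustrative assumptions; Notifier, Subscription, NotifierFromContext and ErrNotificationsUnsupported are names from the deleted file):

// Assumes the deleted package's context and time imports.
type exampleAPI struct{}

func (api *exampleAPI) SubscribeTicks(ctx context.Context) (Subscription, error) {
	notifier, ok := NotifierFromContext(ctx)
	if !ok {
		return nil, ErrNotificationsUnsupported
	}
	sub, err := notifier.NewSubscription(func(id string) {
		// Runs once when the client unsubscribes or the connection closes.
	})
	if err != nil {
		return nil, err
	}
	go func() {
		for i := 0; ; i++ {
			if err := sub.Notify(i); err != nil {
				return // subscription cancelled or queue full
			}
			time.Sleep(time.Second)
		}
	}()
	return sub, nil
}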
- -package rpc - -import ( - "errors" - "sync" - "time" - - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "golang.org/x/net/context" -) - -var ( - // ErrNotificationsUnsupported is returned when the connection doesn't support notifications - ErrNotificationsUnsupported = errors.New("notifications not supported") - - // ErrNotificationNotFound is returned when the notification for the given id is not found - ErrNotificationNotFound = errors.New("notification not found") - - // errNotifierStopped is returned when the notifier is stopped (e.g. codec is closed) - errNotifierStopped = errors.New("unable to send notification") - - // errNotificationQueueFull is returned when there are too many notifications in the queue - errNotificationQueueFull = errors.New("too many pending notifications") -) - -// unsubSignal is a signal that the subscription is unsubscribed. It is used to flush buffered -// notifications that might be pending in the internal queue. -var unsubSignal = new(struct{}) - -// UnsubscribeCallback defines a callback that is called when a subscription ends. -// It receives the subscription id as argument. -type UnsubscribeCallback func(id string) - -// notification is a helper object that holds event data for a subscription -type notification struct { - sub *bufferedSubscription // subscription the event belongs to - data interface{} // event data -} - -// A Notifier type describes the interface for objects that can create subscriptions -type Notifier interface { - // Create a new subscription. The given callback is called when this subscription - // is cancelled (e.g. client sends an unsubscribe, connection closed). - NewSubscription(UnsubscribeCallback) (Subscription, error) - // Cancel subscription - Unsubscribe(id string) error -} - -type notifierKey struct{} - -// NotifierFromContext returns the Notifier value stored in ctx, if any. -func NotifierFromContext(ctx context.Context) (Notifier, bool) { - n, ok := ctx.Value(notifierKey{}).(Notifier) - return n, ok -} - -// Subscription defines the interface for objects that can notify subscribers -type Subscription interface { - // Inform client of an event - Notify(data interface{}) error - // Unique identifier - ID() string - // Cancel subscription - Cancel() error -} - -// bufferedSubscription is a subscription that uses a bufferedNotifier to send -// notifications to subscribers. -type bufferedSubscription struct { - id string - unsubOnce sync.Once // call unsub method once - unsub UnsubscribeCallback // called on Unsubscribed - notifier *bufferedNotifier // forward notifications to - pending chan interface{} // closed when active - flushed chan interface{} // closed when all buffered notifications are sent - lastNotification time.Time // last time a notification was sent -} - -// ID returns the subscription identifier that the client uses to refer to this instance. -func (s *bufferedSubscription) ID() string { - return s.id -} - -// Cancel informs the notifier that this subscription is cancelled by the API -func (s *bufferedSubscription) Cancel() error { - return s.notifier.Unsubscribe(s.id) -} - -// Notify the subscriber of a particular event. -func (s *bufferedSubscription) Notify(data interface{}) error { - return s.notifier.send(s.id, data) -} - -// bufferedNotifier is a notifier that queues notifications in an internal queue and -// sends them as fast as possible to the client from this queue. It will stop if the -// queue grows past a given size.
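Before the implementation, a compact sketch of the backpressure policy the type defined next encoded: a bounded channel, with the sender failing fast once the client stops keeping up. This is a select/default variant of the deleted len-based check; all names here are illustrative:

package notifysketch

import "errors"

var errQueueFull = errors.New("too many pending notifications")

type event struct {
	subID string
	data  interface{}
}

type boundedQueue struct {
	ch chan event
}

func newBoundedQueue(size int) *boundedQueue {
	return &boundedQueue{ch: make(chan event, size)}
}

// push never blocks: when the buffer is full the caller is told to give up,
// mirroring the deleted notifier's decision to drop the whole connection.
func (q *boundedQueue) push(e event) error {
	select {
	case q.ch <- e:
		return nil
	default:
		return errQueueFull
	}
}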
-type bufferedNotifier struct { - codec ServerCodec // underlying connection - mu sync.Mutex // guard internal state - subscriptions map[string]*bufferedSubscription // keep track of subscriptions associated with codec - queueSize int // max number of items in queue - queue chan *notification // notification queue - stopped bool // indication if this notifier is ordered to stop -} - -// newBufferedNotifier returns a notifier that queues notifications in an internal queue -// from which notifications are sent as fast as possible to the client. If the queue size -// limit is reached (client is unable to keep up) it stops and closes the codec. -func newBufferedNotifier(codec ServerCodec, size int) *bufferedNotifier { - notifier := &bufferedNotifier{ - codec: codec, - subscriptions: make(map[string]*bufferedSubscription), - queue: make(chan *notification, size), - queueSize: size, - } - - go notifier.run() - - return notifier -} - -// NewSubscription creates a new subscription that forwards events to this instance's internal -// queue. The given callback is called when the subscription is unsubscribed/cancelled. -func (n *bufferedNotifier) NewSubscription(callback UnsubscribeCallback) (Subscription, error) { - id, err := newSubscriptionID() - if err != nil { - return nil, err - } - - n.mu.Lock() - defer n.mu.Unlock() - - if n.stopped { - return nil, errNotifierStopped - } - - sub := &bufferedSubscription{ - id: id, - unsub: callback, - notifier: n, - pending: make(chan interface{}), - flushed: make(chan interface{}), - lastNotification: time.Now(), - } - - n.subscriptions[id] = sub - - return sub, nil -} - -// Remove the given subscription. If the subscription is not found, ErrNotificationNotFound is returned. -func (n *bufferedNotifier) Unsubscribe(subid string) error { - n.mu.Lock() - sub, found := n.subscriptions[subid] - n.mu.Unlock() - - if found { - // send the unsubscribe signal, this will cause the notifier not to accept new events - // for this subscription and will close the flushed channel after the last (buffered) - // notification was sent to the client. - if err := n.send(subid, unsubSignal); err != nil { - return err - } - - // wait for confirmation that all (buffered) events are sent for this subscription. - // this ensures that the unsubscribe method response is not sent before all buffered - // events for this subscription are sent. - <-sub.flushed - - return nil - } - - return ErrNotificationNotFound -} - -// Send enqueues the given data for the subscription with public ID on the internal queue. It returns -// an error when the notifier is stopped or the queue is full. If data is the unsubscribe signal it -// will remove the subscription with the given id from the subscription collection. -func (n *bufferedNotifier) send(id string, data interface{}) error { - n.mu.Lock() - defer n.mu.Unlock() - - if n.stopped { - return errNotifierStopped - } - - var ( - subscription *bufferedSubscription - found bool - ) - - // check if subscription is associated with this connection, it might be cancelled - // (unsubscribed/connection closed) - if subscription, found = n.subscriptions[id]; !found { - glog.V(logger.Error).Infof("received notification for unknown subscription %s\n", id) - return ErrNotificationNotFound - } - - // received the unsubscribe signal. Add it to the queue to make sure any pending notifications - // for this subscription are sent.
-// send enqueues the given data for the subscription with the given public ID on the internal -// queue. It returns an error when the notifier is stopped or the queue is full. If data is the -// unsubscribe signal it will remove the subscription with the given id from the subscription -// collection. -func (n *bufferedNotifier) send(id string, data interface{}) error { - n.mu.Lock() - defer n.mu.Unlock() - - if n.stopped { - return errNotifierStopped - } - - var ( - subscription *bufferedSubscription - found bool - ) - - // check if subscription is associated with this connection, it might be cancelled - // (subscribe/connection closed) - if subscription, found = n.subscriptions[id]; !found { - glog.V(logger.Error).Infof("received notification for unknown subscription %s\n", id) - return ErrNotificationNotFound - } - - // received the unsubscribe signal. Add it to the queue to make sure any pending notifications - // for this subscription are sent. When the run loop receives this signal it will signal that - // all pending notifications are flushed and that the confirmation of the unsubscribe can be - // sent to the user. Remove the subscription to make sure new notifications are not accepted. - if data == unsubSignal { - delete(n.subscriptions, id) - if subscription.unsub != nil { - subscription.unsubOnce.Do(func() { subscription.unsub(id) }) - } - } - - subscription.lastNotification = time.Now() - - if len(n.queue) >= n.queueSize { - glog.V(logger.Warn).Infoln("too many buffered notifications -> close connection") - n.codec.Close() - return errNotificationQueueFull - } - - n.queue <- &notification{subscription, data} - return nil -} - -// run reads notifications from the internal queue and sends them to the client. In case of an -// error, or when the codec is closed, it cancels all active subscriptions and returns. -func (n *bufferedNotifier) run() { - defer func() { - n.mu.Lock() - defer n.mu.Unlock() - - n.stopped = true - close(n.queue) - - // on exit call unsubscribe callback - for id, sub := range n.subscriptions { - if sub.unsub != nil { - sub.unsubOnce.Do(func() { sub.unsub(id) }) - } - close(sub.flushed) - delete(n.subscriptions, id) - } - }() - - for { - select { - case notification := <-n.queue: - // It can happen that an event is raised before the RPC server was able to send the sub - // id to the client. Therefore subscriptions are marked as pending until the sub id was - // sent. The RPC server will activate the subscription by closing the pending chan. - <-notification.sub.pending - - if notification.data == unsubSignal { - // unsubSignal is the last accepted message for this subscription. Raise the signal - // that all buffered notifications are sent by closing the flushed channel. This - // indicates that the response for the unsubscribe can be sent to the client. - close(notification.sub.flushed) - } else { - msg := n.codec.CreateNotification(notification.sub.id, notification.data) - if err := n.codec.Write(msg); err != nil { - n.codec.Close() - // unable to send notification to client, unsubscribe all subscriptions - glog.V(logger.Warn).Infof("unable to send notification - %v\n", err) - return - } - } - case <-n.codec.Closed(): // connection was closed - glog.V(logger.Debug).Infoln("codec closed, stop subscriptions") - return - } - } -} - -// activate marks the subscription as active. This causes the notifications for this -// subscription to be forwarded to the client. -func (n *bufferedNotifier) activate(subid string) { - n.mu.Lock() - defer n.mu.Unlock() - - if sub, found := n.subscriptions[subid]; found { - close(sub.pending) - } -} diff --git a/tests/files/ansible/test-files/docker-cpp/Dockerfile b/tests/files/ansible/test-files/docker-cpp/Dockerfile index e761866dc967d..7db2667aee023 100755 --- a/tests/files/ansible/test-files/docker-cpp/Dockerfile +++ b/tests/files/ansible/test-files/docker-cpp/Dockerfile @@ -29,3 +29,4 @@ RUN cd cpp-expanse/build && cmake ..
-DCMAKE_BUILD_TYPE=Release -DHEADLESS=1 && RUN ldconfig ENTRYPOINT ["/cpp-expanse/build/test/createRandomTest"] + diff --git a/tests/files/ansible/test-files/docker-go/Dockerfile b/tests/files/ansible/test-files/docker-go/Dockerfile index c1db5e2134b36..385e47a103dda 100755 --- a/tests/files/ansible/test-files/docker-go/Dockerfile +++ b/tests/files/ansible/test-files/docker-go/Dockerfile @@ -17,7 +17,7 @@ RUN apt-get install -y git mercurial build-essential software-properties-common ## Install Qt5.4 dependencies from PPA RUN add-apt-repository ppa:beineri/opt-qt54-trusty -y RUN apt-get update -y -RUN apt-get install -y qt54quickcontrols qt54webengine +RUN apt-get install -y qt54quickcontrols qt54webengine ## Build and install latest Go RUN git clone https://go.googlesource.com/go golang diff --git a/tests/files/package.json b/tests/files/package.json deleted file mode 100755 index de9286f8ab9ee..0000000000000 --- a/tests/files/package.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "name": "expanse-tests", - "version": "0.0.6", - "description": "tests for expanse", - "main": "index.js", - "scripts": { - "test": "echo \"There are no tests for there tests\" && exit 1" - }, - "repository": { - "type": "git", - "url": "https://github.com/expanse-org/tests" - }, - "keywords": [ - "tests", - "expanse" - ], - "author": "", - "license": "MIT", - "bugs": { - "url": "https://github.com/expanse-org/tests/issues" - }, - "homepage": "https://github.com/expanse-org/tests", - "dependencies": { - "require-all": "^1.0.0" - } -} diff --git a/tests/transaction_test_util.go b/tests/transaction_test_util.go index 3628ee45deefc..83a5a542bc893 100644 --- a/tests/transaction_test_util.go +++ b/tests/transaction_test_util.go @@ -27,7 +27,7 @@ import ( "github.com/expanse-org/go-expanse/common/math" "github.com/expanse-org/go-expanse/core/types" "github.com/expanse-org/go-expanse/log" -\ "github.com/expanse-org/go-expanse/params" + "github.com/expanse-org/go-expanse/params" "github.com/expanse-org/go-expanse/rlp" ) diff --git a/trie/sync.go b/trie/sync.go index a9fe460591912..7cbc811ffd954 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -21,7 +21,6 @@ import ( "fmt" "github.com/expanse-org/go-expanse/common" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) diff --git a/whisper/api.go b/whisper/api.go deleted file mode 100644 index 80e53eaf6396d..0000000000000 --- a/whisper/api.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package whisper - -import ( - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/rpc" -) - -// PublicWhisperAPI provides the whisper RPC service. 
-type PublicWhisperAPI struct { - w *Whisper - - messagesMu sync.RWMutex - messages map[int]*whisperFilter -} - -type whisperOfflineError struct{} - -func (e *whisperOfflineError) Error() string { - return "whisper is offline" -} - -// whisperOffLineErr is returned when the node doesn't offer the shh service. -var whisperOffLineErr = new(whisperOfflineError) - -// NewPublicWhisperAPI creates a new RPC whisper service. -func NewPublicWhisperAPI(w *Whisper) *PublicWhisperAPI { - return &PublicWhisperAPI{w: w, messages: make(map[int]*whisperFilter)} -} - -// Version returns the Whisper version this node offers. -func (s *PublicWhisperAPI) Version() (*rpc.HexNumber, error) { - if s.w == nil { - return rpc.NewHexNumber(0), whisperOffLineErr - } - return rpc.NewHexNumber(s.w.Version()), nil -} - -// HasIdentity checks if the whisper node is configured with the private key -// of the specified public key pair. -func (s *PublicWhisperAPI) HasIdentity(identity string) (bool, error) { - if s.w == nil { - return false, whisperOffLineErr - } - return s.w.HasIdentity(crypto.ToECDSAPub(common.FromHex(identity))), nil -} - -// NewIdentity generates a new cryptographic identity for the client, and injects -// it into the known identities for message decryption. -func (s *PublicWhisperAPI) NewIdentity() (string, error) { - if s.w == nil { - return "", whisperOffLineErr - } - - identity := s.w.NewIdentity() - return common.ToHex(crypto.FromECDSAPub(&identity.PublicKey)), nil -} - -type NewFilterArgs struct { - To string - From string - Topics [][][]byte -} - -// NewFilter creates and registers a new message filter to watch for inbound whisper messages. -func (s *PublicWhisperAPI) NewFilter(args NewFilterArgs) (*rpc.HexNumber, error) { - if s.w == nil { - return nil, whisperOffLineErr - } - - var id int - filter := Filter{ - To: crypto.ToECDSAPub(common.FromHex(args.To)), - From: crypto.ToECDSAPub(common.FromHex(args.From)), - Topics: NewFilterTopics(args.Topics...), - Fn: func(message *Message) { - wmsg := NewWhisperMessage(message) - s.messagesMu.RLock() // Only read lock to the filter pool - defer s.messagesMu.RUnlock() - if s.messages[id] != nil { - s.messages[id].insert(wmsg) - } - }, - } - - id = s.w.Watch(filter) - - s.messagesMu.Lock() - s.messages[id] = newWhisperFilter(id, s.w) - s.messagesMu.Unlock() - - return rpc.NewHexNumber(id), nil -} - -// GetFilterChanges retrieves all the new messages matched by a filter since the last retrieval. -func (s *PublicWhisperAPI) GetFilterChanges(filterId rpc.HexNumber) []WhisperMessage { - s.messagesMu.RLock() - defer s.messagesMu.RUnlock() - - if s.messages[filterId.Int()] != nil { - if changes := s.messages[filterId.Int()].retrieve(); changes != nil { - return changes - } - } - return returnWhisperMessages(nil) -} - -// UninstallFilter disables and removes an existing filter. -func (s *PublicWhisperAPI) UninstallFilter(filterId rpc.HexNumber) bool { - s.messagesMu.Lock() - defer s.messagesMu.Unlock() - - if _, ok := s.messages[filterId.Int()]; ok { - delete(s.messages, filterId.Int()) - return true - } - return false -}
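As an illustration of the Topics shape NewFilter expects (values made up; api is assumed to be a *PublicWhisperAPI): the outer slice lists conditions that must all hold, each inner slice lists alternatives for that condition, and each byte slice is raw data from which the actual 4-byte topic is derived:

args := NewFilterArgs{
	To:   "", // any recipient
	From: "", // any sender
	// a single condition with a single alternative: the topic derived
	// from the bytes of "hello" (see NewFilterTopics/NewTopic further below)
	Topics: [][][]byte{{common.FromHex("0x68656c6c6f")}},
}
id, err := api.NewFilter(args)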
-// GetMessages retrieves all the known messages that match a specific filter. -func (s *PublicWhisperAPI) GetMessages(filterId rpc.HexNumber) []WhisperMessage { - // Retrieve all the cached messages matching a specific, existing filter - s.messagesMu.RLock() - defer s.messagesMu.RUnlock() - - var messages []*Message - if s.messages[filterId.Int()] != nil { - messages = s.messages[filterId.Int()].messages() - } - - return returnWhisperMessages(messages) -} - -// returnWhisperMessages converts whisper messages to RPC whisper messages. -func returnWhisperMessages(messages []*Message) []WhisperMessage { - msgs := make([]WhisperMessage, len(messages)) - for i, msg := range messages { - msgs[i] = NewWhisperMessage(msg) - } - return msgs -} - -type PostArgs struct { - From string `json:"from"` - To string `json:"to"` - Topics [][]byte `json:"topics"` - Payload string `json:"payload"` - Priority int64 `json:"priority"` - TTL int64 `json:"ttl"` -} - -// Post injects a message into the whisper network for distribution. -func (s *PublicWhisperAPI) Post(args PostArgs) (bool, error) { - if s.w == nil { - return false, whisperOffLineErr - } - - // construct whisper message with transmission options - message := NewMessage(common.FromHex(args.Payload)) - options := Options{ - To: crypto.ToECDSAPub(common.FromHex(args.To)), - TTL: time.Duration(args.TTL) * time.Second, - Topics: NewTopics(args.Topics...), - } - - // set sender identity - if len(args.From) > 0 { - if key := s.w.GetIdentity(crypto.ToECDSAPub(common.FromHex(args.From))); key != nil { - options.From = key - } else { - return false, fmt.Errorf("unknown identity to send from: %s", args.From) - } - } - - // Wrap and send the message - pow := time.Duration(args.Priority) * time.Millisecond - envelope, err := message.Wrap(pow, options) - if err != nil { - return false, err - } - - return true, s.w.Send(envelope) -} - -// WhisperMessage is the RPC representation of a whisper message. -type WhisperMessage struct { - ref *Message - - Payload string `json:"payload"` - To string `json:"to"` - From string `json:"from"` - Sent int64 `json:"sent"` - TTL int64 `json:"ttl"` - Hash string `json:"hash"` -} - -func (args *PostArgs) UnmarshalJSON(data []byte) (err error) { - var obj struct { - From string `json:"from"` - To string `json:"to"` - Topics []string `json:"topics"` - Payload string `json:"payload"` - Priority rpc.HexNumber `json:"priority"` - TTL rpc.HexNumber `json:"ttl"` - } - - if err := json.Unmarshal(data, &obj); err != nil { - return err - } - - args.From = obj.From - args.To = obj.To - args.Payload = obj.Payload - args.Priority = obj.Priority.Int64() - args.TTL = obj.TTL.Int64() - - // decode topic strings - args.Topics = make([][]byte, len(obj.Topics)) - for i, topic := range obj.Topics { - args.Topics[i] = common.FromHex(topic) - } - - return nil -}
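For reference, a request body the unmarshaler above should accept, assuming rpc.HexNumber decodes quoted hex strings (all values illustrative):

var args PostArgs
blob := []byte(`{"from":"","to":"0x04aa","topics":["0x68656c6c6f"],"payload":"0x776f726c64","priority":"0x32","ttl":"0x64"}`)
if err := json.Unmarshal(blob, &args); err != nil {
	panic(err) // example only
}
// args.Priority == 50, args.TTL == 100, args.Topics[0] == []byte("hello")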
-// UnmarshalJSON implements the json.Unmarshaler interface, invoked to convert a -// JSON message blob into a NewFilterArgs structure. -func (args *NewFilterArgs) UnmarshalJSON(b []byte) (err error) { - // Unmarshal the JSON message and sanity check - var obj struct { - To interface{} `json:"to"` - From interface{} `json:"from"` - Topics interface{} `json:"topics"` - } - if err := json.Unmarshal(b, &obj); err != nil { - return err - } - - // Retrieve the simple data contents of the filter arguments - if obj.To == nil { - args.To = "" - } else { - argstr, ok := obj.To.(string) - if !ok { - return fmt.Errorf("to is not a string") - } - args.To = argstr - } - if obj.From == nil { - args.From = "" - } else { - argstr, ok := obj.From.(string) - if !ok { - return fmt.Errorf("from is not a string") - } - args.From = argstr - } - // Construct the nested topic array - if obj.Topics != nil { - // Make sure we have an actual topic array - list, ok := obj.Topics.([]interface{}) - if !ok { - return fmt.Errorf("topics is not an array") - } - // Iterate over each topic and handle nil, string or array - topics := make([][]string, len(list)) - for idx, field := range list { - switch value := field.(type) { - case nil: - topics[idx] = []string{} - - case string: - topics[idx] = []string{value} - - case []interface{}: - topics[idx] = make([]string, len(value)) - for i, nested := range value { - switch value := nested.(type) { - case nil: - topics[idx][i] = "" - - case string: - topics[idx][i] = value - - default: - return fmt.Errorf("topic[%d][%d] is not a string", idx, i) - } - } - default: - return fmt.Errorf("topic[%d] not a string or array", idx) - } - } - - topicsDecoded := make([][][]byte, len(topics)) - for i, condition := range topics { - topicsDecoded[i] = make([][]byte, len(condition)) - for j, topic := range condition { - topicsDecoded[i][j] = common.FromHex(topic) - } - } - - args.Topics = topicsDecoded - } - return nil -} - -// whisperFilter is the message cache matching a specific filter, accumulating -// inbound messages until they are requested by the client. -type whisperFilter struct { - id int // Filter identifier for old message retrieval - ref *Whisper // Whisper reference for old message retrieval - - cache []WhisperMessage // Cache of messages not yet polled - skip map[common.Hash]struct{} // List of retrieved messages to avoid duplication - update time.Time // Time of the last message query - - lock sync.RWMutex // Lock protecting the filter internals -} - -// messages retrieves all the cached messages from the entire pool matching the -// filter, resetting the filter's change buffer. -func (w *whisperFilter) messages() []*Message { - w.lock.Lock() - defer w.lock.Unlock() - - w.cache = nil - w.update = time.Now() - - w.skip = make(map[common.Hash]struct{}) - messages := w.ref.Messages(w.id) - for _, message := range messages { - w.skip[message.Hash] = struct{}{} - } - return messages -} - -// insert injects a new batch of messages into the filter cache. -func (w *whisperFilter) insert(messages ...WhisperMessage) { - w.lock.Lock() - defer w.lock.Unlock() - - for _, message := range messages { - if _, ok := w.skip[message.ref.Hash]; !ok { - w.cache = append(w.cache, message) - } - } -} - -// retrieve fetches all the cached messages from the filter. -func (w *whisperFilter) retrieve() (messages []WhisperMessage) { - w.lock.Lock() - defer w.lock.Unlock() - - messages, w.cache = w.cache, nil - w.update = time.Now() - - return -} - -// activity returns the last time client requests were executed on -// the filter.
-func (w *whisperFilter) activity() time.Time { - w.lock.RLock() - defer w.lock.RUnlock() - - return w.update -} - -// newWhisperFilter creates a new serialized, poll based whisper topic filter. -func newWhisperFilter(id int, ref *Whisper) *whisperFilter { - return &whisperFilter{ - id: id, - ref: ref, - - update: time.Now(), - skip: make(map[common.Hash]struct{}), - } -} - -// NewWhisperMessage converts an internal message into an API version. -func NewWhisperMessage(message *Message) WhisperMessage { - return WhisperMessage{ - ref: message, - - Payload: common.ToHex(message.Payload), - From: common.ToHex(crypto.FromECDSAPub(message.Recover())), - To: common.ToHex(crypto.FromECDSAPub(message.To)), - Sent: message.Sent.Unix(), - TTL: int64(message.TTL / time.Second), - Hash: common.ToHex(message.Hash.Bytes()), - } -} diff --git a/whisper/doc.go b/whisper/doc.go deleted file mode 100644 index 5351fee1a5a60..0000000000000 --- a/whisper/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -/* -Package whisper implements the Whisper PoC-1. - -(https://github.com/expanse-org/wiki/wiki/Whisper-PoC-1-Protocol-Spec) - -Whisper combines aspects of both DHTs and datagram messaging systems (e.g. UDP). -As such it may be likened and compared to both, not dissimilar to the -matter/energy duality (apologies to physicists for the blatant abuse of a -fundamental and beautiful natural principle). - -Whisper is a pure identity-based messaging system. Whisper provides a low-level -(non-application-specific) but easily-accessible API without being based upon -or prejudiced by the low-level hardware attributes and characteristics, -particularly the notion of singular endpoints. -*/ -package whisper diff --git a/whisper/envelope.go b/whisper/envelope.go deleted file mode 100644 index fd8dd8e84824b..0000000000000 --- a/whisper/envelope.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -// Contains the Whisper protocol Envelope element. 
For formal details please see -// the specs at https://github.com/expanse-org/wiki/wiki/Whisper-PoC-1-Protocol-Spec#envelopes. - -package whisper - -import ( - "crypto/ecdsa" - "encoding/binary" - "fmt" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/crypto/ecies" - "github.com/expanse-org/go-expanse/rlp" -) - -// Envelope represents a clear-text data packet to transmit through the Whisper -// network. Its contents may or may not be encrypted and signed. -type Envelope struct { - Expiry uint32 // Whisper protocol specifies int32, really should be int64 - TTL uint32 // ^^^^^^ - Topics []Topic - Data []byte - Nonce uint32 - - hash common.Hash // Cached hash of the envelope to avoid rehashing every time -} - -// NewEnvelope wraps a Whisper message with expiration and destination data -// included into an envelope for network forwarding. -func NewEnvelope(ttl time.Duration, topics []Topic, msg *Message) *Envelope { - return &Envelope{ - Expiry: uint32(time.Now().Add(ttl).Unix()), - TTL: uint32(ttl.Seconds()), - Topics: topics, - Data: msg.bytes(), - Nonce: 0, - } -} - -// Seal closes the envelope by spending the requested amount of time as a proof -// of work on hashing the data. -func (self *Envelope) Seal(pow time.Duration) { - d := make([]byte, 64) - copy(d[:32], self.rlpWithoutNonce()) - - finish, bestBit := time.Now().Add(pow).UnixNano(), 0 - for nonce := uint32(0); time.Now().UnixNano() < finish; { - for i := 0; i < 1024; i++ { - binary.BigEndian.PutUint32(d[60:], nonce) - - firstBit := common.FirstBitSet(common.BigD(crypto.Keccak256(d))) - if firstBit > bestBit { - self.Nonce, bestBit = nonce, firstBit - } - nonce++ - } - } -} - -// rlpWithoutNonce returns the RLP encoded envelope contents, except the nonce. -func (self *Envelope) rlpWithoutNonce() []byte { - enc, _ := rlp.EncodeToBytes([]interface{}{self.Expiry, self.TTL, self.Topics, self.Data}) - return enc -} - -// Open extracts the message contained within a potentially encrypted envelope. -func (self *Envelope) Open(key *ecdsa.PrivateKey) (msg *Message, err error) { - // Split open the payload into a message construct - data := self.Data - - message := &Message{ - Flags: data[0], - Sent: time.Unix(int64(self.Expiry-self.TTL), 0), - TTL: time.Duration(self.TTL) * time.Second, - Hash: self.Hash(), - } - data = data[1:] - - if message.Flags&signatureFlag == signatureFlag { - if len(data) < signatureLength { - return nil, fmt.Errorf("unable to open envelope. First bit set but len(data) < len(signature)") - } - message.Signature, data = data[:signatureLength], data[signatureLength:] - } - message.Payload = data - - // Decrypt the message, if requested - if key == nil { - return message, nil - } - err = message.decrypt(key) - switch err { - case nil: - return message, nil - - case ecies.ErrInvalidPublicKey: // Payload isn't encrypted - return message, err - - default: - return nil, fmt.Errorf("unable to open envelope, decrypt failed: %v", err) - } -} - -// Hash returns the SHA3 hash of the envelope, calculating it if not yet done. -func (self *Envelope) Hash() common.Hash { - if (self.hash == common.Hash{}) { - enc, _ := rlp.EncodeToBytes(self) - self.hash = crypto.Keccak256Hash(enc) - } - return self.hash -} - -// DecodeRLP decodes an Envelope from an RLP data stream. 
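// An illustrative receiving-side sketch (not part of this file): the decoder
// is invoked transparently by the rlp package, caching the envelope hash:
//
//	var envelope Envelope
//	if err := rlp.DecodeBytes(raw, &envelope); err != nil {
//		return err
//	}
//	msg, err := envelope.Open(nil) // nil key: open unencrypted/anonymous payloads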
-func (self *Envelope) DecodeRLP(s *rlp.Stream) error { - raw, err := s.Raw() - if err != nil { - return err - } - // The decoding of Envelope uses the struct fields but also needs - // to compute the hash of the whole RLP-encoded envelope. This - // type has the same structure as Envelope but is not an - // rlp.Decoder so we can reuse the Envelope struct definition. - type rlpenv Envelope - if err := rlp.DecodeBytes(raw, (*rlpenv)(self)); err != nil { - return err - } - self.hash = crypto.Keccak256Hash(raw) - return nil -} diff --git a/whisper/envelope_test.go b/whisper/envelope_test.go deleted file mode 100644 index 295b3d3ac4001..0000000000000 --- a/whisper/envelope_test.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package whisper - -import ( - "bytes" - "testing" - "time" - - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/crypto/ecies" -) - -func TestEnvelopeOpen(t *testing.T) { - payload := []byte("hello world") - message := NewMessage(payload) - - envelope, err := message.Wrap(DefaultPoW, Options{}) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - opened, err := envelope.Open(nil) - if err != nil { - t.Fatalf("failed to open envelope: %v", err) - } - if opened.Flags != message.Flags { - t.Fatalf("flags mismatch: have %d, want %d", opened.Flags, message.Flags) - } - if bytes.Compare(opened.Signature, message.Signature) != 0 { - t.Fatalf("signature mismatch: have 0x%x, want 0x%x", opened.Signature, message.Signature) - } - if bytes.Compare(opened.Payload, message.Payload) != 0 { - t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, message.Payload) - } - if opened.Sent.Unix() != message.Sent.Unix() { - t.Fatalf("send time mismatch: have %d, want %d", opened.Sent, message.Sent) - } - if opened.TTL/time.Second != DefaultTTL/time.Second { - t.Fatalf("message TTL mismatch: have %v, want %v", opened.TTL, DefaultTTL) - } - - if opened.Hash != envelope.Hash() { - t.Fatalf("message hash mismatch: have 0x%x, want 0x%x", opened.Hash, envelope.Hash()) - } -} - -func TestEnvelopeAnonymousOpenUntargeted(t *testing.T) { - payload := []byte("hello envelope") - envelope, err := NewMessage(payload).Wrap(DefaultPoW, Options{}) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - opened, err := envelope.Open(nil) - if err != nil { - t.Fatalf("failed to open envelope: %v", err) - } - if opened.To != nil { - t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To) - } - if bytes.Compare(opened.Payload, payload) != 0 { - t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload) - } -} - -func TestEnvelopeAnonymousOpenTargeted(t *testing.T) { - key, err := crypto.GenerateKey() - if err != nil { - t.Fatalf("failed to generate test identity: %v", err) 
- } - - payload := []byte("hello envelope") - envelope, err := NewMessage(payload).Wrap(DefaultPoW, Options{ - To: &key.PublicKey, - }) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - opened, err := envelope.Open(nil) - if err != nil { - t.Fatalf("failed to open envelope: %v", err) - } - if opened.To != nil { - t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To) - } - if bytes.Compare(opened.Payload, payload) == 0 { - t.Fatalf("payload match, should have been encrypted: 0x%x", opened.Payload) - } -} - -func TestEnvelopeIdentifiedOpenUntargeted(t *testing.T) { - key, err := crypto.GenerateKey() - if err != nil { - t.Fatalf("failed to generate test identity: %v", err) - } - - payload := []byte("hello envelope") - envelope, err := NewMessage(payload).Wrap(DefaultPoW, Options{}) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - opened, err := envelope.Open(key) - switch err { - case nil: - t.Fatalf("envelope opened with bad key: %v", opened) - - case ecies.ErrInvalidPublicKey: - // Ok, key mismatch but opened - - default: - t.Fatalf("failed to open envelope: %v", err) - } - - if opened.To != nil { - t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To) - } - if bytes.Compare(opened.Payload, payload) != 0 { - t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload) - } -} - -func TestEnvelopeIdentifiedOpenTargeted(t *testing.T) { - key, err := crypto.GenerateKey() - if err != nil { - t.Fatalf("failed to generate test identity: %v", err) - } - - payload := []byte("hello envelope") - envelope, err := NewMessage(payload).Wrap(DefaultPoW, Options{ - To: &key.PublicKey, - }) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - opened, err := envelope.Open(key) - if err != nil { - t.Fatalf("failed to open envelope: %v", err) - } - if opened.To != nil { - t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To) - } - if bytes.Compare(opened.Payload, payload) != 0 { - t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload) - } -} diff --git a/whisper/filter.go b/whisper/filter.go deleted file mode 100644 index db4f849a0079f..0000000000000 --- a/whisper/filter.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -// Contains the message filter for fine grained subscriptions. - -package whisper - -import ( - "crypto/ecdsa" - - "github.com/expanse-org/go-expanse/event/filter" -) - -// Filter is used to subscribe to specific types of whisper messages. 
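// An illustrative subscription through this type (sketch only; shh is assumed
// to be a *Whisper instance):
//
//	id := shh.Watch(Filter{
//		Topics: NewFilterTopicsFromStrings([]string{"topicA", "topicB"}),
//		Fn: func(msg *Message) {
//			fmt.Println("matched payload:", string(msg.Payload))
//		},
//	})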
-type Filter struct { - To *ecdsa.PublicKey // Recipient of the message - From *ecdsa.PublicKey // Sender of the message - Topics [][]Topic // Topics to filter messages with - Fn func(msg *Message) // Handler in case of a match -} - -// NewFilterTopics creates a 2D topic array used by whisper.Filter from binary -// data elements. -func NewFilterTopics(data ...[][]byte) [][]Topic { - filter := make([][]Topic, len(data)) - for i, condition := range data { - // Handle the special case of condition == [[]byte{}] - if len(condition) == 1 && len(condition[0]) == 0 { - filter[i] = []Topic{} - continue - } - // Otherwise flatten normally - filter[i] = NewTopics(condition...) - } - return filter -} - -// NewFilterTopicsFlat creates a 2D topic array used by whisper.Filter from flat -// binary data elements. -func NewFilterTopicsFlat(data ...[]byte) [][]Topic { - filter := make([][]Topic, len(data)) - for i, element := range data { - // Only add non-wildcard topics - filter[i] = make([]Topic, 0, 1) - if len(element) > 0 { - filter[i] = append(filter[i], NewTopic(element)) - } - } - return filter -} - -// NewFilterTopicsFromStrings creates a 2D topic array used by whisper.Filter -// from textual data elements. -func NewFilterTopicsFromStrings(data ...[]string) [][]Topic { - filter := make([][]Topic, len(data)) - for i, condition := range data { - // Handle the special case of condition == [""] - if len(condition) == 1 && condition[0] == "" { - filter[i] = []Topic{} - continue - } - // Otherwise flatten normally - filter[i] = NewTopicsFromStrings(condition...) - } - return filter -} - -// NewFilterTopicsFromStringsFlat creates a 2D topic array used by whisper.Filter from flat -// binary data elements. -func NewFilterTopicsFromStringsFlat(data ...string) [][]Topic { - filter := make([][]Topic, len(data)) - for i, element := range data { - // Only add non-wildcard topics - filter[i] = make([]Topic, 0, 1) - if element != "" { - filter[i] = append(filter[i], NewTopicFromString(element)) - } - } - return filter -} - -// filterer is the internal, fully initialized filter ready to match inbound -// messages to a variety of criteria. -type filterer struct { - to string // Recipient of the message - from string // Sender of the message - matcher *topicMatcher // Topics to filter messages with - fn func(data interface{}) // Handler in case of a match -} - -// Compare checks if the specified filter matches the current one. -func (self filterer) Compare(f filter.Filter) bool { - filter := f.(filterer) - - // Check the message sender and recipient - if len(self.to) > 0 && self.to != filter.to { - return false - } - if len(self.from) > 0 && self.from != filter.from { - return false - } - // Check the topic filtering - topics := make([]Topic, len(filter.matcher.conditions)) - for i, group := range filter.matcher.conditions { - // Message should contain a single topic entry, extract - for topics[i], _ = range group { - break - } - } - if !self.matcher.Matches(topics) { - return false - } - return true -} - -// Trigger is called when a filter successfully matches an inbound message. -func (self filterer) Trigger(data interface{}) { - self.fn(data) -} diff --git a/whisper/main.go b/whisper/main.go deleted file mode 100644 index 97f8652749f6d..0000000000000 --- a/whisper/main.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. 
-// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -// +build none - -// Contains a simple whisper peer setup and self messaging to allow playing -// around with the protocol and API without a fancy client implementation. - -package main - -import ( - "fmt" - "log" - "os" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/p2p/nat" - "github.com/expanse-org/go-expanse/whisper" -) - -func main() { - logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.InfoLevel)) - - // Generate the peer identity - key, err := crypto.GenerateKey() - if err != nil { - fmt.Printf("Failed to generate peer key: %v.\n", err) - os.Exit(-1) - } - name := common.MakeName("whisper-go", "1.0") - shh := whisper.New() - - // Create an Expanse peer to communicate through - server := p2p.Server{ - PrivateKey: key, - MaxPeers: 10, - Name: name, - Protocols: []p2p.Protocol{shh.Protocol()}, - ListenAddr: ":30300", - NAT: nat.Any(), - } - fmt.Println("Starting Expanse peer...") - if err := server.Start(); err != nil { - fmt.Printf("Failed to start Expanse peer: %v.\n", err) - os.Exit(1) - } - - // Send a message to self to check that something works - payload := fmt.Sprintf("Hello world, this is %v. In case you're wondering, the time is %v", name, time.Now()) - if err := selfSend(shh, []byte(payload)); err != nil { - fmt.Printf("Failed to self message: %v.\n", err) - os.Exit(-1) - } -} - -// SendSelf wraps a payload into a Whisper envelope and forwards it to itself. -func selfSend(shh *whisper.Whisper, payload []byte) error { - ok := make(chan struct{}) - - // Start watching for self messages, output any arrivals - id := shh.NewIdentity() - shh.Watch(whisper.Filter{ - To: &id.PublicKey, - Fn: func(msg *whisper.Message) { - fmt.Printf("Message received: %s, signed with 0x%x.\n", string(msg.Payload), msg.Signature) - close(ok) - }, - }) - // Wrap the payload and encrypt it - msg := whisper.NewMessage(payload) - envelope, err := msg.Wrap(whisper.DefaultPoW, whisper.Options{ - From: id, - To: &id.PublicKey, - TTL: whisper.DefaultTTL, - }) - if err != nil { - return fmt.Errorf("failed to seal message: %v", err) - } - // Dump the message into the system and wait for it to pop back out - if err := shh.Send(envelope); err != nil { - return fmt.Errorf("failed to send self-message: %v", err) - } - select { - case <-ok: - case <-time.After(time.Second): - return fmt.Errorf("failed to receive message in time") - } - return nil -} diff --git a/whisper/message.go b/whisper/message.go deleted file mode 100644 index 7bc2880e6f20b..0000000000000 --- a/whisper/message.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. 
-// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -// Contains the Whisper protocol Message element. For formal details please see -// the specs at https://github.com/expanse-org/wiki/wiki/Whisper-PoC-1-Protocol-Spec#messages. - -package whisper - -import ( - "crypto/ecdsa" - "math/rand" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" -) - -// Message represents an end-user data packet to transmit through the Whisper -// protocol. These are wrapped into Envelopes that need not be understood by -// intermediate nodes, just forwarded. -type Message struct { - Flags byte // First bit is signature presence, rest reserved and should be random - Signature []byte - Payload []byte - - Sent time.Time // Time when the message was posted into the network - TTL time.Duration // Maximum time to live allowed for the message - - To *ecdsa.PublicKey // Message recipient (identity used to decode the message) - Hash common.Hash // Message envelope hash to act as a unique id -} - -// Options specifies the exact way a message should be wrapped into an Envelope. -type Options struct { - From *ecdsa.PrivateKey - To *ecdsa.PublicKey - TTL time.Duration - Topics []Topic -} - -// NewMessage creates and initializes a non-signed, non-encrypted Whisper message. -func NewMessage(payload []byte) *Message { - // Construct an initial flag set: no signature, rest random - flags := byte(rand.Intn(256)) - flags &= ^signatureFlag - - // Assemble and return the message - return &Message{ - Flags: flags, - Payload: payload, - Sent: time.Now(), - } -} - -// Wrap bundles the message into an Envelope to transmit over the network. -// -// pow (Proof Of Work) controls how much time to spend on hashing the message, -// inherently controlling its priority through the network (smaller hash, bigger -// priority). 
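// For instance (illustrative), half a second of sealing work on a broadcast:
//
//	envelope, err := msg.Wrap(500*time.Millisecond, Options{TTL: DefaultTTL})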
-// -// The user can control the amount of identity, privacy and encryption through -// the options parameter as follows: -// - options.From == nil && options.To == nil: anonymous broadcast -// - options.From != nil && options.To == nil: signed broadcast (known sender) -// - options.From == nil && options.To != nil: encrypted anonymous message -// - options.From != nil && options.To != nil: encrypted signed message -func (self *Message) Wrap(pow time.Duration, options Options) (*Envelope, error) { - // Use the default TTL if non was specified - if options.TTL == 0 { - options.TTL = DefaultTTL - } - self.TTL = options.TTL - - // Sign and encrypt the message if requested - if options.From != nil { - if err := self.sign(options.From); err != nil { - return nil, err - } - } - if options.To != nil { - if err := self.encrypt(options.To); err != nil { - return nil, err - } - } - // Wrap the processed message, seal it and return - envelope := NewEnvelope(options.TTL, options.Topics, self) - envelope.Seal(pow) - - return envelope, nil -} - -// sign calculates and sets the cryptographic signature for the message , also -// setting the sign flag. -func (self *Message) sign(key *ecdsa.PrivateKey) (err error) { - self.Flags |= signatureFlag - self.Signature, err = crypto.Sign(self.hash(), key) - return -} - -// Recover retrieves the public key of the message signer. -func (self *Message) Recover() *ecdsa.PublicKey { - defer func() { recover() }() // in case of invalid signature - - // Short circuit if no signature is present - if self.Signature == nil { - return nil - } - // Otherwise try and recover the signature - pub, err := crypto.SigToPub(self.hash(), self.Signature) - if err != nil { - glog.V(logger.Error).Infof("Could not get public key from signature: %v", err) - return nil - } - return pub -} - -// encrypt encrypts a message payload with a public key. -func (self *Message) encrypt(key *ecdsa.PublicKey) (err error) { - self.Payload, err = crypto.Encrypt(key, self.Payload) - return -} - -// decrypt decrypts an encrypted payload with a private key. -func (self *Message) decrypt(key *ecdsa.PrivateKey) error { - cleartext, err := crypto.Decrypt(key, self.Payload) - if err == nil { - self.Payload = cleartext - } - return err -} - -// hash calculates the SHA3 checksum of the message flags and payload. -func (self *Message) hash() []byte { - return crypto.Keccak256(append([]byte{self.Flags}, self.Payload...)) -} - -// bytes flattens the message contents (flags, signature and payload) into a -// single binary blob. -func (self *Message) bytes() []byte { - return append([]byte{self.Flags}, append(self.Signature, self.Payload...)...) -} diff --git a/whisper/message_test.go b/whisper/message_test.go deleted file mode 100644 index 54134cbf4724a..0000000000000 --- a/whisper/message_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package whisper - -import ( - "bytes" - "crypto/elliptic" - "testing" - "time" - - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/crypto/secp256k1" -) - -// Tests whether a message can be wrapped without any identity or encryption. -func TestMessageSimpleWrap(t *testing.T) { - payload := []byte("hello world") - - msg := NewMessage(payload) - if _, err := msg.Wrap(DefaultPoW, Options{}); err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - if msg.Flags&signatureFlag != 0 { - t.Fatalf("signature flag mismatch: have %d, want %d", msg.Flags&signatureFlag, 0) - } - if len(msg.Signature) != 0 { - t.Fatalf("signature found for simple wrapping: 0x%x", msg.Signature) - } - if bytes.Compare(msg.Payload, payload) != 0 { - t.Fatalf("payload mismatch after wrapping: have 0x%x, want 0x%x", msg.Payload, payload) - } - if msg.TTL/time.Second != DefaultTTL/time.Second { - t.Fatalf("message TTL mismatch: have %v, want %v", msg.TTL, DefaultTTL) - } -} - -// Tests whether a message can be signed, and wrapped in plain-text. -func TestMessageCleartextSignRecover(t *testing.T) { - key, err := crypto.GenerateKey() - if err != nil { - t.Fatalf("failed to create crypto key: %v", err) - } - payload := []byte("hello world") - - msg := NewMessage(payload) - if _, err := msg.Wrap(DefaultPoW, Options{ - From: key, - }); err != nil { - t.Fatalf("failed to sign message: %v", err) - } - if msg.Flags&signatureFlag != signatureFlag { - t.Fatalf("signature flag mismatch: have %d, want %d", msg.Flags&signatureFlag, signatureFlag) - } - if bytes.Compare(msg.Payload, payload) != 0 { - t.Fatalf("payload mismatch after signing: have 0x%x, want 0x%x", msg.Payload, payload) - } - - pubKey := msg.Recover() - if pubKey == nil { - t.Fatalf("failed to recover public key") - } - p1 := elliptic.Marshal(secp256k1.S256(), key.PublicKey.X, key.PublicKey.Y) - p2 := elliptic.Marshal(secp256k1.S256(), pubKey.X, pubKey.Y) - if !bytes.Equal(p1, p2) { - t.Fatalf("public key mismatch: have 0x%x, want 0x%x", p2, p1) - } -} - -// Tests whether a message can be encrypted and decrypted using an anonymous -// sender (i.e. no signature). -func TestMessageAnonymousEncryptDecrypt(t *testing.T) { - key, err := crypto.GenerateKey() - if err != nil { - t.Fatalf("failed to create recipient crypto key: %v", err) - } - payload := []byte("hello world") - - msg := NewMessage(payload) - envelope, err := msg.Wrap(DefaultPoW, Options{ - To: &key.PublicKey, - }) - if err != nil { - t.Fatalf("failed to encrypt message: %v", err) - } - if msg.Flags&signatureFlag != 0 { - t.Fatalf("signature flag mismatch: have %d, want %d", msg.Flags&signatureFlag, 0) - } - if len(msg.Signature) != 0 { - t.Fatalf("signature found for anonymous message: 0x%x", msg.Signature) - } - - out, err := envelope.Open(key) - if err != nil { - t.Fatalf("failed to open encrypted message: %v", err) - } - if !bytes.Equal(out.Payload, payload) { - t.Errorf("payload mismatch: have 0x%x, want 0x%x", out.Payload, payload) - } -} - -// Tests whether a message can be properly signed and encrypted. 
-func TestMessageFullCrypto(t *testing.T) { - fromKey, err := crypto.GenerateKey() - if err != nil { - t.Fatalf("failed to create sender crypto key: %v", err) - } - toKey, err := crypto.GenerateKey() - if err != nil { - t.Fatalf("failed to create recipient crypto key: %v", err) - } - - payload := []byte("hello world") - msg := NewMessage(payload) - envelope, err := msg.Wrap(DefaultPoW, Options{ - From: fromKey, - To: &toKey.PublicKey, - }) - if err != nil { - t.Fatalf("failed to encrypt message: %v", err) - } - if msg.Flags&signatureFlag != signatureFlag { - t.Fatalf("signature flag mismatch: have %d, want %d", msg.Flags&signatureFlag, signatureFlag) - } - if len(msg.Signature) == 0 { - t.Fatalf("no signature found for signed message") - } - - out, err := envelope.Open(toKey) - if err != nil { - t.Fatalf("failed to open encrypted message: %v", err) - } - if !bytes.Equal(out.Payload, payload) { - t.Errorf("payload mismatch: have 0x%x, want 0x%x", out.Payload, payload) - } - - pubKey := out.Recover() - if pubKey == nil { - t.Fatalf("failed to recover public key") - } - p1 := elliptic.Marshal(secp256k1.S256(), fromKey.PublicKey.X, fromKey.PublicKey.Y) - p2 := elliptic.Marshal(secp256k1.S256(), pubKey.X, pubKey.Y) - if !bytes.Equal(p1, p2) { - t.Fatalf("public key mismatch: have 0x%x, want 0x%x", p2, p1) - } -} diff --git a/whisper/peer.go b/whisper/peer.go deleted file mode 100644 index a93962f5cfe36..0000000000000 --- a/whisper/peer.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package whisper - -import ( - "fmt" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/rlp" - "gopkg.in/fatih/set.v0" -) - -// peer represents a whisper protocol peer connection. -type peer struct { - host *Whisper - peer *p2p.Peer - ws p2p.MsgReadWriter - - known *set.Set // Messages already known by the peer to avoid wasting bandwidth - - quit chan struct{} -} - -// newPeer creates a new whisper peer object, but does not run the handshake itself. -func newPeer(host *Whisper, remote *p2p.Peer, rw p2p.MsgReadWriter) *peer { - return &peer{ - host: host, - peer: remote, - ws: rw, - known: set.New(), - quit: make(chan struct{}), - } -} - -// start initiates the peer updater, periodically broadcasting the whisper packets -// into the network. -func (self *peer) start() { - go self.update() - glog.V(logger.Debug).Infof("%v: whisper started", self.peer) -} - -// stop terminates the peer updater, stopping message forwarding to it. 
-func (self *peer) stop() { - close(self.quit) - glog.V(logger.Debug).Infof("%v: whisper stopped", self.peer) -} - -// handshake sends the protocol initiation status message to the remote peer and -// verifies the remote status too. -func (self *peer) handshake() error { - // Send the handshake status message asynchronously - errc := make(chan error, 1) - go func() { - errc <- p2p.SendItems(self.ws, statusCode, protocolVersion) - }() - // Fetch the remote status packet and verify protocol match - packet, err := self.ws.ReadMsg() - if err != nil { - return err - } - if packet.Code != statusCode { - return fmt.Errorf("peer sent %x before status packet", packet.Code) - } - s := rlp.NewStream(packet.Payload, uint64(packet.Size)) - if _, err := s.List(); err != nil { - return fmt.Errorf("bad status message: %v", err) - } - peerVersion, err := s.Uint() - if err != nil { - return fmt.Errorf("bad status message: %v", err) - } - if peerVersion != protocolVersion { - return fmt.Errorf("protocol version mismatch %d != %d", peerVersion, protocolVersion) - } - // Wait until our own status is consumed too - if err := <-errc; err != nil { - return fmt.Errorf("failed to send status packet: %v", err) - } - return nil -} - -// update executes periodic operations on the peer, including message transmission -// and expiration. -func (self *peer) update() { - // Start the tickers for the updates - expire := time.NewTicker(expirationCycle) - transmit := time.NewTicker(transmissionCycle) - - // Loop and transmit until termination is requested - for { - select { - case <-expire.C: - self.expire() - - case <-transmit.C: - if err := self.broadcast(); err != nil { - glog.V(logger.Info).Infof("%v: broadcast failed: %v", self.peer, err) - return - } - - case <-self.quit: - return - } - } -} - -// mark marks an envelope known to the peer so that it won't be sent back. -func (self *peer) mark(envelope *Envelope) { - self.known.Add(envelope.Hash()) -} - -// marked checks if an envelope is already known to the remote peer. -func (self *peer) marked(envelope *Envelope) bool { - return self.known.Has(envelope.Hash()) -} - -// expire iterates over all the known envelopes in the host and removes all -// expired (unknown) ones from the known list. -func (self *peer) expire() { - // Assemble the list of available envelopes - available := set.NewNonTS() - for _, envelope := range self.host.envelopes() { - available.Add(envelope.Hash()) - } - // Cross reference availability with known status - unmark := make(map[common.Hash]struct{}) - self.known.Each(func(v interface{}) bool { - if !available.Has(v.(common.Hash)) { - unmark[v.(common.Hash)] = struct{}{} - } - return true - }) - // Dump all known but unavailable - for hash, _ := range unmark { - self.known.Remove(hash) - } -} - -// broadcast iterates over the collection of envelopes and transmits yet unknown -// ones over the network.
-func (self *peer) broadcast() error { - // Fetch the envelopes and collect the unknown ones - envelopes := self.host.envelopes() - transmit := make([]*Envelope, 0, len(envelopes)) - for _, envelope := range envelopes { - if !self.marked(envelope) { - transmit = append(transmit, envelope) - self.mark(envelope) - } - } - // Transmit the unknown batch (potentially empty) - if err := p2p.Send(self.ws, messagesCode, transmit); err != nil { - return err - } - glog.V(logger.Detail).Infoln(self.peer, "broadcasted", len(transmit), "message(s)") - return nil -} diff --git a/whisper/peer_test.go b/whisper/peer_test.go deleted file mode 100644 index 10fef0b2302f8..0000000000000 --- a/whisper/peer_test.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . - -package whisper - -import ( - "testing" - "time" - - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/p2p/discover" -) - -type testPeer struct { - client *Whisper - stream *p2p.MsgPipeRW - termed chan struct{} -} - -func startTestPeer() *testPeer { - // Create a simulated P2P remote peer and data streams to it - remote := p2p.NewPeer(discover.NodeID{}, "", nil) - tester, tested := p2p.MsgPipe() - - // Create a whisper client and connect with it to the tester peer - client := New() - client.Start(nil) - - termed := make(chan struct{}) - go func() { - defer client.Stop() - defer close(termed) - defer tested.Close() - - client.handlePeer(remote, tested) - }() - - return &testPeer{ - client: client, - stream: tester, - termed: termed, - } -} - -func startTestPeerInited() (*testPeer, error) { - peer := startTestPeer() - - if err := p2p.ExpectMsg(peer.stream, statusCode, []uint64{protocolVersion}); err != nil { - peer.stream.Close() - return nil, err - } - if err := p2p.SendItems(peer.stream, statusCode, protocolVersion); err != nil { - peer.stream.Close() - return nil, err - } - return peer, nil -} - -func TestPeerStatusMessage(t *testing.T) { - tester := startTestPeer() - - // Wait for the handshake status message and check it - if err := p2p.ExpectMsg(tester.stream, statusCode, []uint64{protocolVersion}); err != nil { - t.Fatalf("status message mismatch: %v", err) - } - // Terminate the node - tester.stream.Close() - - select { - case <-tester.termed: - case <-time.After(time.Second): - t.Fatalf("local close timed out") - } -} - -func TestPeerHandshakeFail(t *testing.T) { - tester := startTestPeer() - - // Wait for and check the handshake - if err := p2p.ExpectMsg(tester.stream, statusCode, []uint64{protocolVersion}); err != nil { - t.Fatalf("status message mismatch: %v", err) - } - // Send an invalid handshake status and verify disconnect - if err := p2p.SendItems(tester.stream, messagesCode); err != nil { - t.Fatalf("failed to send malformed status: %v", err) - } - select { - case 
<-tester.termed: - case <-time.After(time.Second): - t.Fatalf("remote close timed out") - } -} - -func TestPeerHandshakeSuccess(t *testing.T) { - tester := startTestPeer() - - // Wait for and check the handshake - if err := p2p.ExpectMsg(tester.stream, statusCode, []uint64{protocolVersion}); err != nil { - t.Fatalf("status message mismatch: %v", err) - } - // Send a valid handshake status and make sure connection stays live - if err := p2p.SendItems(tester.stream, statusCode, protocolVersion); err != nil { - t.Fatalf("failed to send status: %v", err) - } - select { - case <-tester.termed: - t.Fatalf("valid handshake disconnected") - - case <-time.After(100 * time.Millisecond): - } - // Clean up the test - tester.stream.Close() - - select { - case <-tester.termed: - case <-time.After(time.Second): - t.Fatalf("local close timed out") - } -} - -func TestPeerSend(t *testing.T) { - // Start a tester and execute the handshake - tester, err := startTestPeerInited() - if err != nil { - t.Fatalf("failed to start initialized peer: %v", err) - } - defer tester.stream.Close() - - // Construct a message and inject into the tester - message := NewMessage([]byte("peer broadcast test message")) - envelope, err := message.Wrap(DefaultPoW, Options{ - TTL: DefaultTTL, - }) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - if err := tester.client.Send(envelope); err != nil { - t.Fatalf("failed to send message: %v", err) - } - // Check that the message is eventually forwarded - payload := []interface{}{envelope} - if err := p2p.ExpectMsg(tester.stream, messagesCode, payload); err != nil { - t.Fatalf("message mismatch: %v", err) - } - // Make sure that even with a re-insert, an empty batch is received - if err := tester.client.Send(envelope); err != nil { - t.Fatalf("failed to send message: %v", err) - } - if err := p2p.ExpectMsg(tester.stream, messagesCode, []interface{}{}); err != nil { - t.Fatalf("message mismatch: %v", err) - } -} - -func TestPeerDeliver(t *testing.T) { - // Start a tester and execute the handshake - tester, err := startTestPeerInited() - if err != nil { - t.Fatalf("failed to start initialized peer: %v", err) - } - defer tester.stream.Close() - - // Watch for all inbound messages - arrived := make(chan struct{}, 1) - tester.client.Watch(Filter{ - Fn: func(message *Message) { - arrived <- struct{}{} - }, - }) - // Construct a message and deliver it to the tester peer - message := NewMessage([]byte("peer broadcast test message")) - envelope, err := message.Wrap(DefaultPoW, Options{ - TTL: DefaultTTL, - }) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - if err := p2p.Send(tester.stream, messagesCode, []*Envelope{envelope}); err != nil { - t.Fatalf("failed to transfer message: %v", err) - } - // Check that the message is delivered upstream - select { - case <-arrived: - case <-time.After(time.Second): - t.Fatalf("message delivery timeout") - } - // Check that a resend is not delivered - if err := p2p.Send(tester.stream, messagesCode, []*Envelope{envelope}); err != nil { - t.Fatalf("failed to transfer message: %v", err) - } - select { - case <-time.After(2 * transmissionCycle): - case <-arrived: - t.Fatalf("repeating message arrived") - } -} - -func TestPeerMessageExpiration(t *testing.T) { - // Start a tester and execute the handshake - tester, err := startTestPeerInited() - if err != nil { - t.Fatalf("failed to start initialized peer: %v", err) - } - defer tester.stream.Close() - - // Fetch the peer instance for later inspection - 
tester.client.peerMu.RLock() - if peers := len(tester.client.peers); peers != 1 { - t.Fatalf("peer pool size mismatch: have %v, want %v", peers, 1) - } - var peer *peer - for peer, _ = range tester.client.peers { - break - } - tester.client.peerMu.RUnlock() - - // Construct a message and pass it through the tester - message := NewMessage([]byte("peer test message")) - envelope, err := message.Wrap(DefaultPoW, Options{ - TTL: time.Second, - }) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - if err := tester.client.Send(envelope); err != nil { - t.Fatalf("failed to send message: %v", err) - } - payload := []interface{}{envelope} - if err := p2p.ExpectMsg(tester.stream, messagesCode, payload); err != nil { - // A premature empty message may have been broadcast, check the next too - if err := p2p.ExpectMsg(tester.stream, messagesCode, payload); err != nil { - t.Fatalf("message mismatch: %v", err) - } - } - // Check that the message is inside the cache - if !peer.known.Has(envelope.Hash()) { - t.Fatalf("message not found in cache") - } - // Discard messages until expiration and check cache again - exp := time.Now().Add(time.Second + 2*expirationCycle + 100*time.Millisecond) - for time.Now().Before(exp) { - if err := p2p.ExpectMsg(tester.stream, messagesCode, []interface{}{}); err != nil { - t.Fatalf("message mismatch: %v", err) - } - } - if peer.known.Has(envelope.Hash()) { - t.Fatalf("message not expired from cache") - } -} diff --git a/whisper/topic.go b/whisper/topic.go deleted file mode 100644 index 3dec788a59ca3..0000000000000 --- a/whisper/topic.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the Whisper protocol Topic element. For formal details please see -// the specs at https://github.com/expanse-org/wiki/wiki/Whisper-PoC-1-Protocol-Spec#topics. - -package whisper - -import "github.com/expanse-org/go-expanse/crypto" - -// Topic represents a cryptographically secure, probabilistic partial -// classification of a message, determined as the first (left) 4 bytes of the -// SHA3 hash of some arbitrary data given by the original author of the message. -type Topic [4]byte - -// NewTopic creates a topic from the 4 byte prefix of the SHA3 hash of the data. -// -// Note, empty topics are considered the wildcard, and cannot be used in messages. -func NewTopic(data []byte) Topic { - prefix := [4]byte{} - copy(prefix[:], crypto.Keccak256(data)[:4]) - return Topic(prefix) -}
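// Editorial aside, not part of the original file (the helper name below is
// hypothetical): since a topic is just the first four bytes of
// Keccak256(data), deriving one by hand agrees with NewTopic exactly.
func exampleTopicDerivation() bool {
	data := []byte("my topic")

	var manual Topic
	copy(manual[:], crypto.Keccak256(data)[:4])

	return NewTopic(data) == manual // [4]byte arrays compare by value: always true
}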
-// NewTopics creates a list of topics from a list of binary data elements, by -// iteratively calling NewTopic on each of them. -func NewTopics(data ...[]byte) []Topic { - topics := make([]Topic, len(data)) - for i, element := range data { - topics[i] = NewTopic(element) - } - return topics -} - -// NewTopicFromString creates a topic using the binary data contents of the -// specified string. -func NewTopicFromString(data string) Topic { - return NewTopic([]byte(data)) -} - -// NewTopicsFromStrings creates a list of topics from a list of textual data -// elements, by iteratively calling NewTopicFromString on each of them. -func NewTopicsFromStrings(data ...string) []Topic { - topics := make([]Topic, len(data)) - for i, element := range data { - topics[i] = NewTopicFromString(element) - } - return topics -} - -// String converts a topic byte array to a string representation. -func (self *Topic) String() string { - return string(self[:]) -} - -// topicMatcher is a filter expression to verify if a list of topics contained -// in an arriving message matches some topic conditions. The topic matcher is -// built up of a list of conditions, each of which must be satisfied by the -// corresponding topic in the message. Each condition may require: a) an exact -// topic match; b) a match from a set of topics; or c) a wild-card matching all. -// -// If a message contains more topics than required by the matcher, those beyond -// the condition count are ignored and assumed to match. -// -// Consider the following sample topic matcher: -// sample := { -// {TopicA1, TopicA2, TopicA3}, -// {TopicB}, -// nil, -// {TopicD1, TopicD2} -// } -// In order for a message to pass this filter, it should enumerate at least 4 -// topics, the first matching any of [TopicA1, TopicA2, TopicA3], the second exactly -// "TopicB", the third anything at all and the fourth either "TopicD1" -// or "TopicD2". If the message contains further topics, the filter will match -// them too. -type topicMatcher struct { - conditions []map[Topic]struct{} -} - -// newTopicMatcher creates a topic matcher from a list of topic conditions. -func newTopicMatcher(topics ...[]Topic) *topicMatcher { - matcher := make([]map[Topic]struct{}, len(topics)) - for i, condition := range topics { - matcher[i] = make(map[Topic]struct{}) - for _, topic := range condition { - matcher[i][topic] = struct{}{} - } - } - return &topicMatcher{conditions: matcher} -} - -// newTopicMatcherFromBinary creates a topic matcher from a list of binary conditions. -func newTopicMatcherFromBinary(data ...[][]byte) *topicMatcher { - topics := make([][]Topic, len(data)) - for i, condition := range data { - topics[i] = NewTopics(condition...) - } - return newTopicMatcher(topics...) -} - -// newTopicMatcherFromStrings creates a topic matcher from a list of textual -// conditions. -func newTopicMatcherFromStrings(data ...[]string) *topicMatcher { - topics := make([][]Topic, len(data)) - for i, condition := range data { - topics[i] = NewTopicsFromStrings(condition...) - } - return newTopicMatcher(topics...) -}
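// A hedged sketch of the matching rules documented above (the helper name is
// hypothetical). With the conditions below a message must carry at least three
// topics: the first exactly "A", the second anything at all (wild-card), the
// third either "B1" or "B2"; any surplus topics are ignored.
func exampleTopicMatching() bool {
	matcher := newTopicMatcher(
		[]Topic{NewTopicFromString("A")},                            // exact match required
		nil,                                                         // wild-card condition
		[]Topic{NewTopicFromString("B1"), NewTopicFromString("B2")}, // set match: either accepted
	)
	topics := NewTopicsFromStrings("A", "whatever", "B2", "extra")
	return matcher.Matches(topics) // true: "extra" lies beyond the condition count
}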
-// Matches checks if a list of topics matches this particular condition set. -func (self *topicMatcher) Matches(topics []Topic) bool { - // Mismatch if there aren't enough topics - if len(self.conditions) > len(topics) { - return false - } - // Check each topic condition for existence (skip wild-cards) - for i := 0; i < len(topics) && i < len(self.conditions); i++ { - if len(self.conditions[i]) > 0 { - if _, ok := self.conditions[i][topics[i]]; !ok { - return false - } - } - } - return true -} diff --git a/whisper/whisper.go b/whisper/whisper.go deleted file mode 100644 index 9e0594ffe3a59..0000000000000 --- a/whisper/whisper.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -package whisper - -import ( - "crypto/ecdsa" - "sync" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/crypto/ecies" - "github.com/expanse-org/go-expanse/event/filter" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/rpc" - "gopkg.in/fatih/set.v0" -) - -const ( - statusCode = 0x00 - messagesCode = 0x01 - - protocolVersion uint64 = 0x02 - protocolName = "shh" - - signatureFlag = byte(1 << 7) - signatureLength = 65 - - expirationCycle = 800 * time.Millisecond - transmissionCycle = 300 * time.Millisecond -) - -const ( - DefaultTTL = 50 * time.Second - DefaultPoW = 50 * time.Millisecond -) - -type MessageEvent struct { - To *ecdsa.PrivateKey - From *ecdsa.PublicKey - Message *Message -} - -// Whisper represents a dark communication interface through the Expanse -// network, using its very own P2P communication layer. -type Whisper struct { - protocol p2p.Protocol - filters *filter.Filters - - keys map[string]*ecdsa.PrivateKey - - messages map[common.Hash]*Envelope // Pool of messages currently tracked by this node - expirations map[uint32]*set.SetNonTS // Message expiration pool (TODO: something lighter) - poolMu sync.RWMutex // Mutex to sync the message and expiration pools - - peers map[*peer]struct{} // Set of currently active peers - peerMu sync.RWMutex // Mutex to sync the active peer set - - quit chan struct{} -} - -// New creates a Whisper client ready to communicate through the Expanse P2P -// network. 
-func New() *Whisper { - whisper := &Whisper{ - filters: filter.New(), - keys: make(map[string]*ecdsa.PrivateKey), - messages: make(map[common.Hash]*Envelope), - expirations: make(map[uint32]*set.SetNonTS), - peers: make(map[*peer]struct{}), - quit: make(chan struct{}), - } - whisper.filters.Start() - - // p2p whisper sub protocol handler - whisper.protocol = p2p.Protocol{ - Name: protocolName, - Version: uint(protocolVersion), - Length: 2, - Run: whisper.handlePeer, - } - - return whisper -} - -// APIs returns the RPC descriptors the Whisper implementation offers -func (s *Whisper) APIs() []rpc.API { - return []rpc.API{ - { - Namespace: "shh", - Version: "1.0", - Service: NewPublicWhisperAPI(s), - Public: true, - }, - } -} - -// Protocols returns the whisper sub-protocols run by this particular client. -func (self *Whisper) Protocols() []p2p.Protocol { - return []p2p.Protocol{self.protocol} -} - -// Version returns the whisper sub-protocols version number. -func (self *Whisper) Version() uint { - return self.protocol.Version -} - -// NewIdentity generates a new cryptographic identity for the client, and injects -// it into the known identities for message decryption. -func (self *Whisper) NewIdentity() *ecdsa.PrivateKey { - key, err := crypto.GenerateKey() - if err != nil { - panic(err) - } - self.keys[string(crypto.FromECDSAPub(&key.PublicKey))] = key - - return key -} - -// HasIdentity checks if the whisper node is configured with the private key -// of the specified public key. -func (self *Whisper) HasIdentity(key *ecdsa.PublicKey) bool { - return self.keys[string(crypto.FromECDSAPub(key))] != nil -} - -// GetIdentity retrieves the private key of the specified public identity. -func (self *Whisper) GetIdentity(key *ecdsa.PublicKey) *ecdsa.PrivateKey { - return self.keys[string(crypto.FromECDSAPub(key))] -} - -// Watch installs a new message handler to run in case a matching packet arrives -// from the whisper network. -func (self *Whisper) Watch(options Filter) int { - filter := filterer{ - to: string(crypto.FromECDSAPub(options.To)), - from: string(crypto.FromECDSAPub(options.From)), - matcher: newTopicMatcher(options.Topics...), - fn: func(data interface{}) { - options.Fn(data.(*Message)) - }, - } - return self.filters.Install(filter) -} - -// Unwatch removes an installed message handler. -func (self *Whisper) Unwatch(id int) { - self.filters.Uninstall(id) -} - -// Send injects a message into the whisper send queue, to be distributed in the -// network in the coming cycles. -func (self *Whisper) Send(envelope *Envelope) error { - return self.add(envelope) -} - -// Start implements node.Service, starting the background data propagation thread -// of the Whisper protocol. -func (self *Whisper) Start(*p2p.Server) error { - glog.V(logger.Info).Infoln("Whisper started") - go self.update() - return nil -} - -// Stop implements node.Service, stopping the background data propagation thread -// of the Whisper protocol. -func (self *Whisper) Stop() error { - close(self.quit) - glog.V(logger.Info).Infoln("Whisper stopped") - return nil -}
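// Hedged end-to-end sketch of the API above (the helper name is hypothetical;
// NewMessage, Wrap and Options come from message.go, which is not part of this
// diff): install a watcher for one of our identities, then inject a
// self-addressed message, mirroring what TestSelfMessage does further below.
func exampleWatchAndSend(shh *Whisper) error {
	key := shh.NewIdentity()

	id := shh.Watch(Filter{
		To: &key.PublicKey,
		Fn: func(msg *Message) {
			// react to each decrypted, matching message here
		},
	})
	defer shh.Unwatch(id)

	msg := NewMessage([]byte("hello whisper"))
	envelope, err := msg.Wrap(DefaultPoW, Options{
		From: key,
		To:   &key.PublicKey,
		TTL:  DefaultTTL,
	})
	if err != nil {
		return err
	}
	return shh.Send(envelope) // pooled locally, broadcast in the coming cycles
}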
-// Messages retrieves all the currently pooled messages matching a filter id. -func (self *Whisper) Messages(id int) []*Message { - messages := make([]*Message, 0) - if filter := self.filters.Get(id); filter != nil { - for _, envelope := range self.messages { - if message := self.open(envelope); message != nil { - if self.filters.Match(filter, createFilter(message, envelope.Topics)) { - messages = append(messages, message) - } - } - } - } - return messages -} - -// handlePeer is called by the underlying P2P layer when the whisper sub-protocol -// connection is negotiated. -func (self *Whisper) handlePeer(peer *p2p.Peer, rw p2p.MsgReadWriter) error { - // Create the new peer and start tracking it - whisperPeer := newPeer(self, peer, rw) - - self.peerMu.Lock() - self.peers[whisperPeer] = struct{}{} - self.peerMu.Unlock() - - defer func() { - self.peerMu.Lock() - delete(self.peers, whisperPeer) - self.peerMu.Unlock() - }() - - // Run the peer handshake and state updates - if err := whisperPeer.handshake(); err != nil { - return err - } - whisperPeer.start() - defer whisperPeer.stop() - - // Read and process inbound messages directly to merge into client-global state - for { - // Fetch the next packet and decode the contained envelopes - packet, err := rw.ReadMsg() - if err != nil { - return err - } - var envelopes []*Envelope - if err := packet.Decode(&envelopes); err != nil { - glog.V(logger.Info).Infof("%v: failed to decode envelope: %v", peer, err) - continue - } - // Inject all envelopes into the internal pool - for _, envelope := range envelopes { - if err := self.add(envelope); err != nil { - // TODO Punish peer here. Invalid envelope. - glog.V(logger.Debug).Infof("%v: failed to pool envelope: %v", peer, err) - } - whisperPeer.mark(envelope) - } - } -} - -// add inserts a new envelope into the message pool to be distributed within the -// whisper network. It also inserts the envelope into the expiration pool at the -// appropriate time-stamp. -func (self *Whisper) add(envelope *Envelope) error { - self.poolMu.Lock() - defer self.poolMu.Unlock() - - // Short circuit when a received envelope has already expired - if envelope.Expiry < uint32(time.Now().Unix()) { - return nil - } - - // Insert the message into the tracked pool - hash := envelope.Hash() - if _, ok := self.messages[hash]; ok { - glog.V(logger.Detail).Infof("whisper envelope already cached: %x\n", envelope) - return nil - } - self.messages[hash] = envelope - - // Insert the message into the expiration pool for later removal - if self.expirations[envelope.Expiry] == nil { - self.expirations[envelope.Expiry] = set.NewNonTS() - } - if !self.expirations[envelope.Expiry].Has(hash) { - self.expirations[envelope.Expiry].Add(hash) - - // Notify the local node of a message arrival - go self.postEvent(envelope) - } - glog.V(logger.Detail).Infof("cached whisper envelope %x\n", envelope) - return nil -} - -// postEvent opens an envelope with the configured identities and delivers the -// message upstream for application processing. -func (self *Whisper) postEvent(envelope *Envelope) { - if message := self.open(envelope); message != nil { - self.filters.Notify(createFilter(message, envelope.Topics), message) - } -} - -// open tries to decrypt a whisper envelope with all the configured identities, -// returning the decrypted message and the key used to achieve it. If no keys -// are configured, open will return the payload as if unencrypted. 
-func (self *Whisper) open(envelope *Envelope) *Message { - // Short circuit if no identity is set, and assume clear-text - if len(self.keys) == 0 { - if message, err := envelope.Open(nil); err == nil { - return message - } - } - // Iterate over the keys and try to decrypt the message - for _, key := range self.keys { - message, err := envelope.Open(key) - if err == nil { - message.To = &key.PublicKey - return message - } else if err == ecies.ErrInvalidPublicKey { - return message - } - } - // Failed to decrypt, don't return anything - return nil -} - -// createFilter creates a message filter to check against installed handlers. -func createFilter(message *Message, topics []Topic) filter.Filter { - matcher := make([][]Topic, len(topics)) - for i, topic := range topics { - matcher[i] = []Topic{topic} - } - return filterer{ - to: string(crypto.FromECDSAPub(message.To)), - from: string(crypto.FromECDSAPub(message.Recover())), - matcher: newTopicMatcher(matcher...), - } -} - -// update loops for the lifetime of the whisper node, updating its internal -// state by expiring stale messages from the pool. -func (self *Whisper) update() { - // Start a ticker to check for expirations - expire := time.NewTicker(expirationCycle) - - // Repeat updates until termination is requested - for { - select { - case <-expire.C: - self.expire() - - case <-self.quit: - return - } - } -} - -// expire iterates over all the expiration timestamps, removing all stale -// messages from the pools. -func (self *Whisper) expire() { - self.poolMu.Lock() - defer self.poolMu.Unlock() - - now := uint32(time.Now().Unix()) - for then, hashSet := range self.expirations { - // Short circuit if a future time - if then > now { - continue - } - // Dump all expired messages and remove timestamp - hashSet.Each(func(v interface{}) bool { - delete(self.messages, v.(common.Hash)) - return true - }) - self.expirations[then].Clear() - } -} - -// envelopes retrieves all the messages currently pooled by the node. -func (self *Whisper) envelopes() []*Envelope { - self.poolMu.RLock() - defer self.poolMu.RUnlock() - - envelopes := make([]*Envelope, 0, len(self.messages)) - for _, envelope := range self.messages { - envelopes = append(envelopes, envelope) - } - return envelopes -} diff --git a/whisper/whisper_test.go b/whisper/whisper_test.go deleted file mode 100644 index 59ba94621049e..0000000000000 --- a/whisper/whisper_test.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. 
- -package whisper - import ( - "testing" - "time" - - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/p2p/discover" -) - -func startTestCluster(n int) []*Whisper { - // Create the batch of simulated peers - nodes := make([]*p2p.Peer, n) - for i := 0; i < n; i++ { - nodes[i] = p2p.NewPeer(discover.NodeID{}, "", nil) - } - whispers := make([]*Whisper, n) - for i := 0; i < n; i++ { - whispers[i] = New() - whispers[i].Start(nil) - } - // Wire all the peers to the root one - for i := 1; i < n; i++ { - src, dst := p2p.MsgPipe() - - go whispers[0].handlePeer(nodes[i], src) - go whispers[i].handlePeer(nodes[0], dst) - } - return whispers -} - -func TestSelfMessage(t *testing.T) { - // Start the single node cluster - client := startTestCluster(1)[0] - - // Start watching for self messages, signal any arrivals - self := client.NewIdentity() - done := make(chan struct{}) - - client.Watch(Filter{ - To: &self.PublicKey, - Fn: func(msg *Message) { - close(done) - }, - }) - // Send a dummy message to oneself - msg := NewMessage([]byte("self whisper")) - envelope, err := msg.Wrap(DefaultPoW, Options{ - From: self, - To: &self.PublicKey, - TTL: DefaultTTL, - }) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - // Dump the message into the system and wait for it to pop back out - if err := client.Send(envelope); err != nil { - t.Fatalf("failed to send self-message: %v", err) - } - select { - case <-done: - case <-time.After(time.Second): - t.Fatalf("self-message receive timeout") - } -} - -func TestDirectMessage(t *testing.T) { - // Start the sender-recipient cluster - cluster := startTestCluster(2) - - sender := cluster[0] - senderId := sender.NewIdentity() - - recipient := cluster[1] - recipientId := recipient.NewIdentity() - - // Watch for arriving messages on the recipient - done := make(chan struct{}) - recipient.Watch(Filter{ - To: &recipientId.PublicKey, - Fn: func(msg *Message) { - close(done) - }, - }) - // Send a dummy message from the sender - msg := NewMessage([]byte("direct whisper")) - envelope, err := msg.Wrap(DefaultPoW, Options{ - From: senderId, - To: &recipientId.PublicKey, - TTL: DefaultTTL, - }) - if err != nil { - t t.Fatalf("failed to wrap message: %v", err) - } - if err := sender.Send(envelope); err != nil { - t.Fatalf("failed to send direct message: %v", err) - } - // Wait for an arrival or a timeout - select { - case <-done: - case <-time.After(time.Second): - t.Fatalf("direct message receive timeout") - } -} - -func TestAnonymousBroadcast(t *testing.T) { - testBroadcast(true, t) -} - -func TestIdentifiedBroadcast(t *testing.T) { - testBroadcast(false, t) -} - -func testBroadcast(anonymous bool, t *testing.T) { - // Start the single sender multi recipient cluster - cluster := startTestCluster(3) - - sender := cluster[0] - targets := cluster[1:] - for _, target := range targets { - if !anonymous { - target.NewIdentity() - } - } - // Watch for arriving messages on the recipients - dones := make([]chan struct{}, len(targets)) - for i := 0; i < len(targets); i++ { - done := make(chan struct{}) // needed for the closure - dones[i] = done - - targets[i].Watch(Filter{ - Topics: NewFilterTopicsFromStringsFlat("broadcast topic"), - Fn: func(msg *Message) { - close(done) - }, - }) - } - // Send a dummy message from the sender - msg := NewMessage([]byte("broadcast whisper")) - envelope, err := msg.Wrap(DefaultPoW, Options{ - Topics: NewTopicsFromStrings("broadcast topic"), - TTL: DefaultTTL, - }) - if err != nil { - t.Fatalf("failed to wrap 
message: %v", err) - } - if err := sender.Send(envelope); err != nil { - t.Fatalf("failed to send broadcast message: %v", err) - } - // Wait for an arrival on each recipient, or timeouts - timeout := time.After(time.Second) - for _, done := range dones { - select { - case <-done: - case <-timeout: - t.Fatalf("broadcast message receive timeout") - } - } -} - -func TestMessageExpiration(t *testing.T) { - // Start the single node cluster and inject a dummy message - node := startTestCluster(1)[0] - - message := NewMessage([]byte("expiring message")) - envelope, err := message.Wrap(DefaultPoW, Options{TTL: time.Second}) - if err != nil { - t.Fatalf("failed to wrap message: %v", err) - } - if err := node.Send(envelope); err != nil { - t.Fatalf("failed to inject message: %v", err) - } - // Check that the message is inside the cache - node.poolMu.RLock() - _, found := node.messages[envelope.Hash()] - node.poolMu.RUnlock() - - if !found { - t.Fatalf("message not found in cache") - } - // Wait for expiration and check cache again - time.Sleep(time.Second) // wait for expiration - time.Sleep(2 * expirationCycle) // wait for cleanup cycle - - node.poolMu.RLock() - _, found = node.messages[envelope.Hash()] - node.poolMu.RUnlock() - if found { - t.Fatalf("message not expired from cache") - } - - // Check that adding an expired envelope doesn't do anything. - node.add(envelope) - node.poolMu.RLock() - _, found = node.messages[envelope.Hash()] - node.poolMu.RUnlock() - if found { - t.Fatalf("message was added to cache") - } -} diff --git a/xeth/types.go b/xeth/types.go deleted file mode 100644 index c2dc74a7130b7..0000000000000 --- a/xeth/types.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see . 
- -package xeth - -import ( - "bytes" - "fmt" - "math/big" - "strings" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/p2p" - "github.com/expanse-org/go-expanse/rlp" -) - -type Object struct { - *state.StateObject -} - -func NewObject(state *state.StateObject) *Object { - return &Object{state} -} - -func (self *Object) StorageString(str string) []byte { - if common.IsHex(str) { - return self.storage(common.Hex2Bytes(str[2:])) - } else { - return self.storage(common.RightPadBytes([]byte(str), 32)) - } -} - -func (self *Object) StorageValue(addr *common.Value) []byte { - return self.storage(addr.Bytes()) -} - -func (self *Object) storage(addr []byte) []byte { - return self.StateObject.GetState(common.BytesToHash(addr)).Bytes() -} - -func (self *Object) Storage() (storage map[string]string) { - storage = make(map[string]string) - - it := self.StateObject.Trie().Iterator() - for it.Next() { - var data []byte - rlp.Decode(bytes.NewReader(it.Value), &data) - storage[common.ToHex(self.Trie().GetKey(it.Key))] = common.ToHex(data) - } - - return -} - -// Block interface exposed to QML -type Block struct { - //Transactions string `json:"transactions"` - ref *types.Block - Size string `json:"size"` - Number int `json:"number"` - Hash string `json:"hash"` - Transactions *common.List `json:"transactions"` - Uncles *common.List `json:"uncles"` - Time *big.Int `json:"time"` - Coinbase string `json:"coinbase"` - Name string `json:"name"` - GasLimit string `json:"gasLimit"` - GasUsed string `json:"gasUsed"` - PrevHash string `json:"prevHash"` - Bloom string `json:"bloom"` - Raw string `json:"raw"` -} - -// Creates a new QML Block from a chain block -func NewBlock(block *types.Block) *Block { - if block == nil { - return &Block{} - } - - ptxs := make([]*Transaction, len(block.Transactions())) - /* - for i, tx := range block.Transactions() { - ptxs[i] = NewTx(tx) - } - */ - txlist := common.NewList(ptxs) - - puncles := make([]*Block, len(block.Uncles())) - /* - for i, uncle := range block.Uncles() { - puncles[i] = NewBlock(types.NewBlockWithHeader(uncle)) - } - */ - ulist := common.NewList(puncles) - - return &Block{ - ref: block, Size: block.Size().String(), - Number: int(block.NumberU64()), GasUsed: block.GasUsed().String(), - GasLimit: block.GasLimit().String(), Hash: block.Hash().Hex(), - Transactions: txlist, Uncles: ulist, - Time: block.Time(), - Coinbase: block.Coinbase().Hex(), - PrevHash: block.ParentHash().Hex(), - Bloom: common.ToHex(block.Bloom().Bytes()), - Raw: block.String(), - } -} - -func (self *Block) ToString() string { - if self.ref != nil { - return self.ref.String() - } - - return "" -} - -func (self *Block) GetTransaction(hash string) *Transaction { - tx := self.ref.Transaction(common.HexToHash(hash)) - if tx == nil { - return nil - } - - return NewTx(tx) -} - -type Transaction struct { - ref *types.Transaction - - Value string `json:"value"` - Gas string `json:"gas"` - GasPrice string `json:"gasPrice"` - Hash string `json:"hash"` - Address string `json:"address"` - Sender string `json:"sender"` - RawData string `json:"rawData"` - Data string `json:"data"` - Contract bool `json:"isContract"` - CreatesContract bool `json:"createsContract"` - Confirmations int `json:"confirmations"` -} - -func NewTx(tx *types.Transaction) *Transaction { - sender, err := tx.From() - if 
err != nil { - return nil - } - hash := tx.Hash().Hex() - - var receiver string - if to := tx.To(); to != nil { - receiver = to.Hex() - } else { - from, _ := tx.From() - receiver = crypto.CreateAddress(from, tx.Nonce()).Hex() - } - createsContract := core.MessageCreatesContract(tx) - - var data string - if createsContract { - data = strings.Join(core.Disassemble(tx.Data()), "\n") - } else { - data = common.ToHex(tx.Data()) - } - - return &Transaction{ref: tx, Hash: hash, Value: common.CurrencyToString(tx.Value()), Address: receiver, Contract: createsContract, Gas: tx.Gas().String(), GasPrice: tx.GasPrice().String(), Data: data, Sender: sender.Hex(), CreatesContract: createsContract, RawData: common.ToHex(tx.Data())} -} - -func (self *Transaction) ToString() string { - return self.ref.String() -} - -type PReceipt struct { - CreatedContract bool `json:"createdContract"` - Address string `json:"address"` - Hash string `json:"hash"` - Sender string `json:"sender"` -} - -func NewPReciept(contractCreation bool, creationAddress, hash, address []byte) *PReceipt { - return &PReceipt{ - contractCreation, - common.ToHex(creationAddress), - common.ToHex(hash), - common.ToHex(address), - } -} - -// Peer interface exposed to QML - -type Peer struct { - ref *p2p.Peer - Ip string `json:"ip"` - Version string `json:"version"` - Caps string `json:"caps"` -} - -func NewPeer(peer *p2p.Peer) *Peer { - var caps []string - for _, cap := range peer.Caps() { - caps = append(caps, fmt.Sprintf("%s/%d", cap.Name, cap.Version)) - } - - return &Peer{ - ref: peer, - Ip: fmt.Sprintf("%v", peer.RemoteAddr()), - Version: fmt.Sprintf("%v", peer.ID()), - Caps: fmt.Sprintf("%v", caps), - } -} - -type Receipt struct { - CreatedContract bool `json:"createdContract"` - Address string `json:"address"` - Hash string `json:"hash"` - Sender string `json:"sender"` -} - -func NewReciept(contractCreation bool, creationAddress, hash, address []byte) *Receipt { - return &Receipt{ - contractCreation, - common.ToHex(creationAddress), - common.ToHex(hash), - common.ToHex(address), - } -} diff --git a/xeth/whisper.go b/xeth/whisper.go deleted file mode 100644 index f9aceda8cf865..0000000000000 --- a/xeth/whisper.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the external API to the whisper sub-protocol. - -package xeth - -import ( - "fmt" - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/whisper" -) - -var qlogger = logger.NewLogger("XSHH") - -// Whisper represents the API wrapper around the internal whisper implementation. -type Whisper struct { - *whisper.Whisper -}
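// Hedged usage sketch for the wrapper defined below (the helper name and the
// literal hex values are illustrative; all parameters must be hex strings, as
// Post requires): wrap a protocol client, create an identity and post a signed
// message with one topic and a one hour TTL.
func examplePost(shh *whisper.Whisper) error {
	api := NewWhisper(shh)
	from := api.NewIdentity() // hex-encoded public key of a freshly injected identity

	return api.Post(
		"0x68656c6c6f",           // payload: "hello" in hex
		"",                       // to: left empty here, so no recipient key is used
		from,                     // from: must be a known identity or Post errors out
		[]string{"0x746f706963"}, // one topic: "topic" in hex
		50,                       // priority, mapped to a 50ms proof-of-work target
		3600,                     // ttl in seconds
	)
}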
-// NewWhisper wraps an internal whisper client into an external API version. -func NewWhisper(w *whisper.Whisper) *Whisper { - return &Whisper{w} -} - -// NewIdentity generates a new cryptographic identity for the client, and injects -// it into the known identities for message decryption. -func (self *Whisper) NewIdentity() string { - identity := self.Whisper.NewIdentity() - return common.ToHex(crypto.FromECDSAPub(&identity.PublicKey)) -} - -// HasIdentity checks if the whisper node is configured with the private key -// of the specified public key. -func (self *Whisper) HasIdentity(key string) bool { - return self.Whisper.HasIdentity(crypto.ToECDSAPub(common.FromHex(key))) -} - -// Post injects a message into the whisper network for distribution. -func (self *Whisper) Post(payload string, to, from string, topics []string, priority, ttl uint32) error { - // Decode the topic strings - topicsDecoded := make([][]byte, len(topics)) - for i, topic := range topics { - topicsDecoded[i] = common.FromHex(topic) - } - // Construct the whisper message and transmission options - message := whisper.NewMessage(common.FromHex(payload)) - options := whisper.Options{ - To: crypto.ToECDSAPub(common.FromHex(to)), - TTL: time.Duration(ttl) * time.Second, - Topics: whisper.NewTopics(topicsDecoded...), - } - if len(from) != 0 { - if key := self.Whisper.GetIdentity(crypto.ToECDSAPub(common.FromHex(from))); key != nil { - options.From = key - } else { - return fmt.Errorf("unknown identity to send from: %s", from) - } - } - // Wrap and send the message - pow := time.Duration(priority) * time.Millisecond - envelope, err := message.Wrap(pow, options) - if err != nil { - return err - } - if err := self.Whisper.Send(envelope); err != nil { - return err - } - return nil -} - -// Watch installs a new message handler to run in case a matching packet arrives -// from the whisper network. -func (self *Whisper) Watch(to, from string, topics [][]string, fn func(WhisperMessage)) int { - // Decode the topic strings - topicsDecoded := make([][][]byte, len(topics)) - for i, condition := range topics { - topicsDecoded[i] = make([][]byte, len(condition)) - for j, topic := range condition { - topicsDecoded[i][j] = common.FromHex(topic) - } - } - // Assemble and inject the filter into the whisper client - filter := whisper.Filter{ - To: crypto.ToECDSAPub(common.FromHex(to)), - From: crypto.ToECDSAPub(common.FromHex(from)), - Topics: whisper.NewFilterTopics(topicsDecoded...), - } - filter.Fn = func(message *whisper.Message) { - fn(NewWhisperMessage(message)) - } - return self.Whisper.Watch(filter) -} - -// Messages retrieves all the currently pooled messages matching a filter id. -func (self *Whisper) Messages(id int) []WhisperMessage { - pool := self.Whisper.Messages(id) - - messages := make([]WhisperMessage, len(pool)) - for i, message := range pool { - messages[i] = NewWhisperMessage(message) - } - return messages -} diff --git a/xeth/whisper_filter.go b/xeth/whisper_filter.go deleted file mode 100644 index a3bc6b5bc8a17..0000000000000 --- a/xeth/whisper_filter.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the external API side message filter for watching, pooling and polling -// matched whisper messages, also serializing data access to avoid duplications. - -package xeth - -import ( - "sync" - "time" - - "github.com/expanse-org/go-expanse/common" -) - -// whisperFilter is the message cache matching a specific filter, accumulating -// inbound messages until they are requested by the client. -type whisperFilter struct { - id int // Filter identifier for old message retrieval - ref *Whisper // Whisper reference for old message retrieval - - cache []WhisperMessage // Cache of messages not yet polled - skip map[common.Hash]struct{} // List of retrieved messages to avoid duplication - update time.Time // Time of the last message query - - lock sync.RWMutex // Lock protecting the filter internals -} - -// newWhisperFilter creates a new serialized, poll based whisper topic filter. -func newWhisperFilter(id int, ref *Whisper) *whisperFilter { - return &whisperFilter{ - id: id, - ref: ref, - - update: time.Now(), - skip: make(map[common.Hash]struct{}), - } -} - -// messages retrieves all the cached messages from the entire pool matching the -// filter, resetting the filter's change buffer. -func (w *whisperFilter) messages() []WhisperMessage { - w.lock.Lock() - defer w.lock.Unlock() - - w.cache = nil - w.update = time.Now() - - w.skip = make(map[common.Hash]struct{}) - messages := w.ref.Messages(w.id) - for _, message := range messages { - w.skip[message.ref.Hash] = struct{}{} - } - return messages -} - -// insert injects a new batch of messages into the filter cache. -func (w *whisperFilter) insert(messages ...WhisperMessage) { - w.lock.Lock() - defer w.lock.Unlock() - - for _, message := range messages { - if _, ok := w.skip[message.ref.Hash]; !ok { - w.cache = append(w.cache, message) - } - } -} - -// retrieve fetches all the cached messages from the filter. -func (w *whisperFilter) retrieve() (messages []WhisperMessage) { - w.lock.Lock() - defer w.lock.Unlock() - - messages, w.cache = w.cache, nil - w.update = time.Now() - - return -} - -// activity returns the last time instance when client requests were executed on -// the filter. -func (w *whisperFilter) activity() time.Time { - w.lock.RLock() - defer w.lock.RUnlock() - - return w.update -} diff --git a/xeth/whisper_message.go b/xeth/whisper_message.go deleted file mode 100644 index 2fec991c3957e..0000000000000 --- a/xeth/whisper_message.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2015 The go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the external API representation of a whisper message. - -package xeth - -import ( - "time" - - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/whisper" -) - -// WhisperMessage is the external API representation of a whisper.Message. -type WhisperMessage struct { - ref *whisper.Message - - Payload string `json:"payload"` - To string `json:"to"` - From string `json:"from"` - Sent int64 `json:"sent"` - TTL int64 `json:"ttl"` - Hash string `json:"hash"` -} - -// NewWhisperMessage converts an internal message into an API version. -func NewWhisperMessage(message *whisper.Message) WhisperMessage { - return WhisperMessage{ - ref: message, - - Payload: common.ToHex(message.Payload), - From: common.ToHex(crypto.FromECDSAPub(message.Recover())), - To: common.ToHex(crypto.FromECDSAPub(message.To)), - Sent: message.Sent.Unix(), - TTL: int64(message.TTL / time.Second), - Hash: common.ToHex(message.Hash.Bytes()), - } -} diff --git a/xeth/xeth.go b/xeth/xeth.go deleted file mode 100644 index f0d38b44cf168..0000000000000 --- a/xeth/xeth.go +++ /dev/null @@ -1,1110 +0,0 @@ -// Copyright 2014 The go-ethereum Authors && Copyright 2015 go-expanse Authors -// This file is part of the go-expanse library. -// -// The go-expanse library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-expanse library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-expanse library. If not, see <http://www.gnu.org/licenses/>. - -// Package xeth is the interface to all Expanse functionality. 
-package xeth - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math/big" - "regexp" - "sync" - "time" - - "github.com/expanse-org/go-expanse/accounts" - "github.com/expanse-org/go-expanse/common" - "github.com/expanse-org/go-expanse/common/compiler" - "github.com/expanse-org/go-expanse/core" - "github.com/expanse-org/go-expanse/core/state" - "github.com/expanse-org/go-expanse/core/types" - "github.com/expanse-org/go-expanse/core/vm" - "github.com/expanse-org/go-expanse/crypto" - "github.com/expanse-org/go-expanse/exp" - "github.com/expanse-org/go-expanse/exp/filters" - "github.com/expanse-org/go-expanse/logger" - "github.com/expanse-org/go-expanse/logger/glog" - "github.com/expanse-org/go-expanse/miner" - "github.com/expanse-org/go-expanse/rlp" -) - -var ( - filterTickerTime = 5 * time.Minute - defaultGasPrice = big.NewInt(10000000000000) //150000000000 - defaultGas = big.NewInt(90000) //500000 - dappStorePre = []byte("dapp-") - addrReg = regexp.MustCompile(`^(0x)?[a-fA-F0-9]{40}$`) -) - -// byte will be inferred -const ( - UnknownFilterTy = iota - BlockFilterTy - TransactionFilterTy - LogFilterTy -) - -type XEth struct { - quit chan struct{} - - logMu sync.RWMutex - logQueue map[int]*logQueue - - blockMu sync.RWMutex - blockQueue map[int]*hashQueue - - transactionMu sync.RWMutex - transactionQueue map[int]*hashQueue - - messagesMu sync.RWMutex - messages map[int]*whisperFilter - - transactMu sync.Mutex - - // read-only fields - backend *exp.Expanse - frontend Frontend - agent *miner.RemoteAgent - gpo *exp.GasPriceOracle - state *State - whisper *Whisper - filterManager *filters.FilterSystem -} - -func NewTest(exp *exp.Expanse, frontend Frontend) *XEth { - return &XEth{backend: exp, frontend: frontend} -} - -// New creates an XEth that uses the given frontend. -// If a nil Frontend is provided, a default frontend which -// confirms all transactions will be used. 
-func New(expanse *exp.Expanse, frontend Frontend) *XEth { - xeth := &XEth{ - backend: expanse, - frontend: frontend, - quit: make(chan struct{}), - filterManager: filters.NewFilterSystem(expanse.EventMux()), - logQueue: make(map[int]*logQueue), - blockQueue: make(map[int]*hashQueue), - transactionQueue: make(map[int]*hashQueue), - messages: make(map[int]*whisperFilter), - agent: miner.NewRemoteAgent(), - gpo: exp.NewGasPriceOracle(expanse), - } - if expanse.Whisper() != nil { - xeth.whisper = NewWhisper(expanse.Whisper()) - } - expanse.Miner().Register(xeth.agent) - if frontend == nil { - xeth.frontend = dummyFrontend{} - } - state, _ := xeth.backend.BlockChain().State() - xeth.state = NewState(xeth, state) - go xeth.start() - return xeth -} - -func (self *XEth) start() { - timer := time.NewTicker(2 * time.Second) - defer timer.Stop() -done: - for { - select { - case <-timer.C: - self.logMu.Lock() - for id, filter := range self.logQueue { - if time.Since(filter.timeout) > filterTickerTime { - self.filterManager.Remove(id) - delete(self.logQueue, id) - } - } - self.logMu.Unlock() - - self.blockMu.Lock() - for id, filter := range self.blockQueue { - if time.Since(filter.timeout) > filterTickerTime { - self.filterManager.Remove(id) - delete(self.blockQueue, id) - } - } - self.blockMu.Unlock() - - self.transactionMu.Lock() - for id, filter := range self.transactionQueue { - if time.Since(filter.timeout) > filterTickerTime { - self.filterManager.Remove(id) - delete(self.transactionQueue, id) - } - } - self.transactionMu.Unlock() - - self.messagesMu.Lock() - for id, filter := range self.messages { - if time.Since(filter.activity()) > filterTickerTime { - self.Whisper().Unwatch(id) - delete(self.messages, id) - } - } - self.messagesMu.Unlock() - case <-self.quit: - break done - } - } -}
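// Hedged lifecycle sketch around New above and Stop below (the helper name is
// hypothetical and backend construction is elided): a nil frontend falls back
// to the auto-confirming dummyFrontend, and Stop tears the filter loop down.
func exampleXEthLifecycle(backend *exp.Expanse) {
	x := New(backend, nil) // nil => dummyFrontend{}, which confirms all transactions
	defer x.Stop()

	head := x.BlockByNumber(-1) // -1 resolves to the current head, -2 to the pending block
	_ = head
}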
-// Stop releases any resources associated with self. -// It may not be called more than once. -func (self *XEth) Stop() { - close(self.quit) - self.filterManager.Stop() - self.backend.Miner().Unregister(self.agent) -} - -func cAddress(a []string) []common.Address { - bslice := make([]common.Address, len(a)) - for i, addr := range a { - bslice[i] = common.HexToAddress(addr) - } - return bslice -} - -func cTopics(t [][]string) [][]common.Hash { - topics := make([][]common.Hash, len(t)) - for i, iv := range t { - topics[i] = make([]common.Hash, len(iv)) - for j, jv := range iv { - topics[i][j] = common.HexToHash(jv) - } - } - return topics -} - -func DefaultGas() *big.Int { return new(big.Int).Set(defaultGas) } - -func (self *XEth) DefaultGasPrice() *big.Int { - return self.gpo.SuggestPrice() -} - -func (self *XEth) RemoteMining() *miner.RemoteAgent { return self.agent } - -func (self *XEth) AtStateNum(num int64) *XEth { - var st *state.StateDB - var err error - switch num { - case -2: - st = self.backend.Miner().PendingState().Copy() - default: - if block := self.getBlockByHeight(num); block != nil { - st, err = state.New(block.Root(), self.backend.ChainDb()) - if err != nil { - return nil - } - } else { - st, err = state.New(self.backend.BlockChain().GetBlockByNumber(0).Root(), self.backend.ChainDb()) - if err != nil { - return nil - } - } - } - - return self.WithState(st) -} - -func (self *XEth) WithState(statedb *state.StateDB) *XEth { - xeth := &XEth{ - backend: self.backend, - frontend: self.frontend, - gpo: self.gpo, - } - - xeth.state = NewState(xeth, statedb) - return xeth -} - -func (self *XEth) State() *State { return self.state } - -// subscribes to new head block events and -// waits until blockchain height is greater than n at any time -// given the current head, waits for the next chain event -// sets the state to the current head -// loop is async and quit by closing the channel -// used in tests and JS console debug module to control advancing private chain manually -// Note: this is not threadsafe, only called in JS single process and tests -func (self *XEth) UpdateState() (wait chan *big.Int) { - wait = make(chan *big.Int) - go func() { - eventSub := self.backend.EventMux().Subscribe(core.ChainHeadEvent{}) - defer eventSub.Unsubscribe() - - var m, n *big.Int - var ok bool - - eventCh := eventSub.Chan() - for { - select { - case event, ok := <-eventCh: - if !ok { - // Event subscription closed, set the channel to nil to stop spinning - eventCh = nil - continue - } - // A real event arrived, process if new head block assignment - if event, ok := event.Data.(core.ChainHeadEvent); ok { - m = event.Block.Number() - if n != nil && n.Cmp(m) < 0 { - wait <- n - n = nil - } - statedb, err := state.New(event.Block.Root(), self.backend.ChainDb()) - if err != nil { - glog.V(logger.Error).Infof("Could not create new state: %v", err) - return - } - self.state = NewState(self, statedb) - } - case n, ok = <-wait: - if !ok { - return - } - } - } - }() - return -} - -func (self *XEth) Whisper() *Whisper { return self.whisper } - -func (self *XEth) getBlockByHeight(height int64) *types.Block { - var num uint64 - - switch height { - case -2: - return self.backend.Miner().PendingBlock() - case -1: - return self.CurrentBlock() - default: - if height < 0 { - return nil - } - - num = uint64(height) - } - - return self.backend.BlockChain().GetBlockByNumber(num) -} - -func (self *XEth) BlockByHash(strHash string) *Block { - hash := common.HexToHash(strHash) - block := self.backend.BlockChain().GetBlock(hash) - - return NewBlock(block) -} - -func (self *XEth) EthBlockByHash(strHash string) 
*types.Block { - hash := common.HexToHash(strHash) - block := self.backend.BlockChain().GetBlock(hash) - - return block -} - -func (self *XEth) EthTransactionByHash(hash string) (*types.Transaction, common.Hash, uint64, uint64) { - if tx, hash, number, index := core.GetTransaction(self.backend.ChainDb(), common.HexToHash(hash)); tx != nil { - return tx, hash, number, index - } - return self.backend.TxPool().GetTransaction(common.HexToHash(hash)), common.Hash{}, 0, 0 -} - -func (self *XEth) BlockByNumber(num int64) *Block { - return NewBlock(self.getBlockByHeight(num)) -} - -func (self *XEth) EthBlockByNumber(num int64) *types.Block { - return self.getBlockByHeight(num) -} - -func (self *XEth) Td(hash common.Hash) *big.Int { - return self.backend.BlockChain().GetTd(hash) -} - -func (self *XEth) CurrentBlock() *types.Block { - return self.backend.BlockChain().CurrentBlock() -} - -func (self *XEth) GetBlockReceipts(bhash common.Hash) types.Receipts { - return core.GetBlockReceipts(self.backend.ChainDb(), bhash) -} - -func (self *XEth) GetTxReceipt(txhash common.Hash) *types.Receipt { - return core.GetReceipt(self.backend.ChainDb(), txhash) -} - -func (self *XEth) GasLimit() *big.Int { - return self.backend.BlockChain().GasLimit() -} - -func (self *XEth) Block(v interface{}) *Block { - if n, ok := v.(int32); ok { - return self.BlockByNumber(int64(n)) - } else if str, ok := v.(string); ok { - return self.BlockByHash(str) - } else if f, ok := v.(float64); ok { // JSON numbers are represented as float64 - return self.BlockByNumber(int64(f)) - } - - return nil -} - -func (self *XEth) Accounts() []string { - // TODO: check err? - accounts, _ := self.backend.AccountManager().Accounts() - accountAddresses := make([]string, len(accounts)) - for i, ac := range accounts { - accountAddresses[i] = ac.Address.Hex() - } - return accountAddresses -} - -// accessor for solidity compiler. 
-// memoized if available, retried on-demand if not -func (self *XEth) Solc() (*compiler.Solidity, error) { - return self.backend.Solc() -} - -// set in js console via admin interface or wrapper from cli flags -func (self *XEth) SetSolc(solcPath string) (*compiler.Solidity, error) { - self.backend.SetSolc(solcPath) - return self.Solc() -} - -// store DApp value in extra database -func (self *XEth) DbPut(key, val []byte) bool { - self.backend.DappDb().Put(append(dappStorePre, key...), val) - return true -} - -// retrieve DApp value from extra database -func (self *XEth) DbGet(key []byte) ([]byte, error) { - val, err := self.backend.DappDb().Get(append(dappStorePre, key...)) - return val, err -} - -func (self *XEth) PeerCount() int { - return self.backend.PeerCount() -} - -func (self *XEth) IsMining() bool { - return self.backend.IsMining() -} - -func (self *XEth) HashRate() int64 { - return self.backend.Miner().HashRate() -} - -func (self *XEth) EthVersion() string { - return fmt.Sprintf("%d", self.backend.EthVersion()) -} - -func (self *XEth) NetworkVersion() string { - return fmt.Sprintf("%d", self.backend.NetVersion()) -} - -func (self *XEth) WhisperVersion() string { - return fmt.Sprintf("%d", self.backend.ShhVersion()) -} - -func (self *XEth) ClientVersion() string { - return self.backend.ClientVersion() -} - -func (self *XEth) SetMining(shouldmine bool, threads int) bool { - ismining := self.backend.IsMining() - if shouldmine && !ismining { - err := self.backend.StartMining(threads, "") - return err == nil - } - if ismining && !shouldmine { - self.backend.StopMining() - } - return self.backend.IsMining() -} - -func (self *XEth) IsListening() bool { - return self.backend.IsListening() -} - -func (self *XEth) Coinbase() string { - eb, err := self.backend.Etherbase() - if err != nil { - return "0x0" - } - return eb.Hex() -} - -func (self *XEth) NumberToHuman(balance string) string { - b := common.Big(balance) - - return common.CurrencyToString(b) -} - -func (self *XEth) StorageAt(addr, storageAddr string) string { - return self.State().state.GetState(common.HexToAddress(addr), common.HexToHash(storageAddr)).Hex() -} - -func (self *XEth) BalanceAt(addr string) string { - return common.ToHex(self.State().state.GetBalance(common.HexToAddress(addr)).Bytes()) -} - -func (self *XEth) TxCountAt(address string) int { - return int(self.State().state.GetNonce(common.HexToAddress(address))) -} - -func (self *XEth) CodeAt(address string) string { - return common.ToHex(self.State().state.GetCode(common.HexToAddress(address))) -} - -func (self *XEth) CodeAtBytes(address string) []byte { - return self.State().SafeGet(address).Code() -} - -func (self *XEth) IsContract(address string) bool { - return len(self.State().SafeGet(address).Code()) > 0 -} - -func (self *XEth) UninstallFilter(id int) bool { - defer self.filterManager.Remove(id) - - if _, ok := self.logQueue[id]; ok { - self.logMu.Lock() - defer self.logMu.Unlock() - delete(self.logQueue, id) - return true - } - if _, ok := self.blockQueue[id]; ok { - self.blockMu.Lock() - defer self.blockMu.Unlock() - delete(self.blockQueue, id) - return true - } - if _, ok := self.transactionQueue[id]; ok { - self.transactionMu.Lock() - defer self.transactionMu.Unlock() - delete(self.transactionQueue, id) - return true - } - - return false -} - -func (self *XEth) NewLogFilter(earliest, latest int64, skip, max int, address []string, topics [][]string) int { - self.logMu.Lock() - defer self.logMu.Unlock() - - filter := filters.New(self.backend.ChainDb()) - id := 
self.filterManager.Add(filter) - self.logQueue[id] = &logQueue{timeout: time.Now()} - - filter.SetBeginBlock(earliest) - filter.SetEndBlock(latest) - filter.SetAddresses(cAddress(address)) - filter.SetTopics(cTopics(topics)) - filter.LogsCallback = func(logs vm.Logs) { - self.logMu.Lock() - defer self.logMu.Unlock() - - if queue := self.logQueue[id]; queue != nil { - queue.add(logs...) - } - } - - return id -} - -func (self *XEth) NewTransactionFilter() int { - self.transactionMu.Lock() - defer self.transactionMu.Unlock() - - filter := filters.New(self.backend.ChainDb()) - id := self.filterManager.Add(filter) - self.transactionQueue[id] = &hashQueue{timeout: time.Now()} - - filter.TransactionCallback = func(tx *types.Transaction) { - self.transactionMu.Lock() - defer self.transactionMu.Unlock() - - if queue := self.transactionQueue[id]; queue != nil { - queue.add(tx.Hash()) - } - } - return id -} - -func (self *XEth) NewBlockFilter() int { - self.blockMu.Lock() - defer self.blockMu.Unlock() - - filter := filters.New(self.backend.ChainDb()) - id := self.filterManager.Add(filter) - self.blockQueue[id] = &hashQueue{timeout: time.Now()} - - filter.BlockCallback = func(block *types.Block, logs vm.Logs) { - self.blockMu.Lock() - defer self.blockMu.Unlock() - - if queue := self.blockQueue[id]; queue != nil { - queue.add(block.Hash()) - } - } - return id -} - -func (self *XEth) GetFilterType(id int) byte { - if _, ok := self.blockQueue[id]; ok { - return BlockFilterTy - } else if _, ok := self.transactionQueue[id]; ok { - return TransactionFilterTy - } else if _, ok := self.logQueue[id]; ok { - return LogFilterTy - } - - return UnknownFilterTy -} - -func (self *XEth) LogFilterChanged(id int) vm.Logs { - self.logMu.Lock() - defer self.logMu.Unlock() - - if self.logQueue[id] != nil { - return self.logQueue[id].get() - } - return nil -} - -func (self *XEth) BlockFilterChanged(id int) []common.Hash { - self.blockMu.Lock() - defer self.blockMu.Unlock() - - if self.blockQueue[id] != nil { - return self.blockQueue[id].get() - } - return nil -} - -func (self *XEth) TransactionFilterChanged(id int) []common.Hash { - self.transactionMu.Lock() - defer self.transactionMu.Unlock() - - if self.transactionQueue[id] != nil { - return self.transactionQueue[id].get() - } - return nil -} - -func (self *XEth) Logs(id int) vm.Logs { - filter := self.filterManager.Get(id) - if filter != nil { - return filter.Find() - } - - return nil -} - -func (self *XEth) AllLogs(earliest, latest int64, skip, max int, address []string, topics [][]string) vm.Logs { - filter := filters.New(self.backend.ChainDb()) - filter.SetBeginBlock(earliest) - filter.SetEndBlock(latest) - filter.SetAddresses(cAddress(address)) - filter.SetTopics(cTopics(topics)) - - return filter.Find() -} - -// NewWhisperFilter creates and registers a new message filter to watch for -// inbound whisper messages. All parameters at this point are assumed to be -// HEX encoded. 
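// Hedged polling sketch for the whisper filter API below (the helper name and
// the 500ms interval are illustrative): install a catch-all filter, drain new
// arrivals with WhisperMessagesChanged, and clean up on shutdown. The idle
// reaper in start() removes filters that are not polled within filterTickerTime.
func examplePollWhisper(x *XEth, quit chan struct{}) {
	id := x.NewWhisperFilter("", "", nil) // empty to/from and nil topics match any message
	defer x.UninstallWhisperFilter(id)

	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			for _, msg := range x.WhisperMessagesChanged(id) {
				_ = msg.Payload // hex-encoded payload; see xeth/whisper_message.go
			}
		case <-quit:
			return
		}
	}
}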
-func (p *XEth) NewWhisperFilter(to, from string, topics [][]string) int {
-	// Pre-define the id to be filled later
-	var id int
-
-	// Callback to delegate core whisper messages to this xeth filter
-	callback := func(msg WhisperMessage) {
-		p.messagesMu.RLock() // Only read lock to the filter pool
-		defer p.messagesMu.RUnlock()
-		p.messages[id].insert(msg)
-	}
-	// Initialize the core whisper filter and wrap into xeth
-	id = p.Whisper().Watch(to, from, topics, callback)
-
-	p.messagesMu.Lock()
-	p.messages[id] = newWhisperFilter(id, p.Whisper())
-	p.messagesMu.Unlock()
-
-	return id
-}
-
-// UninstallWhisperFilter disables and removes an existing filter.
-func (p *XEth) UninstallWhisperFilter(id int) bool {
-	p.messagesMu.Lock()
-	defer p.messagesMu.Unlock()
-
-	if _, ok := p.messages[id]; ok {
-		delete(p.messages, id)
-		return true
-	}
-	return false
-}
-
-// WhisperMessages retrieves all the known messages that match a specific filter.
-func (self *XEth) WhisperMessages(id int) []WhisperMessage {
-	self.messagesMu.RLock()
-	defer self.messagesMu.RUnlock()
-
-	if self.messages[id] != nil {
-		return self.messages[id].messages()
-	}
-	return nil
-}
-
-// WhisperMessagesChanged retrieves all the new messages matched by a filter
-// since the last retrieval
-func (self *XEth) WhisperMessagesChanged(id int) []WhisperMessage {
-	self.messagesMu.RLock()
-	defer self.messagesMu.RUnlock()
-
-	if self.messages[id] != nil {
-		return self.messages[id].retrieve()
-	}
-	return nil
-}
-
-// func (self *XEth) Register(args string) bool {
-// 	self.regmut.Lock()
-// 	defer self.regmut.Unlock()
-
-// 	if _, ok := self.register[args]; ok {
-// 		self.register[args] = nil // register with empty
-// 	}
-// 	return true
-// }
-
-// func (self *XEth) Unregister(args string) bool {
-// 	self.regmut.Lock()
-// 	defer self.regmut.Unlock()
-
-// 	if _, ok := self.register[args]; ok {
-// 		delete(self.register, args)
-// 		return true
-// 	}
-
-// 	return false
-// }
-
-// // TODO improve return type
-// func (self *XEth) PullWatchTx(args string) []*interface{} {
-// 	self.regmut.Lock()
-// 	defer self.regmut.Unlock()
-
-// 	txs := self.register[args]
-// 	self.register[args] = nil
-
-// 	return txs
-// }
-
-type KeyVal struct {
-	Key   string `json:"key"`
-	Value string `json:"value"`
-}
-
-func (self *XEth) EachStorage(addr string) string {
-	var values []KeyVal
-	object := self.State().SafeGet(addr)
-	it := object.Trie().Iterator()
-	for it.Next() {
-		values = append(values, KeyVal{common.ToHex(object.Trie().GetKey(it.Key)), common.ToHex(it.Value)})
-	}
-
-	valuesJson, err := json.Marshal(values)
-	if err != nil {
-		return ""
-	}
-
-	return string(valuesJson)
-}
-
-func (self *XEth) ToAscii(str string) string {
-	padded := common.RightPadBytes([]byte(str), 32)
-
-	return common.ToHex(padded)
-}
-
-func (self *XEth) FromAscii(str string) string {
-	if common.IsHex(str) {
-		str = str[2:]
-	}
-
-	return string(bytes.Trim(common.FromHex(str), "\x00"))
-}
-
-func (self *XEth) FromNumber(str string) string {
-	if common.IsHex(str) {
-		str = str[2:]
-	}
-
-	return common.BigD(common.FromHex(str)).String()
-}
-
-func (self *XEth) PushTx(encodedTx string) (string, error) {
-	tx := new(types.Transaction)
-	err := rlp.DecodeBytes(common.FromHex(encodedTx), tx)
-	if err != nil {
-		glog.V(logger.Error).Infoln(err)
-		return "", err
-	}
-
-	err = self.backend.TxPool().Add(tx)
-	if err != nil {
-		return "", err
-	}
-
-	if tx.To() == nil {
-		from, err := tx.From()
-		if err != nil {
-			return "", err
-		}
-
-		addr := crypto.CreateAddress(from, tx.Nonce())
-		glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr)
-	} else {
-		glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To())
-	}
-
-	return tx.Hash().Hex(), nil
-}
-
-func (self *XEth) Call(fromStr, toStr, valueStr, gasStr, gasPriceStr, dataStr string) (string, string, error) {
-	statedb := self.State().State().Copy()
-	var from *state.StateObject
-	if len(fromStr) == 0 {
-		accounts, err := self.backend.AccountManager().Accounts()
-		if err != nil || len(accounts) == 0 {
-			from = statedb.GetOrNewStateObject(common.Address{})
-		} else {
-			from = statedb.GetOrNewStateObject(accounts[0].Address)
-		}
-	} else {
-		from = statedb.GetOrNewStateObject(common.HexToAddress(fromStr))
-	}
-
-	from.SetBalance(common.MaxBig)
-
-	msg := callmsg{
-		from:     from,
-		gas:      common.Big(gasStr),
-		gasPrice: common.Big(gasPriceStr),
-		value:    common.Big(valueStr),
-		data:     common.FromHex(dataStr),
-	}
-	if len(toStr) > 0 {
-		addr := common.HexToAddress(toStr)
-		msg.to = &addr
-	}
-
-	if msg.gas.Cmp(big.NewInt(0)) == 0 {
-		msg.gas = big.NewInt(50000000)
-	}
-
-	if msg.gasPrice.Cmp(big.NewInt(0)) == 0 {
-		msg.gasPrice = self.DefaultGasPrice()
-	}
-
-	header := self.CurrentBlock().Header()
-	vmenv := core.NewEnv(statedb, self.backend.BlockChain(), msg, header)
-	gp := new(core.GasPool).AddGas(common.MaxBig)
-	res, gas, err := core.ApplyMessage(vmenv, msg, gp)
-	return common.ToHex(res), gas.String(), err
-}
-
-func (self *XEth) ConfirmTransaction(tx string) bool {
-	return self.frontend.ConfirmTransaction(tx)
-}
-
-func (self *XEth) doSign(from common.Address, hash common.Hash, didUnlock bool) ([]byte, error) {
-	sig, err := self.backend.AccountManager().Sign(accounts.Account{Address: from}, hash.Bytes())
-	if err == accounts.ErrLocked {
-		if didUnlock {
-			return nil, fmt.Errorf("signer account still locked after successful unlock")
-		}
-		if !self.frontend.UnlockAccount(from.Bytes()) {
-			return nil, fmt.Errorf("could not unlock signer account")
-		}
-		// retry signing, the account should now be unlocked.
-		return self.doSign(from, hash, true)
-	} else if err != nil {
-		return nil, err
-	}
-	return sig, nil
-}
-
-func (self *XEth) Sign(fromStr, hashStr string, didUnlock bool) (string, error) {
-	var (
-		from = common.HexToAddress(fromStr)
-		hash = common.HexToHash(hashStr)
-	)
-	sig, err := self.doSign(from, hash, didUnlock)
-	if err != nil {
-		return "", err
-	}
-	return common.ToHex(sig), nil
-}
-
-func isAddress(addr string) bool {
-	return addrReg.MatchString(addr)
-}
-
-func (self *XEth) Frontend() Frontend {
-	return self.frontend
-}
-
-func (self *XEth) SignTransaction(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceStr, codeStr string) (*types.Transaction, error) {
-	if len(toStr) > 0 && toStr != "0x" && !isAddress(toStr) {
-		return nil, errors.New("Invalid address")
-	}
-
-	var (
-		from             = common.HexToAddress(fromStr)
-		to               = common.HexToAddress(toStr)
-		value            = common.Big(valueStr)
-		gas              *big.Int
-		price            *big.Int
-		data             []byte
-		contractCreation bool
-	)
-
-	if len(gasStr) == 0 {
-		gas = DefaultGas()
-	} else {
-		gas = common.Big(gasStr)
-	}
-
-	if len(gasPriceStr) == 0 {
-		price = self.DefaultGasPrice()
-	} else {
-		price = common.Big(gasPriceStr)
-	}
-
-	data = common.FromHex(codeStr)
-	if len(toStr) == 0 {
-		contractCreation = true
-	}
-
-	var nonce uint64
-	if len(nonceStr) != 0 {
-		nonce = common.Big(nonceStr).Uint64()
-	} else {
-		state := self.backend.TxPool().State()
-		nonce = state.GetNonce(from)
-	}
-	var tx *types.Transaction
-	if contractCreation {
-		tx = types.NewContractCreation(nonce, value, gas, price, data)
-	} else {
-		tx = types.NewTransaction(nonce, to, value, gas, price, data)
-	}
-
-	signed, err := self.sign(tx, from, false)
-	if err != nil {
-		return nil, err
-	}
-
-	return signed, nil
-}
-
-func (self *XEth) Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceStr, codeStr string) (string, error) {
-
-	// this minimalistic recoding is enough (works for natspec.js)
-	var jsontx = fmt.Sprintf(`{"params":[{"to":"%s","data": "%s"}]}`, toStr, codeStr)
-	if !self.ConfirmTransaction(jsontx) {
-		err := fmt.Errorf("Transaction not confirmed")
-		return "", err
-	}
-
-	if len(toStr) > 0 && toStr != "0x" && !isAddress(toStr) {
-		return "", errors.New("Invalid address")
-	}
-
-	var (
-		from             = common.HexToAddress(fromStr)
-		to               = common.HexToAddress(toStr)
-		value            = common.Big(valueStr)
-		gas              *big.Int
-		price            *big.Int
-		data             []byte
-		contractCreation bool
-	)
-
-	if len(gasStr) == 0 {
-		gas = DefaultGas()
-	} else {
-		gas = common.Big(gasStr)
-	}
-
-	if len(gasPriceStr) == 0 {
-		price = self.DefaultGasPrice()
-	} else {
-		price = common.Big(gasPriceStr)
-	}
-
-	data = common.FromHex(codeStr)
-	if len(toStr) == 0 {
-		contractCreation = true
-	}
-
-	// 2015-05-18 Is this still needed?
-	// TODO if no_private_key then
-	//if _, exists := p.register[args.From]; exists {
-	//	p.register[args.From] = append(p.register[args.From], args)
-	//} else {
-	/*
-		account := accounts.Get(common.FromHex(args.From))
-		if account != nil {
-			if account.Unlocked() {
-				if !unlockAccount(account) {
-					return
-				}
-			}
-
-			result, _ := account.Transact(common.FromHex(args.To), common.FromHex(args.Value), common.FromHex(args.Gas), common.FromHex(args.GasPrice), common.FromHex(args.Data))
-			if len(result) > 0 {
-				*reply = common.ToHex(result)
-			}
-		} else if _, exists := p.register[args.From]; exists {
-			p.register[args.From] = append(p.register[args.From], args)
-		}
-	*/
-
-	self.transactMu.Lock()
-	defer self.transactMu.Unlock()
-
-	var nonce uint64
-	if len(nonceStr) != 0 {
-		nonce = common.Big(nonceStr).Uint64()
-	} else {
-		state := self.backend.TxPool().State()
-		nonce = state.GetNonce(from)
-	}
-	var tx *types.Transaction
-	if contractCreation {
-		tx = types.NewContractCreation(nonce, value, gas, price, data)
-	} else {
-		tx = types.NewTransaction(nonce, to, value, gas, price, data)
-	}
-
-	signed, err := self.sign(tx, from, false)
-	if err != nil {
-		return "", err
-	}
-	if err = self.backend.TxPool().Add(signed); err != nil {
-		return "", err
-	}
-
-	if contractCreation {
-		addr := crypto.CreateAddress(from, nonce)
-		glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signed.Hash().Hex(), addr.Hex())
-	} else {
-		glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signed.Hash().Hex(), tx.To().Hex())
-	}
-
-	return signed.Hash().Hex(), nil
-}
-
-func (self *XEth) sign(tx *types.Transaction, from common.Address, didUnlock bool) (*types.Transaction, error) {
-	hash := tx.SigHash()
-	sig, err := self.doSign(from, hash, didUnlock)
-	if err != nil {
-		return tx, err
-	}
-	return tx.WithSignature(sig)
-}
-
-// callmsg is the message type used for call transactions.
-type callmsg struct {
-	from          *state.StateObject
-	to            *common.Address
-	gas, gasPrice *big.Int
-	value         *big.Int
-	data          []byte
-}
-
-// accessor boilerplate to implement core.Message
-func (m callmsg) From() (common.Address, error)         { return m.from.Address(), nil }
-func (m callmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil }
-func (m callmsg) Nonce() uint64                         { return m.from.Nonce() }
-func (m callmsg) To() *common.Address                   { return m.to }
-func (m callmsg) GasPrice() *big.Int                    { return m.gasPrice }
-func (m callmsg) Gas() *big.Int                         { return m.gas }
-func (m callmsg) Value() *big.Int                       { return m.value }
-func (m callmsg) Data() []byte                          { return m.data }
-
-type logQueue struct {
-	mu sync.Mutex
-
-	logs    vm.Logs
-	timeout time.Time
-	id      int
-}
-
-func (l *logQueue) add(logs ...*vm.Log) {
-	l.mu.Lock()
-	defer l.mu.Unlock()
-
-	l.logs = append(l.logs, logs...)
-}
-
-func (l *logQueue) get() vm.Logs {
-	l.mu.Lock()
-	defer l.mu.Unlock()
-
-	l.timeout = time.Now()
-	tmp := l.logs
-	l.logs = nil
-	return tmp
-}
-
-type hashQueue struct {
-	mu sync.Mutex
-
-	hashes  []common.Hash
-	timeout time.Time
-	id      int
-}
-
-func (l *hashQueue) add(hashes ...common.Hash) {
-	l.mu.Lock()
-	defer l.mu.Unlock()
-
-	l.hashes = append(l.hashes, hashes...)
-}
-
-func (l *hashQueue) get() []common.Hash {
-	l.mu.Lock()
-	defer l.mu.Unlock()
-
-	l.timeout = time.Now()
-	tmp := l.hashes
-	l.hashes = nil
-	return tmp
-}