From 35f9293c56e7378367fb77d0faebc1ef3ac52fcb Mon Sep 17 00:00:00 2001
From: atvanguard <3612498+atvanguard@users.noreply.github.com>
Date: Fri, 8 Mar 2024 09:31:26 +0000
Subject: [PATCH 1/3] tresor, kitkat, berghain (#171)

* tresor, kitkat, berghain

* resolve debas comments

* chain config

* slight refactor

* resolve lumos comments
---
 chain.json                              |  3 +-
 go.mod                                  |  2 +-
 go.sum                                  |  2 --
 network-configs/aylin/chain_tresor.json |  5 ++
 plugin/evm/config.go                    | 12 ++--
 plugin/evm/gossiper_orders.go           |  5 ++
 plugin/evm/limit_order.go               | 73 ++++++++++++++++++++++--
 plugin/evm/order_api.go                 |  1 +
 plugin/evm/orderbook_test.go            |  4 +-
 plugin/evm/vm.go                        | 74 ++++++++++---------------
 10 files changed, 115 insertions(+), 66 deletions(-)
 create mode 100644 network-configs/aylin/chain_tresor.json

diff --git a/chain.json b/chain.json
index 20a6c96b20..fff6d0f399 100644
--- a/chain.json
+++ b/chain.json
@@ -7,8 +7,7 @@
   "priority-regossip-txs-per-address": 200,
   "priority-regossip-addresses": ["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC", "0x8db97C7cEcE249c2b98bDC0226Cc4C2A57BF52FC"],
   "validator-private-key-file": "/tmp/validator.pk",
-  "is-validator": true,
-  "trading-api-enabled": true,
+  "node-type": "kitkat_berghain",
   "testing-api-enabled": true,
   "load-from-snapshot-enabled": true,
   "snapshot-file-path": "/tmp/snapshot",
diff --git a/go.mod b/go.mod
index 6f38b319ab..baed320753 100644
--- a/go.mod
+++ b/go.mod
@@ -16,6 +16,7 @@ require (
 	github.com/fsnotify/fsnotify v1.6.0
 	github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08
 	github.com/go-cmd/cmd v1.4.1
+	github.com/golang/mock v1.6.0
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/rpc v1.2.0
 	github.com/gorilla/websocket v1.4.2
@@ -77,7 +78,6 @@ require (
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
-	github.com/golang/mock v1.6.0 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
 	github.com/google/btree v1.1.2 // indirect
diff --git a/go.sum b/go.sum
index 620f90fc9e..ab7db282ae 100644
--- a/go.sum
+++ b/go.sum
@@ -1049,8 +1049,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/network-configs/aylin/chain_tresor.json b/network-configs/aylin/chain_tresor.json
new file mode 100644
index 0000000000..e50a23e7e2
--- /dev/null
+++ b/network-configs/aylin/chain_tresor.json
@@ -0,0 +1,5 @@
+{
+  "state-sync-enabled": true,
+  "feeRecipient": "0xB3D25D47291D7F8b97FE3884A924541cDDcbB8Be",
+  "node-type": "tresor"
+}
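Note for operators migrating an existing chain.json: the two removed booleans collapse into the single node-type string used above. A minimal Go sketch of that mapping, illustrative only and not part of the patch — the equivalence of the legacy flag combinations is inferred from the role comments in this diff:

    package main

    import "fmt"

    // nodeTypeFromLegacyFlags is a hypothetical helper, not code from the patch:
    // it maps the removed is-validator / trading-api-enabled booleans onto the
    // new node-type values, assuming the role descriptions given in this diff.
    func nodeTypeFromLegacyFlags(isValidator, tradingAPIEnabled bool) string {
        switch {
        case isValidator && tradingAPIEnabled:
            return "kitkat_berghain" // validator + matching engine + rpc node
        case isValidator:
            return "kitkat" // validator + matching engine
        case tradingAPIEnabled:
            return "berghain" // rpc node
        default:
            return "tresor" // vanilla validator
        }
    }

    func main() {
        // The chain.json hunk above replaces both flags set to true with "kitkat_berghain".
        fmt.Println(nodeTypeFromLegacyFlags(true, true))
    }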
diff --git a/plugin/evm/config.go b/plugin/evm/config.go
index 5cf2027eab..9e43a43653 100644
--- a/plugin/evm/config.go
+++ b/plugin/evm/config.go
@@ -60,8 +60,7 @@ const (
 	defaultStateSyncMinBlocks      = 300_000
 	defaultStateSyncRequestSize    = 1024 // the number of key/values to ask peers for per request
-	defaultIsValidator             = false
-	defaultTradingAPIEnabled       = false
+	defaultNodeType                = "tresor"
 	defaultLoadFromSnapshotEnabled = true
 	defaultSnapshotFilePath        = "/tmp/snapshot"
 	defaultMakerbookDatabasePath   = "/tmp/makerbook"
@@ -236,11 +235,9 @@ type Config struct {
 	// Testing apis enabled
 	TestingApiEnabled bool `json:"testing-api-enabled"`
 
-	// IsValidator is true if this node is a validator
-	IsValidator bool `json:"is-validator"`
-
-	// TradingAPI is for the sdk
-	TradingAPIEnabled bool `json:"trading-api-enabled"`
+	// NodeType is the type of node among the following: "tresor", "kitkat", "berghain", meaning validator only, matching engine, rpc node respectively
+	NodeType string `json:"node-type"`
 
 	// LoadFromSnapshotEnabled = true if the node should load the memory db from a snapshot
 	LoadFromSnapshotEnabled bool `json:"load-from-snapshot-enabled"`
@@ -311,8 +308,7 @@ func (c *Config) SetDefaults() {
 	c.AcceptedCacheSize = defaultAcceptedCacheSize
 	c.ValidatorPrivateKeyFile = defaultValidatorPrivateKeyFile
 	c.TestingApiEnabled = defaultTestingApiEnabled
-	c.IsValidator = defaultIsValidator
-	c.TradingAPIEnabled = defaultTradingAPIEnabled
+	c.NodeType = defaultNodeType
 	c.LoadFromSnapshotEnabled = defaultLoadFromSnapshotEnabled
 	c.SnapshotFilePath = defaultSnapshotFilePath
 	c.MakerbookDatabasePath = defaultMakerbookDatabasePath
diff --git a/plugin/evm/gossiper_orders.go b/plugin/evm/gossiper_orders.go
index b4df9971c1..c98017d5a5 100644
--- a/plugin/evm/gossiper_orders.go
+++ b/plugin/evm/gossiper_orders.go
@@ -125,6 +125,11 @@ func (n *pushGossiper) sendSignedOrders(orders []*hu.SignedOrder) error {
 
 // #### HANDLER ####
 func (h *GossipHandler) HandleSignedOrders(nodeID ids.NodeID, msg message.SignedOrdersGossip) error {
+	// for vanilla validators we do not care about gossiping orders
+	if h.vm.limitOrderProcesser.GetNodeType() == Tresor {
+		return nil
+	}
+
 	h.mu.Lock()
 	defer h.mu.Unlock()
diff --git a/plugin/evm/limit_order.go b/plugin/evm/limit_order.go
index 3ca4661755..1b2bf0d733 100644
--- a/plugin/evm/limit_order.go
+++ b/plugin/evm/limit_order.go
@@ -9,6 +9,7 @@ import (
 	"os"
 	"runtime"
 	"runtime/debug"
+	"strings"
 	"sync"
 	"time"
 
@@ -32,12 +33,23 @@ const (
 	snapshotInterval uint64 = 10 // save snapshot every 1000 blocks
 )
 
+type NodeType string
+
+const (
+	Tresor          NodeType = "tresor"          // vanilla validator
+	Kitkat          NodeType = "kitkat"          // validator + matching engine
+	Berghain        NodeType = "berghain"        // rpc node
+	Kitkat_Berghain NodeType = "kitkat_berghain" // validator + matching engine + rpc node
+)
+
 type LimitOrderProcesser interface {
 	ListenAndProcessTransactions(blockBuilder *blockBuilder)
 	GetOrderBookAPI() *orderbook.OrderBookAPI
 	GetTestingAPI() *orderbook.TestingAPI
 	GetTradingAPI() *orderbook.TradingAPI
 	RunMatchingPipeline()
+	GetNodeType() NodeType
+	isMatcherNode() bool
 }
 
 type limitOrderProcesser struct {
@@ -56,19 +68,36 @@ type limitOrderProcesser struct {
 	hubbleDB                 database.Database
 	configService            orderbook.IConfigService
 	blockBuilder             *blockBuilder
-	isValidator              bool
 	tradingAPIEnabled        bool
+	nodeType                 NodeType
 	loadFromSnapshotEnabled  bool
 	snapshotSavedBlockNumber uint64
 	snapshotFilePath         string
 	tradingAPI               *orderbook.TradingAPI
 }
 
-func NewLimitOrderProcesser(ctx *snow.Context, txPool *txpool.TxPool, shutdownChan <-chan struct{}, shutdownWg *sync.WaitGroup,
-	backend *eth.EthAPIBackend, blockChain *core.BlockChain, hubbleDB database.Database, validatorPrivateKey string, config Config) LimitOrderProcesser {
+func NewLimitOrderProcesser(ctx *snow.Context, txPool *txpool.TxPool, shutdownChan <-chan struct{}, shutdownWg *sync.WaitGroup, backend *eth.EthAPIBackend, blockChain *core.BlockChain, hubbleDB database.Database, config Config) (LimitOrderProcesser, error) {
 	log.Info("**** NewLimitOrderProcesser")
+
 	configService := orderbook.NewConfigService(blockChain)
 	memoryDb := orderbook.NewInMemoryDatabase(configService)
+
+	nodeType, err := stringToNodeType(config.NodeType)
+	if err != nil {
+		return nil, err
+	}
+	var validatorPrivateKey string
+	if nodeType == Kitkat || nodeType == Kitkat_Berghain {
+		validatorPrivateKey, err = loadPrivateKeyFromFile(config.ValidatorPrivateKeyFile)
+		if err != nil {
+			panic(fmt.Sprint("please specify correct path for validator-private-key-file in chain.json ", err))
+		}
+		if validatorPrivateKey == "" {
+			panic("validator private key is empty")
+		}
+	}
 	lotp := orderbook.NewLimitOrderTxProcessor(txPool, memoryDb, backend, validatorPrivateKey)
+
 	signedObAddy := configService.GetSignedOrderbookContract()
 	contractEventProcessor := orderbook.NewContractEventsProcessor(memoryDb, signedObAddy)
@@ -110,14 +139,40 @@ func NewLimitOrderProcesser(ctx *snow.Context, txPool *txpool.TxPool, shutdownCh
 		matchingPipeline:        matchingPipeline,
 		filterAPI:               filterAPI,
 		configService:           configService,
-		isValidator:             config.IsValidator,
-		tradingAPIEnabled:       config.TradingAPIEnabled,
+		nodeType:                nodeType,
 		loadFromSnapshotEnabled: config.LoadFromSnapshotEnabled,
 		snapshotFilePath:        config.SnapshotFilePath,
+	}, nil
+}
+
+func loadPrivateKeyFromFile(keyFile string) (string, error) {
+	key, err := os.ReadFile(keyFile)
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimSuffix(string(key), "\n"), nil
+}
+
+func stringToNodeType(nodeTypeString string) (NodeType, error) {
+	switch nodeTypeString {
+	case string(Tresor):
+		return Tresor, nil
+	case string(Kitkat):
+		return Kitkat, nil
+	case string(Berghain):
+		return Berghain, nil
+	case string(Kitkat_Berghain):
+		return Kitkat_Berghain, nil
+	default:
+		return "", fmt.Errorf("unknown NodeType: %s", nodeTypeString)
 	}
 }
 
 func (lop *limitOrderProcesser) ListenAndProcessTransactions(blockBuilder *blockBuilder) {
+	if lop.nodeType == Tresor {
+		return
+	}
+
 	lop.mu.Lock()
 
 	lastAccepted := lop.blockChain.LastAcceptedBlock()
@@ -174,7 +229,7 @@ func (lop *limitOrderProcesser) ListenAndProcessTransactions(blockBuilder *block
 }
 
 func (lop *limitOrderProcesser) RunMatchingPipeline() {
-	if !lop.isValidator {
+	if !lop.isMatcherNode() {
 		return
 	}
 	executeFuncAndRecoverPanic(func() {
@@ -206,6 +261,14 @@ func (lop *limitOrderProcesser) GetTestingAPI() *orderbook.TestingAPI {
 	return orderbook.NewTestingAPI(lop.memoryDb, lop.backend, lop.configService, lop.hubbleDB)
 }
 
+func (lop *limitOrderProcesser) GetNodeType() NodeType {
+	return lop.nodeType
+}
+
+func (lop *limitOrderProcesser) isMatcherNode() bool {
+	return lop.nodeType == Kitkat || lop.nodeType == Kitkat_Berghain
+}
+
 func (lop *limitOrderProcesser) listenAndStoreLimitOrderTransactions() {
 	logsCh := make(chan []*types.Log)
 	logsSubscription := lop.backend.SubscribeHubbleLogsEvent(logsCh)
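The new stringToNodeType parser above is what turns the node-type config value into a NodeType; an unknown value is returned as an error from NewLimitOrderProcesser rather than being ignored. A table-driven test sketch for it, assumed to live in the same evm package as the parser (the test itself is not part of the patch):

    package evm

    import "testing"

    // TestStringToNodeType is an illustrative sketch, not part of the patch: it
    // exercises stringToNodeType with every accepted value plus an invalid one.
    func TestStringToNodeType(t *testing.T) {
        valid := map[string]NodeType{
            "tresor":          Tresor,
            "kitkat":          Kitkat,
            "berghain":        Berghain,
            "kitkat_berghain": Kitkat_Berghain,
        }
        for in, want := range valid {
            got, err := stringToNodeType(in)
            if err != nil || got != want {
                t.Fatalf("stringToNodeType(%q) = %v, %v; want %v, <nil>", in, got, err, want)
            }
        }
        // Any other string should be rejected, which surfaces as an
        // initialization error from NewLimitOrderProcesser.
        if _, err := stringToNodeType("techno"); err == nil {
            t.Fatal("expected an error for an unknown node-type")
        }
    }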
diff --git a/plugin/evm/order_api.go b/plugin/evm/order_api.go
index 839d3bf442..e4aa4290ba 100644
--- a/plugin/evm/order_api.go
+++ b/plugin/evm/order_api.go
@@ -60,6 +60,7 @@ func (api *OrderAPI) PlaceSignedOrders(ctx context.Context, input string) (Place
 			continue
 		}
 
+		// we ignore the 2nd return value, shouldTriggerMatching: since PlaceSignedOrders is only called on API nodes, we do not trigger matching in them
 		orderId, _, err := api.tradingAPI.PlaceOrder(order)
 		orderResponse.OrderId = orderId.String()
 		if err != nil {
diff --git a/plugin/evm/orderbook_test.go b/plugin/evm/orderbook_test.go
index 1d6c538af0..0e5ef2cdf6 100644
--- a/plugin/evm/orderbook_test.go
+++ b/plugin/evm/orderbook_test.go
@@ -961,8 +961,8 @@ func TestHubbleLogs(t *testing.T) {
 	// Create two VMs which will agree on block A and then
 	// build the two distinct preferred chains above
 	ctx := context.Background()
-	issuer1, vm1, _, _ := GenesisVM(t, true, genesisJSON, "{\"pruning-enabled\":true}", "")
-	issuer2, vm2, _, _ := GenesisVM(t, true, genesisJSON, "{\"pruning-enabled\":true}", "")
+	issuer1, vm1, _, _ := GenesisVM(t, true, genesisJSON, "{\"pruning-enabled\":true,\"node-type\":\"kitkat\"}", "")
+	issuer2, vm2, _, _ := GenesisVM(t, true, genesisJSON, "{\"pruning-enabled\":true,\"node-type\":\"kitkat\"}", "")
 
 	defer func() {
 		if err := vm1.Shutdown(ctx); err != nil {
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index e9b1948624..bb7ec14f50 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -567,7 +567,19 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash, ethConfig ethconfig.
 	vm.blockChain = vm.eth.BlockChain()
 	vm.miner = vm.eth.Miner()
 
-	vm.limitOrderProcesser = vm.NewLimitOrderProcesser()
+	vm.limitOrderProcesser, err = NewLimitOrderProcesser(
+		vm.ctx,
+		vm.txPool,
+		vm.shutdownChan,
+		&vm.shutdownWg,
+		vm.eth.APIBackend,
+		vm.blockChain,
+		vm.hubbleDB,
+		vm.config,
+	)
+	if err != nil {
+		return err
+	}
 	vm.eth.Start()
 	return vm.initChainState(vm.blockChain.LastAcceptedBlock())
 }
@@ -1008,24 +1020,27 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) {
 		}
 		enabledAPIs = append(enabledAPIs, "snowman")
 	}
-	if err := handler.RegisterName("order", NewOrderAPI(vm.limitOrderProcesser.GetTradingAPI(), vm)); err != nil {
-		return nil, err
-	}
-	orderbook.MakerbookDatabaseFile = vm.config.MakerbookDatabasePath
-	if err := handler.RegisterName("orderbook", vm.limitOrderProcesser.GetOrderBookAPI()); err != nil {
-		return nil, err
-	}
+	if vm.limitOrderProcesser.GetNodeType() != Tresor {
+		// orderbook and trading APIs should be enabled for kitkat too, because it's useful to have visibility into the memory db on those nodes (i.e. to understand the validators' memory db)
+		if err := handler.RegisterName("order", NewOrderAPI(vm.limitOrderProcesser.GetTradingAPI(), vm)); err != nil {
+			return nil, err
+		}
+		orderbook.MakerbookDatabaseFile = vm.config.MakerbookDatabasePath
 
-	if vm.config.TradingAPIEnabled {
-		if err := handler.RegisterName("trading", vm.limitOrderProcesser.GetTradingAPI()); err != nil {
+		if err := handler.RegisterName("orderbook", vm.limitOrderProcesser.GetOrderBookAPI()); err != nil {
 			return nil, err
 		}
-	}
-	if vm.config.TestingApiEnabled {
-		if err := handler.RegisterName("testing", vm.limitOrderProcesser.GetTestingAPI()); err != nil {
+
+		if err := handler.RegisterName("trading", vm.limitOrderProcesser.GetTradingAPI()); err != nil {
 			return nil, err
 		}
+
+		if vm.config.TestingApiEnabled {
+			if err := handler.RegisterName("testing", vm.limitOrderProcesser.GetTestingAPI()); err != nil {
+				return nil, err
+			}
+		}
 	}
 
 	if vm.config.WarpAPIEnabled {
@@ -1165,36 +1180,3 @@ func attachEthService(handler *rpc.Server, apis []rpc.API, names []string) error
 	return nil
 }
-
-func (vm *VM) NewLimitOrderProcesser() LimitOrderProcesser {
-	var validatorPrivateKey string
-	var err error
-	if vm.config.IsValidator {
-		validatorPrivateKey, err = loadPrivateKeyFromFile(vm.config.ValidatorPrivateKeyFile)
-		if err != nil {
-			panic(fmt.Sprint("please specify correct path for validator-private-key-file in chain.json ", err))
-		}
-		if validatorPrivateKey == "" {
-			panic("validator private key is empty")
-		}
-	}
-	return NewLimitOrderProcesser(
-		vm.ctx,
-		vm.txPool,
-		vm.shutdownChan,
-		&vm.shutdownWg,
-		vm.eth.APIBackend,
-		vm.blockChain,
-		vm.hubbleDB,
-		validatorPrivateKey,
-		vm.config,
-	)
-}
-
-func loadPrivateKeyFromFile(keyFile string) (string, error) {
-	key, err := os.ReadFile(keyFile)
-	if err != nil {
-		return "", err
-	}
-	return strings.TrimSuffix(string(key), "\n"), nil
-}
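Read together, the vm.go hunks above make the hubblenet-specific RPC namespaces a pure function of the node type. A compact, illustrative summary of the CreateHandlers change (this map is a reading aid, not code from the patch):

    package evm

    // apisByNodeType summarizes the CreateHandlers hunk above: every node type
    // except Tresor registers the order, orderbook and trading namespaces, and
    // "testing" is additionally gated on testing-api-enabled in chain.json.
    var apisByNodeType = map[NodeType][]string{
        Tresor:          {},
        Kitkat:          {"order", "orderbook", "trading"},
        Berghain:        {"order", "orderbook", "trading"},
        Kitkat_Berghain: {"order", "orderbook", "trading"},
    }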

From 98335662391b00bf787e34f5ffbd9b5c2ebf51f8 Mon Sep 17 00:00:00 2001
From: Hugo Broudeur
Date: Mon, 11 Mar 2024 10:25:15 +0000
Subject: [PATCH 2/3] feature/oonodz-integration. Integrate ooNodz workflow and
 create and push docker images

feature/oonodz-integration. Update variables
---
 .github/workflows/ci-push-image-aylin.yml | 48 +++++++++++++
 .github/workflows/push-image-release.yml  | 85 +++++++++++++++++++++++
 scripts/build_image.sh                    |  1 +
 scripts/constants.sh                      |  3 ---
 4 files changed, 134 insertions(+), 3 deletions(-)
 create mode 100644 .github/workflows/ci-push-image-aylin.yml
 create mode 100644 .github/workflows/push-image-release.yml

diff --git a/.github/workflows/ci-push-image-aylin.yml b/.github/workflows/ci-push-image-aylin.yml
new file mode 100644
index 0000000000..385c206177
--- /dev/null
+++ b/.github/workflows/ci-push-image-aylin.yml
@@ -0,0 +1,48 @@
+name: Build + Push aylin image
+
+on:
+  push:
+    branches:
+      - aylin
+
+defaults:
+  run:
+    shell: bash
+
+jobs:
+  build_fuji_image_aylin:
+    name: Build Docker Image
+    timeout-minutes: 60
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get Current Tag
+        id: get_tag
+        run: echo ::set-output name=tag::$(git describe --abbrev=0 --tags)
+
+      - name: Login to Docker hub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASS }}
+
+      - name: Build Dockerfile and Push it
+        run: |
+          TAG_FUJI="fuji-"
+          TAG_END=$GITHUB_SHA
+
+          if [ -n "$GITHUB_TAG" ]; then
+            TAG_END=$GITHUB_TAG
+          fi
+
+          export BUILD_IMAGE_ID="${{ vars.AVALANCHE_VERSION_DEV }}-${TAG_FUJI}${TAG_END}"
+
+          echo "COPY --from=builder /build/jvrKsTB9MfYGnAXtxbzFYpXKceXr9J8J8ej6uWGrYM5tXswhJ /root/.avalanchego/plugins/jvrKsTB9MfYGnAXtxbzFYpXKceXr9J8J8ej6uWGrYM5tXswhJ" >> Dockerfile
+          ./scripts/build_image.sh
+        env:
+          CURRENT_BRANCH: ${{ github.head_ref || github.ref_name }}
+          PUSH_DOCKER_IMAGE: true
+          DOCKERHUB_REPO: hubbleexchange/hubblenet
+          GITHUB_TAG: ${{ steps.get_tag.outputs.tag }}
+          GITHUB_SHA: ${{ github.sha }}
diff --git a/.github/workflows/push-image-release.yml b/.github/workflows/push-image-release.yml
new file mode 100644
index 0000000000..fc0946ebc0
--- /dev/null
+++ b/.github/workflows/push-image-release.yml
@@ -0,0 +1,85 @@
+name: Build + Push release image
+
+on:
+  workflow_dispatch:
+    inputs:
+      release_tag:
+        description: 'Release tag'
+        required: true
+        type: string
+
+defaults:
+  run:
+    shell: bash
+
+jobs:
+  build_release_image:
+    name: Build Docker Image
+    timeout-minutes: 60
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Login to Docker hub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASS }}
+
+      - name: Create the Dockerfile
+        env:
+          HUBBLENET_RELEASE_TAG: ${{ inputs.release_tag }}
+          AVALANCHE_VERSION: ${{ vars.AVALANCHE_VERSION }}
+        run: |
+          if [ "${HUBBLENET_RELEASE_TAG:0:1}" = "v" ]; then
+            HUBBLENET_VERSION="${HUBBLENET_RELEASE_TAG:1}";
+            HUBBLENET_RELEASE_TAG="${HUBBLENET_RELEASE_TAG}";
+          else
+            HUBBLENET_VERSION="${HUBBLENET_RELEASE_TAG}";
+          fi
+
+          multiline_text=$(cat <<EOF
+          [heredoc body (Dockerfile-release contents) missing from this copy of the patch]
+          EOF
+          )
+
+          echo "$multiline_text" > Dockerfile-release
+          cat Dockerfile-release
+
+      - name: Build and push release image for the mainnet
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          file: ./Dockerfile-release
+          push: true
+          tags: "hubbleexchange/hubblenet:${{ vars.AVALANCHE_VERSION }}-${{ inputs.release_tag }}"
+          build-args: |
+            VM_ID=o1Fg94YujMqL75Ebrdkos95MTVjZpPpdeAp5ocEsp2X9c2FSz
+
+
+      - name: Build and push release image for the fuji
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          file: ./Dockerfile-release
+          push: true
+          tags: "hubbleexchange/hubblenet:${{ vars.AVALANCHE_VERSION }}-fuji-${{ inputs.release_tag }}"
+          build-args: |
+            VM_ID=jvrKsTB9MfYGnAXtxbzFYpXKceXr9J8J8ej6uWGrYM5tXswhJ
diff --git a/scripts/build_image.sh b/scripts/build_image.sh
index 90ecccd522..9d0d00bfc4 100755
--- a/scripts/build_image.sh
+++ b/scripts/build_image.sh
@@ -17,6 +17,7 @@ source "$SUBNET_EVM_PATH"/scripts/versions.sh
 source "$SUBNET_EVM_PATH"/scripts/constants.sh
 
 BUILD_IMAGE_ID=${BUILD_IMAGE_ID:-"${AVALANCHE_VERSION}-Subnet-EVM-${CURRENT_BRANCH}"}
+DOCKERHUB_REPO=${DOCKERHUB_REPO:-"avaplatform/avalanchego"}
 
 echo "Building Docker Image: $DOCKERHUB_REPO:$BUILD_IMAGE_ID based of $AVALANCHE_VERSION"
 docker build -t "$DOCKERHUB_REPO:$BUILD_IMAGE_ID" "$SUBNET_EVM_PATH" -f "$SUBNET_EVM_PATH/Dockerfile" \
diff --git a/scripts/constants.sh b/scripts/constants.sh
index 54fe90b254..1c0d3663e2 100644
--- a/scripts/constants.sh
+++ b/scripts/constants.sh
@@ -8,9 +8,6 @@ set -euo pipefail
 # Set the PATHS
 GOPATH="$(go env GOPATH)"
 
-# Avalabs docker hub
-DOCKERHUB_REPO="avaplatform/avalanchego"
-
 # if this isn't a git repository (say building from a release), don't set our git constants.
 if [ ! -d .git ]; then
   CURRENT_BRANCH=""

From 68e2db851293a7039626ab3b01a1f4887eb37eaa Mon Sep 17 00:00:00 2001
From: Hugo Broudeur
Date: Thu, 14 Mar 2024 14:46:03 +0000
Subject: [PATCH 3/3] feature/oonodz-integration. Also produce the aylin image
 for the mainnet

---
 .github/workflows/ci-push-image-aylin.yml | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/ci-push-image-aylin.yml b/.github/workflows/ci-push-image-aylin.yml
index 385c206177..37c16df2a2 100644
--- a/.github/workflows/ci-push-image-aylin.yml
+++ b/.github/workflows/ci-push-image-aylin.yml
@@ -29,16 +29,20 @@ jobs:
 
       - name: Build Dockerfile and Push it
         run: |
-          TAG_FUJI="fuji-"
           TAG_END=$GITHUB_SHA
 
           if [ -n "$GITHUB_TAG" ]; then
            TAG_END=$GITHUB_TAG
           fi
 
-          export BUILD_IMAGE_ID="${{ vars.AVALANCHE_VERSION_DEV }}-${TAG_FUJI}${TAG_END}"
+          export BUILD_IMAGE_ID="${{ vars.AVALANCHE_VERSION_DEV }}-aylin-${TAG_END}"
+
+          # Copy binary to the correct Fuji VM ID repository
+          echo "COPY --from=builder /build/jvrKsTB9MfYGnAXtxbzFYpXKceXr9J8J8ej6uWGrYM5tXswhJ /root/.avalanchego/plugins/jvrKsTB9MfYGnAXtxbzFYpXKceXr9J8J8ej6uWGrYM5tXswhJ" >> Dockerfile
+
+          # Copy binary to the correct Mainnet VM ID repository
+          echo "COPY --from=builder /build/jvrKsTB9MfYGnAXtxbzFYpXKceXr9J8J8ej6uWGrYM5tXswhJ /root/.avalanchego/plugins/o1Fg94YujMqL75Ebrdkos95MTVjZpPpdeAp5ocEsp2X9c2FSz" >> Dockerfile
 
-          echo "COPY --from=builder /build/jvrKsTB9MfYGnAXtxbzFYpXKceXr9J8J8ej6uWGrYM5tXswhJ /root/.avalanchego/plugins/jvrKsTB9MfYGnAXtxbzFYpXKceXr9J8J8ej6uWGrYM5tXswhJ" >> Dockerfile
           ./scripts/build_image.sh
         env:
           CURRENT_BRANCH: ${{ github.head_ref || github.ref_name }}