From 343a86dd7fbd0c2c1dc9bc80b8325e025f2cdfd8 Mon Sep 17 00:00:00 2001
From: Ilya Kreymer
Date: Thu, 21 Mar 2024 09:51:52 -0700
Subject: [PATCH 1/6] improvements to 'non-graceful' interrupt to ensure WARCs are still closed gracefully:

- immediately exit the browser, and call closeWorkers()
- finalize() recorder, finish active WARC records but don't fetch anything else
- flush() existing open writer, mark as done, don't write anything else
---
 src/crawler.ts         |  9 ++++-
 src/util/recorder.ts   | 16 ++++----
 src/util/warcwriter.ts | 27 ++++++++------
 src/util/worker.ts     | 85 +++++++++++++++++++++++-------------------
 4 files changed, 79 insertions(+), 58 deletions(-)

diff --git a/src/crawler.ts b/src/crawler.ts
index 58381f249..d56a31929 100644
--- a/src/crawler.ts
+++ b/src/crawler.ts
@@ -32,7 +32,12 @@ import { Screenshots } from "./util/screenshots.js";
 import { parseArgs } from "./util/argParser.js";
 import { initRedis } from "./util/redis.js";
 import { logger, formatErr } from "./util/logger.js";
-import { WorkerOpts, WorkerState, runWorkers } from "./util/worker.js";
+import {
+  WorkerOpts,
+  WorkerState,
+  closeWorkers,
+  runWorkers,
+} from "./util/worker.js";
 import { sleep, timedRun, secondsElapsed } from "./util/timing.js";
 import { collectAllFileSources, getInfoString } from "./util/file_reader.js";
@@ -1117,6 +1122,8 @@ self.__bx_behaviors.selectMainBehavior();
     await this.serializeConfig();
 
     if (this.interrupted) {
+      await this.browser.close();
+      await closeWorkers(0);
       await this.setStatusAndExit(13, "interrupted");
     } else {
       await this.setStatusAndExit(0, "done");
diff --git a/src/util/recorder.ts b/src/util/recorder.ts
index 400496884..0a47bb24d 100644
--- a/src/util/recorder.ts
+++ b/src/util/recorder.ts
@@ -803,13 +803,15 @@ export class Recorder {
       await this.fetcherQ.onIdle();
     };
 
-    await timedRun(
-      finishFetch(),
-      timeout,
-      "Finishing Fetch Timed Out",
-      this.logDetails,
-      "recorder",
-    );
+    if (timeout > 0) {
+      await timedRun(
+        finishFetch(),
+        timeout,
+        "Finishing Fetch Timed Out",
+        this.logDetails,
+        "recorder",
+      );
+    }
 
     logger.debug("Finishing WARC writing", this.logDetails, "recorder");
     await this.warcQ.onIdle();
diff --git a/src/util/warcwriter.ts b/src/util/warcwriter.ts
index 544c01e1c..b30f886dd 100644
--- a/src/util/warcwriter.ts
+++ b/src/util/warcwriter.ts
@@ -105,15 +105,6 @@ export class WARCWriter implements IndexerOffsetLength {
     requestRecord: WARCRecord,
     responseSerializer: WARCSerializer | undefined = undefined,
   ) {
-    if (this.done) {
-      logger.warn(
-        "Writer closed, not writing records",
-        this.logDetails,
-        "writer",
-      );
-      return;
-    }
-
     const opts = { gzip: this.gzip };
 
     if (!responseSerializer) {
@@ -147,6 +138,15 @@ export class WARCWriter implements IndexerOffsetLength {
   }
 
   private async _writeRecord(record: WARCRecord, serializer: WARCSerializer) {
+    if (this.done) {
+      logger.warn(
+        "Writer closed, not writing records",
+        this.logDetails,
+        "writer",
+      );
+      return 0;
+    }
+
     let total = 0;
 
     const url = record.warcTargetURI;
@@ -171,6 +171,11 @@ export class WARCWriter implements IndexerOffsetLength {
   }
 
   _writeCDX(record: WARCRecord | null) {
+    if (this.done) {
+      logger.warn("Writer closed, not writing CDX", this.logDetails, "writer");
+      return;
+    }
+
     if (this.indexer && this.filename) {
       const cdx = this.indexer.indexRecord(record, this, this.filename);
 
@@ -183,6 +188,8 @@ export class WARCWriter implements IndexerOffsetLength {
   }
 
   async flush() {
+    this.done = true;
+
     if (this.fh) {
       await streamFinish(this.fh);
       this.fh = null;
@@ -194,8 +201,6 @@ export class WARCWriter implements IndexerOffsetLength {
       await streamFinish(this.cdxFH);
       this.cdxFH = null;
     }
-
-    this.done = true;
   }
 }
diff --git a/src/util/worker.ts b/src/util/worker.ts
index a6b399f0b..119bfbe9d 100644
--- a/src/util/worker.ts
+++ b/src/util/worker.ts
@@ -14,43 +14,6 @@ const NEW_WINDOW_TIMEOUT = 20;
 const TEARDOWN_TIMEOUT = 10;
 const FINISHED_TIMEOUT = 60;
 
-// ===========================================================================
-export async function runWorkers(
-  crawler: Crawler,
-  numWorkers: number,
-  maxPageTime: number,
-  collDir: string,
-) {
-  logger.info(`Creating ${numWorkers} workers`, {}, "worker");
-
-  const workers = [];
-  let offset = 0;
-
-  // automatically set worker start by ordinal in k8s
-  // if hostname is "crawl-id-name-N"
-  // while CRAWL_ID is "crawl-id-name", then set starting
-  // worker index offset to N * numWorkers
-
-  if (process.env.CRAWL_ID) {
-    const rx = new RegExp(rxEscape(process.env.CRAWL_ID) + "\\-([\\d]+)$");
-    const m = os.hostname().match(rx);
-    if (m) {
-      offset = Number(m[1]) * numWorkers;
-      logger.info("Starting workerid index at " + offset, "worker");
-    }
-  }
-
-  for (let i = 0; i < numWorkers; i++) {
-    workers.push(new PageWorker(i + offset, crawler, maxPageTime, collDir));
-  }
-
-  await Promise.allSettled(workers.map((worker) => worker.run()));
-
-  await crawler.browser.close();
-
-  await Promise.allSettled(workers.map((worker) => worker.finalize()));
-}
-
 // ===========================================================================
 export type WorkerOpts = {
   page: Page;
@@ -372,9 +335,9 @@ export class PageWorker {
     }
   }
 
-  async finalize() {
+  async finalize(waitTime?: number) {
     if (this.recorder) {
-      await this.recorder.onDone(this.maxPageTime);
+      await this.recorder.onDone(waitTime ?? this.maxPageTime);
     }
   }
 
@@ -433,3 +396,47 @@ export class PageWorker {
     }
   }
 }
+
+// ===========================================================================
+const workers: PageWorker[] = [];
+
+// ===========================================================================
+export async function runWorkers(
+  crawler: Crawler,
+  numWorkers: number,
+  maxPageTime: number,
+  collDir: string,
+) {
+  logger.info(`Creating ${numWorkers} workers`, {}, "worker");
+
+  let offset = 0;
+
+  // automatically set worker start by ordinal in k8s
+  // if hostname is "crawl-id-name-N"
+  // while CRAWL_ID is "crawl-id-name", then set starting
+  // worker index offset to N * numWorkers
+
+  if (process.env.CRAWL_ID) {
+    const rx = new RegExp(rxEscape(process.env.CRAWL_ID) + "\\-([\\d]+)$");
+    const m = os.hostname().match(rx);
+    if (m) {
+      offset = Number(m[1]) * numWorkers;
+      logger.info("Starting workerid index at " + offset, "worker");
+    }
+  }
+
+  for (let i = 0; i < numWorkers; i++) {
+    workers.push(new PageWorker(i + offset, crawler, maxPageTime, collDir));
+  }
+
+  await Promise.allSettled(workers.map((worker) => worker.run()));
+
+  await crawler.browser.close();
+
+  await closeWorkers();
+}
+
+// ===========================================================================
+export function closeWorkers(waitTime?: number) {
+  return Promise.allSettled(workers.map((worker) => worker.finalize(waitTime)));
+}
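Taken together, the changes in this first patch give the crawler a second, faster shutdown path that still closes WARCs cleanly. The following is a rough, hypothetical sketch of how these pieces could be wired together; the function name `registerInterruptHandlers`, the import paths, and the bare `process.exit(13)` are illustrative stand-ins for the crawler's actual signal handling and `setStatusAndExit()`, and are not part of this patch set:

```ts
import { Crawler } from "./crawler.js";
import { closeWorkers } from "./util/worker.js";

// Hypothetical wiring, not taken from this patch set.
export function registerInterruptHandlers(crawler: Crawler) {
  let interrupted = false;

  const onSignal = async () => {
    if (!interrupted) {
      // First SIGINT/SIGTERM: request a graceful stop; the crawl loop
      // finishes current pages and then exits on its own.
      interrupted = true;
      return;
    }
    // Second signal: stop the browser right away so no further fetching happens,
    await crawler.browser.close();
    // then closeWorkers(0) -> finalize(0) -> recorder.onDone(0), which skips the
    // timed "finish fetch" wait, drains the WARC queue, and lets each writer
    // flush() and mark itself done before anything else can be written.
    await closeWorkers(0);
    process.exit(13);
  };

  process.on("SIGINT", () => void onSignal());
  process.on("SIGTERM", () => void onSignal());
}
```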
From 219b339e30e32d9d221c61e290b5add21c288320 Mon Sep 17 00:00:00 2001
From: Ilya Kreymer
Date: Thu, 21 Mar 2024 12:08:22 -0700
Subject: [PATCH 2/6] add interruption docs

---
 docs/docs/user-guide/common-options.md | 30 +++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/docs/docs/user-guide/common-options.md b/docs/docs/user-guide/common-options.md
index f94685044..f5529c237 100644
--- a/docs/docs/user-guide/common-options.md
+++ b/docs/docs/user-guide/common-options.md
@@ -115,7 +115,7 @@ Webhook notification JSON includes:
 
 ## Saving Crawl State: Interrupting and Restarting the Crawl
 
-A crawl can be gracefully interrupted with Ctrl-C (SIGINT) or a SIGTERM.
+A crawl can be gracefully interrupted with Ctrl-C (SIGINT) or a SIGTERM (see below for more details).
 
 When a crawl is interrupted, the current crawl state is written to the `crawls` subdirectory inside the collection directory. The crawl state includes the current YAML config, if any, plus the current state of the crawl.
 
@@ -128,3 +128,31 @@ By default, the crawl state is only written when a crawl is interrupted before c
 ### Periodic State Saving
 
 When the `--saveState` is set to always, Browsertrix Crawler will also save the state automatically during the crawl, as set by the `--saveStateInterval` setting. The crawler will keep the last `--saveStateHistory` save states and delete older ones. This provides extra backup, in the event that the crawl fails unexpectedly or is not terminated via Ctrl-C, several previous crawl states are still available.
+
+### Note/Caveat on Crawl Interruption
+
+Browsertrix Crawler has different crawl interruption modes, and does everything it can to ensure the WARC data written is always valid when a crawl is interrupted. The following are three interruption scenarios:
+
+### 1. Graceful Shutdown
+
+Initiated when a single SIGINT (Ctrl+C) or SIGTERM (`docker kill -s SIGINT`, `docker kill -s SIGTERM`, `kill`) signal received
+The crawler will attempt to finish current pages, finish any pending async requests, write all WARCS, generate WACZ files
+and other post-processing, save state from Redis and then exit.
+
+### 2. Less-Graceful, Quick Shutdown
+
+If a second SIGINT / SIGTERM is received, the crawler will close the browser immediately, interrupting any on-going network requests.
+Any pending network requests will not be awaited. However, anything in the WARC queue will be written and WARC files will be flushed.
+WACZ files and other post-processing will not be generated, but the state will be saved in Redis.
+WARC files should still be valid.
+
+### 3. Violent / Immediate Shutdown
+
+If a crawler is killed, eg. with SIGKILL signal (`docker kill`, `kill -9`), the crawler container / process will be immediately shut down. It will not have a chance to finish any WARC files, and there is no guarantee that WARC files will be valid, but the crawler will of course exit right away.
+
+
+### Recommendations
+
+It is recommended to graceful stop the crawler by sending a SIGINT or SIGTERM signal, which can be done via Ctrl+C or `docker kill -s SIGINT `. Repeating the command will result in a faster, slightly less-graceful shutdown.
+Kubernetes also uses SIGTERM by default when shutting down container pods. Using SIGKILL is not recommended
+except for last resort, and only when data is to be discarded.
\ No newline at end of file

From e99f17df4f8530925309fa8568696ec26f4cbcfd Mon Sep 17 00:00:00 2001
From: Ilya Kreymer
Date: Thu, 21 Mar 2024 12:11:01 -0700
Subject: [PATCH 3/6] docs tweak

---
 docs/docs/user-guide/common-options.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/docs/user-guide/common-options.md b/docs/docs/user-guide/common-options.md
index f5529c237..1c1d313b0 100644
--- a/docs/docs/user-guide/common-options.md
+++ b/docs/docs/user-guide/common-options.md
@@ -129,7 +129,7 @@ By default, the crawl state is only written when a crawl is interrupted before c
 
 When the `--saveState` is set to always, Browsertrix Crawler will also save the state automatically during the crawl, as set by the `--saveStateInterval` setting. The crawler will keep the last `--saveStateHistory` save states and delete older ones. This provides extra backup, in the event that the crawl fails unexpectedly or is not terminated via Ctrl-C, several previous crawl states are still available.
 
-### Note/Caveat on Crawl Interruption
+## Crawl Interruption Options
 
 Browsertrix Crawler has different crawl interruption modes, and does everything it can to ensure the WARC data written is always valid when a crawl is interrupted. The following are three interruption scenarios:
 
@@ -155,4 +155,4 @@
 
 It is recommended to graceful stop the crawler by sending a SIGINT or SIGTERM signal, which can be done via Ctrl+C or `docker kill -s SIGINT `. Repeating the command will result in a faster, slightly less-graceful shutdown.
 Kubernetes also uses SIGTERM by default when shutting down container pods. Using SIGKILL is not recommended
-except for last resort, and only when data is to be discarded.
\ No newline at end of file
+except for last resort, and only when data is to be discarded.
From 3fefce986fc9dc7def6f6965c19d030236587cfc Mon Sep 17 00:00:00 2001
From: Ilya Kreymer
Date: Thu, 21 Mar 2024 13:30:05 -0700
Subject: [PATCH 4/6] Apply suggestions from code review

---
 docs/docs/user-guide/common-options.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/docs/user-guide/common-options.md b/docs/docs/user-guide/common-options.md
index 1c1d313b0..1a1cc6be1 100644
--- a/docs/docs/user-guide/common-options.md
+++ b/docs/docs/user-guide/common-options.md
@@ -135,16 +135,16 @@ Browsertrix Crawler has different crawl interruption modes, and does everything
 
 ### 1. Graceful Shutdown
 
-Initiated when a single SIGINT (Ctrl+C) or SIGTERM (`docker kill -s SIGINT`, `docker kill -s SIGTERM`, `kill`) signal received
+Initiated when a single SIGINT (Ctrl+C) or SIGTERM (`docker kill -s SIGINT`, `docker kill -s SIGTERM`, `kill`) signal is received.
 The crawler will attempt to finish current pages, finish any pending async requests, write all WARCS, generate WACZ files
 and other post-processing, save state from Redis and then exit.
 
 ### 2. Less-Graceful, Quick Shutdown
 
 If a second SIGINT / SIGTERM is received, the crawler will close the browser immediately, interrupting any on-going network requests.
-Any pending network requests will not be awaited. However, anything in the WARC queue will be written and WARC files will be flushed.
-WACZ files and other post-processing will not be generated, but the state will be saved in Redis.
-WARC files should still be valid.
+Any asynchronous fetching will not be finished. However, anything in the WARC queue will be written and WARC files will be flushed.
+WACZ files and other post-processing will not be generated, but the current state from Redis will still be saved if enabled (see above).
+WARC records should be fully finished and WARC file should be valid, though not necessarily contain all the data for the pages being processed during the interruption.
 
 ### 3. Violent / Immediate Shutdown
 
 If a crawler is killed, eg. with SIGKILL signal (`docker kill`, `kill -9`), the crawler container / process will be immediately shut down. It will not have a chance to finish any WARC files, and there is no guarantee that WARC files will be valid, but the crawler will of course exit right away.
 
 
 ### Recommendations
 
-It is recommended to graceful stop the crawler by sending a SIGINT or SIGTERM signal, which can be done via Ctrl+C or `docker kill -s SIGINT `. Repeating the command will result in a faster, slightly less-graceful shutdown.
-Kubernetes also uses SIGTERM by default when shutting down container pods. Using SIGKILL is not recommended
+It is recommended to gracefully stop the crawler by sending a SIGINT or SIGTERM signal, which can be done via Ctrl+C or `docker kill -s SIGINT `. Repeating the command will result in a faster, slightly less-graceful shutdown.
+Using SIGKILL is not recommended
 except for last resort, and only when data is to be discarded.
From eca85ec5d5b7b612a32d060f95505b9d578427d0 Mon Sep 17 00:00:00 2001
From: Ilya Kreymer
Date: Thu, 21 Mar 2024 13:33:22 -0700
Subject: [PATCH 5/6] more doc tweaks

---
 docs/docs/user-guide/common-options.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/docs/user-guide/common-options.md b/docs/docs/user-guide/common-options.md
index 1a1cc6be1..9971c0f64 100644
--- a/docs/docs/user-guide/common-options.md
+++ b/docs/docs/user-guide/common-options.md
@@ -154,5 +154,6 @@
 
 It is recommended to gracefully stop the crawler by sending a SIGINT or SIGTERM signal, which can be done via Ctrl+C or `docker kill -s SIGINT `. Repeating the command will result in a faster, slightly less-graceful shutdown.
-Using SIGKILL is not recommended
-except for last resort, and only when data is to be discarded.
+Using SIGKILL is not recommended except for last resort, and only when data is to be discarded.
+
+Note: When using the crawler in the Browsertrix app / in Kubernetes in general, stopping a crawl / stopping a pod always results in option #1 (sending a single SIGTERM signal) to the crawler pod(s).

From 4ff6c46df744d1155d41fa48b8abd86c10ef87ee Mon Sep 17 00:00:00 2001
From: Ilya Kreymer
Date: Thu, 21 Mar 2024 13:49:16 -0700
Subject: [PATCH 6/6] Apply suggestions from code review

Co-authored-by: Tessa Walsh
---
 docs/docs/user-guide/common-options.md | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/docs/docs/user-guide/common-options.md b/docs/docs/user-guide/common-options.md
index 9971c0f64..43d9fb1d9 100644
--- a/docs/docs/user-guide/common-options.md
+++ b/docs/docs/user-guide/common-options.md
@@ -136,15 +136,12 @@ Browsertrix Crawler has different crawl interruption modes, and does everything
 
 ### 1. Graceful Shutdown
 
 Initiated when a single SIGINT (Ctrl+C) or SIGTERM (`docker kill -s SIGINT`, `docker kill -s SIGTERM`, `kill`) signal is received.
-The crawler will attempt to finish current pages, finish any pending async requests, write all WARCS, generate WACZ files
-and other post-processing, save state from Redis and then exit.
+
+The crawler will attempt to finish current pages, finish any pending async requests, write all WARCS, generate WACZ files and finish other post-processing, save state from Redis, and then exit.
 
 ### 2. Less-Graceful, Quick Shutdown
 
-If a second SIGINT / SIGTERM is received, the crawler will close the browser immediately, interrupting any on-going network requests.
-Any asynchronous fetching will not be finished. However, anything in the WARC queue will be written and WARC files will be flushed.
-WACZ files and other post-processing will not be generated, but the current state from Redis will still be saved if enabled (see above).
-WARC records should be fully finished and WARC file should be valid, though not necessarily contain all the data for the pages being processed during the interruption.
+If a second SIGINT / SIGTERM is received, the crawler will close the browser immediately, interrupting any on-going network requests. Any asynchronous fetching will not be finished. However, anything in the WARC queue will be written and WARC files will be flushed. WACZ files and other post-processing will not be generated, but the current state from Redis will still be saved if enabled (see above). WARC records should be fully finished and WARC files should be valid, though they may not contain all the data for the pages being processed during the interruption.
 
 ### 3. Violent / Immediate Shutdown
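As an illustration of the recommendations in the docs above (not part of this patch set), a supervising script could apply the same escalation order when it needs to stop a crawl: graceful first, quick second, SIGKILL only as a last resort. The `crawl` command, the `--url` argument, and the timeout values below are assumptions made for the example; in practice the signals are usually delivered via `docker kill` or by the orchestrator:

```ts
import { type ChildProcess, spawn } from "node:child_process";

// Hypothetical supervisor sketch: graceful stop first, quicker stop next,
// SIGKILL only as a last resort when the data can be discarded.
function stopWithEscalation(child: ChildProcess) {
  child.kill("SIGINT"); // option 1: graceful shutdown
  const quick = setTimeout(() => child.kill("SIGINT"), 60_000); // option 2: quick shutdown
  const force = setTimeout(() => child.kill("SIGKILL"), 300_000); // option 3: last resort
  child.once("exit", () => {
    clearTimeout(quick);
    clearTimeout(force);
  });
}

const crawl = spawn("crawl", ["--url", "https://example.org/"], {
  stdio: "inherit",
});
process.once("SIGINT", () => stopWithEscalation(crawl));
```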