from flask import current_app
from celery import Celery
from simplecoin import db, coinserv, cache
from simplecoin.utils import last_block_time, last_block_share_id
from simplecoin.models import (
    Share, Block, OneMinuteShare, Payout, Transaction, Blob, FiveMinuteShare,
    Status, OneMinuteReject, OneMinuteTemperature, FiveMinuteReject,
    OneMinuteHashrate, Threshold, Event, DonationPercent, BonusPayout)
from sqlalchemy.sql import func
from cryptokit import bits_to_shares, bits_to_difficulty
from bitcoinrpc import CoinRPCException
from celery.utils.log import get_task_logger
from math import ceil, floor

import requests
import json
import sqlalchemy
import logging
import datetime

logger = get_task_logger(__name__)
celery = Celery('simplecoin')


@celery.task(bind=True)
def update_online_workers(self):
    """
    Grabs a list of workers from the running powerpool instances and caches
    them
    """
    try:
        users = {}
        for i, pp_config in enumerate(current_app.config['monitor_addrs']):
            mon_addr = pp_config['mon_address'] + '/clients'
            try:
                req = requests.get(mon_addr)
                data = req.json()
            except Exception:
                current_app.logger.warn("Unable to connect to {} to gather worker summary."
                                        .format(mon_addr))
            else:
                for address, workers in data['clients'].iteritems():
                    users.setdefault('addr_online_' + address, [])
                    for d in workers:
                        users['addr_online_' + address].append((d['worker'], i))
        cache.set_many(users, timeout=480)
    except Exception as exc:
        logger.error("Unhandled exception in update_online_workers", exc_info=True)
        raise self.retry(exc=exc)


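# The task above produces cache entries of the form (key layout taken from
# the code, concrete values illustrative):
#   'addr_online_<address>' -> [(<worker name>, <powerpool index>), ...]
# where the integer is the position of the reporting powerpool instance in
# config['monitor_addrs'].

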
@celery.task(bind=True)
def update_pplns_est(self):
    """
    Generates redis cached value for share counts of all users based on PPLNS window
    """
    try:
        # grab configured N
        mult = int(current_app.config['last_n'])
        # generate average diff from last 500 blocks
        diff = cache.get('difficulty_avg')
        if diff is None:
            current_app.logger.warn(
                "Difficulty average is blank, can't calculate pplns estimate")
            return

        # Calculate the total shares that are 'counted'
        total_shares = ((float(diff) * (2 ** 16)) * mult)

        # Loop through all shares, newest first, until we've consumed the
        # whole share window
        remain = total_shares
        user_shares = {}
        for share in Share.query.order_by(Share.id.desc()).yield_per(5000):
            user_shares.setdefault('pplns_' + share.user, 0)
            if remain > share.shares:
                user_shares['pplns_' + share.user] += share.shares
                remain -= share.shares
            else:
                user_shares['pplns_' + share.user] += remain
                remain = 0
                break

        cache.set('pplns_total_shares', (total_shares - remain), timeout=40 * 60)
        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)
    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)


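# Worked example for the PPLNS window sizing above (illustrative numbers, not
# from the source): with last_n = 2 and a cached difficulty_avg of 1024,
#   total_shares = 1024 * (2 ** 16) * 2 = 134,217,728
# Shares are then consumed newest-first until that budget is exhausted; the
# share record that crosses the boundary is only partially counted.

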
@celery.task(bind=True)
def cache_user_donation(self):
    """
    Grabs all custom user donation percentages and caches them
    """
    try:
        user_donations = {}
        # Build a dict of donation % to cache
        custom_donations = DonationPercent.query.all()
        for donation in custom_donations:
            user_donations.setdefault(donation.user, current_app.config['default_perc'])
            user_donations[donation.user] = donation.perc

        cache.set('user_donations', user_donations, timeout=1440 * 60)
    except Exception as exc:
        logger.error("Unhandled exception in caching user donations", exc_info=True)
        raise self.retry(exc=exc)


@celery.task(bind=True)
def update_coin_transaction(self):
    """
    Loops through all unconfirmed payout transactions and marks them
    confirmed once they have enough network confirmations
    """
    try:
        # Select all unconfirmed transactions
        unconfirmed = Transaction.query.filter_by(confirmed=False)
        for tx in unconfirmed:
            # Check to see if the transaction hash exists in the block chain
            try:
                t = coinserv.gettransaction(tx.txid)
                if t.get('confirmations', 0) >= 6:
                    tx.confirmed = True
            except CoinRPCException:
                tx.confirmed = False
        db.session.commit()
    except Exception as exc:
        logger.error("Unhandled exception in update_coin_transaction", exc_info=True)
        db.session.rollback()
        raise self.retry(exc=exc)


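# The check above relies on the standard bitcoind-style gettransaction RPC,
# whose result includes a 'confirmations' count; six confirmations is the
# hardcoded threshold before a payout transaction is considered final, and a
# transaction the daemon no longer knows about is flipped back to
# unconfirmed.

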
@celery.task(bind=True)
def update_block_state(self):
    """
    Loops through all immature and non-orphaned blocks.
    First checks to see if blocks are orphaned,
    then it checks to see if they are now matured.
    """
    mature_diff = current_app.config['block_mature_confirms']
    try:
        # Select all immature & non-orphaned blocks
        immature = (Block.query.filter_by(mature=False, orphan=False))
        blockheight = coinserv.getblockcount()
        for block in immature:
            logger.info("Checking block height: {}".format(block.height))
            # Check to see if the block hash exists in the block chain
            try:
                output = coinserv.getblock(block.hash)
                logger.debug("Confirms: {}; Height diff: {}"
                             .format(output['confirmations'],
                                     blockheight - block.height))
            except CoinRPCException:
                logger.info("Block {}:{} not in coin database, assume orphan!"
                            .format(block.height, block.hash))
                block.orphan = True
            else:
                if output['confirmations'] > mature_diff:
                    logger.info("Block {}:{} meets {} confirms, mark mature"
                                .format(block.height, block.hash, mature_diff))
                    block.mature = True
                elif (blockheight - block.height) > mature_diff and output['confirmations'] < mature_diff:
                    logger.info("Block {}:{} is over {} blocks old but lacks enough confirms. Marking orphan."
                                .format(block.height, block.hash, mature_diff))
                    block.orphan = True
        db.session.commit()
    except Exception as exc:
        logger.error("Unhandled exception in update_block_state", exc_info=True)
        db.session.rollback()
        raise self.retry(exc=exc)


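# Block lifecycle implied by the task above: a block starts immature and
# either matures (confirmations exceed block_mature_confirms) or is orphaned,
# which happens when the coin daemon no longer knows its hash, or when the
# chain has advanced past the maturity window without enough confirmations
# accruing.

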
@celery.task(bind=True)
def add_share(self, user, shares):
    """
    Adds a round share to postgresql

    user: should be a username/wallet address
    shares: should be an integer representation of n1 shares
    """
    try:
        Share.create(user=user, shares=shares)
        db.session.commit()
    except Exception as exc:
        logger.error("Unhandled exception in add_share", exc_info=True)
        db.session.rollback()
        raise self.retry(exc=exc)


@celery.task(bind=True)
def add_block(self, user, height, total_value, transaction_fees, bits,
              hash_hex):
    """
    Insert a discovered block & blockchain data

    user: should be a username/wallet address of who found block
    height: should be the height of the given block in the blockchain
    total_value: should be an integer representation of the value of the
                 newly discovered block. E.G.
                 DOGE = 2364681.04976814
                 network_value = 236468104976814
    transaction_fees: should be an integer amount awarded due to transactions
                      handled by the block. E.G.
                      transaction fees on new block = 6.5
                      transaction_fees = 650000000
    """
    logger.warn(
        "Received an add block notification!\nUser: {}\nHeight: {}\n"
        "Total Value: {}\nTransaction Fees: {}\nBits: {}\nHash Hex: {}"
        .format(user, height, total_value, transaction_fees, bits, hash_hex))
    try:
        last = last_block_share_id()
        block = Block.create(user, height, total_value, transaction_fees, bits,
                             hash_hex, time_started=last_block_time())
        db.session.flush()
        count = (db.session.query(func.sum(Share.shares)).
                 filter(Share.id > last).
                 filter(Share.id <= block.last_share_id).scalar()) or 128
        block.shares_to_solve = count
        db.session.commit()
        payout.delay()
    except Exception as exc:
        logger.error("Unhandled exception in add_block", exc_info=True)
        db.session.rollback()
        raise self.retry(exc=exc)


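# The integer representations in add_block's docstring follow the usual
# 8-decimal satoshi-style convention, i.e. value_int = round(value * 10**8).
# Using the docstring's own figures:
#   2364681.04976814 DOGE * 10**8 = 236468104976814
#   6.5 in fees            * 10**8 = 650000000

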
@celery.task(bind=True)
def add_one_minute(self, user, valid_shares, minute, worker='', dup_shares=0,
                   low_diff_shares=0, stale_shares=0):
    """
    Adds a new single minute entry for a user

    user: string of the user
    minute: timestamp (int)
    valid_shares: number of shares received over the timespan
    """
    def count_share(typ, amount, user_=user):
        logger.debug("Adding {} for {} of amount {}"
                     .format(typ.__name__, user_, amount))
        try:
            typ.create(user_, amount, minute, worker)
            db.session.commit()
        except sqlalchemy.exc.IntegrityError:
            # a row for this minute already exists; increment it instead
            db.session.rollback()
            typ.add_value(user_, amount, minute, worker)
            db.session.commit()

    try:
        # log their valid shares
        if valid_shares:
            count_share(OneMinuteShare, valid_shares)

        # we want to log how much of each type of reject for the whole pool
        if user == "pool":
            if low_diff_shares:
                count_share(OneMinuteReject, low_diff_shares, user_="pool_low_diff")
            if dup_shares:
                count_share(OneMinuteReject, dup_shares, user_="pool_dup")
            if stale_shares:
                count_share(OneMinuteReject, stale_shares, user_="pool_stale")
        # only log a total reject on a per-user basis
        else:
            total_reject = dup_shares + low_diff_shares + stale_shares
            if total_reject:
                count_share(OneMinuteReject, total_reject)
    except Exception as exc:
        logger.error("Unhandled exception in add_one_minute", exc_info=True)
        db.session.rollback()
        raise self.retry(exc=exc)


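# Note on the accounting above: pool-wide rejects are split into pseudo-users
# ("pool_low_diff", "pool_dup", "pool_stale") so each reject type can be
# tracked separately, while an individual miner only ever gets a single
# combined OneMinuteReject total per minute.

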
@celery.task(bind=True)
def new_block(self, blockheight, bits=None, reward=None):
    """
    Notification that a new block height has been reached in the network.
    Sets some things into the cache for display on the website.
    """
    logger.info("Received notice of new block height {}".format(blockheight))

    cache.set('blockheight', blockheight)
    cache.set('difficulty', bits_to_difficulty(bits))
    cache.set('reward', reward)

    # keep the last 500 blocks in the cache for getting average difficulty
    cache.cache._client.lpush('block_cache', bits)
    cache.cache._client.ltrim('block_cache', 0, 500)


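# The 'block_cache' redis list acts as a rolling window of recent block bits
# consumed by difficulty_avg below. Note that LTRIM's indices are inclusive,
# so ltrim(0, 500) actually retains up to 501 entries.

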
@celery.task(bind=True)
def cleanup(self, simulate=False):
    """
    Finds all the shares that will no longer be used and removes them from
    the database.
    """
    try:
        # find the oldest un-processed block
        block = (Block.query.
                 filter_by(processed=False).
                 order_by(Block.height).first())
        if block is None:
            # sloppy hack to get the newest block that is processed
            block = (Block.query.
                     filter_by(processed=True).
                     order_by(Block.height.desc()).first())
        if block is None:
            logger.debug("No block found, exiting...")
            return

        mult = int(current_app.config['last_n'])
        # take our standard share count times two for a safe margin. divide
        # by 16 to get the number of rows, since a row's minimum share count
        # is 16
        shares = (bits_to_shares(block.bits) * mult) * 2
        id_diff = shares // 16
        # compute the id below which shares are safe to delete
        stale_id = block.last_share.id - id_diff
        if simulate:
            logger.info("Share count for block computed: {}".format(shares // 2))
            logger.info("Share total with margin computed: {}".format(shares))
            logger.info("Id diff computed: {}".format(id_diff))
            logger.info("Stale ID computed: {}".format(stale_id))
            exit(0)
        elif stale_id > 0:
            logger.info("Cleaning all shares older than id {}".format(stale_id))
            # delete all shares that are sufficiently old
            Share.query.filter(Share.id < stale_id).delete(
                synchronize_session=False)
            db.session.commit()
        else:
            logger.info("Not cleaning anything, stale id less than zero")
    except Exception as exc:
        logger.error("Unhandled exception in cleanup", exc_info=True)
        db.session.rollback()
        raise self.retry(exc=exc)


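# Worked example for the stale-id math above (illustrative numbers, not from
# the source): if bits_to_shares(block.bits) yields 1,000,000 shares and
# last_n = 2, then
#   shares  = 1,000,000 * 2 * 2 = 4,000,000   (window doubled for safety)
#   id_diff = 4,000,000 // 16   = 250,000     (rows, at >= 16 shares per row)
# and every Share row with id < block.last_share.id - 250,000 is deleted.

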
@celery.task(bind=True)
def payout(self, simulate=False):
    """
    Calculates payouts for users from share records for found blocks.
    """
    try:
        if simulate:
            logger.debug("Running in simulate mode, no commit will be performed")
            logger.setLevel(logging.DEBUG)

        # find the oldest un-processed block
        block = (Block.query.filter_by(processed=False).
                 order_by(Block.height).first())
        if block is None:
            logger.debug("No block found, exiting...")
            return

        logger.debug("Processing block height {}".format(block.height))
        mult = int(current_app.config['last_n'])
        # the PPLNS window: the difficulty-derived share count times N
        total_shares = (bits_to_shares(block.bits) * mult)
        logger.debug("Looking for up to {} total shares".format(total_shares))
        remain = total_shares
        start = block.last_share.id
        logger.debug("Identified last matching share id as {}".format(start))
        user_shares = {}
        for share in Share.query.order_by(Share.id.desc()).filter(Share.id <= start).yield_per(100):
            user_shares.setdefault(share.user, 0)
            if remain > share.shares:
                user_shares[share.user] += share.shares
                remain -= share.shares
            else:
                user_shares[share.user] += remain
                remain = 0
                break

        # if we found less than n, use what we found as the total
        total_shares -= remain
        logger.debug("Found {} shares".format(total_shares))
        if simulate:
            out = "\n".join(["\t".join((user, str(amount))) for user, amount in user_shares.iteritems()])
            logger.debug("Share distribution:\n{}".format(out))

        # Calculate the portion going to the miners by truncating the
        # fractional portions and giving the remainder to the pool owner
        logger.debug("Distribute_amnt: {}".format(block.total_value))
        # Below calculates the truncated portion going to each miner. Because
        # of the dropped fractions the total accrued won't equal the
        # distribute amount, so the difference is handed out round-robin to
        # the miners in dictionary order
        accrued = 0
        user_payouts = {}
        for user, share_count in user_shares.iteritems():
            user_payouts[user] = (share_count * block.total_value) // total_shares
            accrued += user_payouts[user]

        logger.debug("Total accrued after truncated iteration {}; {}%"
                     .format(accrued, (accrued / float(block.total_value)) * 100))
        # loop over the dictionary indefinitely until we've distributed
        # all the remaining funds
        i = 0
        while accrued < block.total_value:
            for key in user_payouts:
                i += 1
                user_payouts[key] += 1
                accrued += 1
                # exit if we've exhausted
                if accrued >= block.total_value:
                    break

        logger.debug("Ran round robin distro {} times to finish distrib"
                     .format(i))

        # now handle donation or bonus distribution for each user
        donation_total = 0
        bonus_total = 0
        user_perc_applied = {}
        user_perc = {}
        default_perc = current_app.config.get('default_perc', 0)
        # convert our custom percentages that apply to these users into an
        # easy to access dictionary
        custom_percs = DonationPercent.query.filter(DonationPercent.user.in_(user_shares.keys()))
        custom_percs = {d.user: d.perc for d in custom_percs}
        for user, payout_amt in user_payouts.iteritems():
            # use the custom perc, or fallback to the default
            perc = custom_percs.get(user, default_perc)
            user_perc[user] = perc

            # if the perc is greater than 0 it's calced as a donation
            if perc > 0:
                donation = int(ceil((perc / 100.0) * payout_amt))
                logger.debug("Donation of\t{}\t({}%)\tcollected from\t{}"
                             .format(donation / 100000000.0, perc, user))
                donation_total += donation
                user_payouts[user] -= donation
                user_perc_applied[user] = donation
            # if less than zero it's a bonus payout
            elif perc < 0:
                perc *= -1
                bonus = int(floor((perc / 100.0) * payout_amt))
                logger.debug("Bonus of\t{}\t({}%)\tpaid to\t{}"
                             .format(bonus / 100000000.0, perc, user))
                user_payouts[user] += bonus
                bonus_total += bonus
                user_perc_applied[user] = -1 * bonus
            # percentages of 0 are no-ops

        logger.info("Paid out {} in bonus payment"
                    .format(bonus_total / 100000000.0))
        logger.info("Received {} in donation payment"
                    .format(donation_total / 100000000.0))
        logger.info("Net income from block {}"
                    .format((donation_total - bonus_total) / 100000000.0))

        assert accrued == block.total_value
        logger.info("Successfully distributed all rewards among {} users."
                    .format(len(user_payouts)))

        # run another safety check
        user_sum = sum(user_payouts.values())
        assert user_sum == (block.total_value + bonus_total - donation_total)
        logger.info("Double check for payout distribution."
                    " Total user payouts {}, total block value {}."
                    .format(user_sum, block.total_value))

        if simulate:
            out = "\n".join(["\t".join((user, str(amount / 100000000.0))) for user, amount in user_payouts.iteritems()])
            logger.debug("Payout distribution:\n{}".format(out))
            db.session.rollback()
        else:
            # record the payout for each user
            for user, amount in user_payouts.iteritems():
                Payout.create(user, amount, block, user_shares[user],
                              user_perc[user], user_perc_applied.get(user, 0))
            # update the block status and collected amounts
            block.processed = True
            block.donated = donation_total
            block.bonus_payed = bonus_total

            # record the donations as a bonus payout to the donate address
            if donation_total > 0:
                donate_address = current_app.config['donate_address']
                BonusPayout.create(donate_address, donation_total,
                                   "Total donations from block {}"
                                   .format(block.height))
                logger.info("Added bonus payout to donation address {} for {}"
                            .format(donate_address, donation_total / 100000000.0))

            # pay a configured flat bonus to the block finder, if any
            block_bonus = current_app.config.get('block_bonus', 0)
            if block_bonus > 0:
                BonusPayout.create(block.user, block_bonus,
                                   "Blockfinder bonus for block {}"
                                   .format(block.height))
                logger.info("Added bonus payout for blockfinder {} for {}"
                            .format(block.user, block_bonus / 100000000.0))

            db.session.commit()
    except Exception as exc:
        logger.error("Unhandled exception in payout", exc_info=True)
        db.session.rollback()
        raise self.retry(exc=exc)


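# Worked example for the truncate-then-round-robin distribution above
# (illustrative numbers, not from the source): a block worth 101 units split
# over shares {a: 5, b: 3, c: 2} (total 10) truncates to
#   a: 5 * 101 // 10 = 50, b: 3 * 101 // 10 = 30, c: 2 * 101 // 10 = 20
# leaving 101 - 100 = 1 unit, which the round-robin loop hands to the first
# user in dictionary order. No unit is lost or double-paid, which is what the
# asserts verify.

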
@celery.task(bind=True)
def compress_minute(self):
    """ Compresses OneMinute records (temp, shares, rejects) into
    FiveMinute records """
    try:
        OneMinuteShare.compress()
        OneMinuteReject.compress()
        OneMinuteTemperature.compress()
        db.session.commit()
    except Exception:
        logger.error("Unhandled exception in compress_minute", exc_info=True)
        db.session.rollback()


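# compress_minute and compress_five_minute below form a two-stage rollup:
# one-minute rows are folded into five-minute rows here, and the five-minute
# rows are folded into their own coarser tier by the next task.

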
@celery.task(bind=True)
def compress_five_minute(self):
    """ Compresses FiveMinute share/reject records """
    try:
        FiveMinuteShare.compress()
        FiveMinuteReject.compress()
        db.session.commit()
    except Exception:
        logger.error("Unhandled exception in compress_five_minute", exc_info=True)
        db.session.rollback()


@celery.task(bind=True)
def general_cleanup(self):
    """ Cleans up old database items.
    - Events for email rate limiting older than 1 hour.
    - Status messages older than 12 hours.
    """
    try:
        now = datetime.datetime.utcnow()
        twelve_hours_ago = now - datetime.timedelta(hours=12)
        one_hour_ago = now - datetime.timedelta(hours=1)
        Status.query.filter(Status.time < twelve_hours_ago).delete()
        Event.query.filter(Event.time < one_hour_ago).delete()
        db.session.commit()
    except Exception:
        logger.error("Unhandled exception in general_cleanup", exc_info=True)
        db.session.rollback()


@celery.task(bind=True)
def agent_receive(self, address, worker, typ, payload, timestamp):
    """ Accepts ppagent data that is forwarded from powerpool and manages
    adding it to the database and triggering alerts as needed. """
    # convert unix timestamp to datetime
    dt = datetime.datetime.utcfromtimestamp(timestamp)

    def inject_device_stat(cls, device, value):
        if value:
            stat = cls(user=address, worker=worker, device=device, value=value,
                       time=dt)
            db.session.merge(stat)

    try:
        # if they passed a threshold we should update the database object
        if typ == "thresholds":
            try:
                if not payload:
                    # an empty payload means the thresholds should be removed
                    Threshold.query.filter_by(worker=worker, user=address).delete()
                else:
                    thresh = Threshold(
                        worker=worker,
                        user=address,
                        green_notif=not payload.get('no_green_notif', False),
                        temp_thresh=payload.get('overheat'),
                        hashrate_thresh=payload.get('lowhashrate'),
                        offline_thresh=payload.get('offline'),
                        emails=payload['emails'][:4])
                    db.session.merge(thresh)
            except KeyError:
                # assume they're trying to remove the thresholds...
                Threshold.query.filter_by(worker=worker, user=address).delete()
                logger.warn("Bad payload was sent as Threshold data: {}"
                            .format(payload))
            db.session.commit()
            return
        elif typ == 'status':
            ret = (db.session.query(Status).filter_by(user=address, worker=worker).
                   update({"status": json.dumps(payload), "time": dt}))
            # if the update affected nothing, insert a new row instead
            if ret == 0:
                new = Status(user=address, worker=worker,
                             status=json.dumps(payload), time=dt)
                db.session.add(new)
            db.session.commit()
            return

        # the two remaining message types can trigger a threshold condition,
        # so we need to load the threshold to check
        thresh = Threshold.query.filter_by(worker=worker, user=address).first()
        if typ == 'temp':
            # track the overheated cards
            overheat_cards = []
            temps = []
            for i, value in enumerate(payload):
                inject_device_stat(OneMinuteTemperature, i, value)
                # report over temperature
                if thresh and value >= thresh.temp_thresh:
                    overheat_cards.append(str(i))
                    temps.append(str(value))

            if overheat_cards and not thresh.temp_err:
                s = "s" if len(overheat_cards) > 1 else ""
                thresh.report_condition(
                    "Worker {}, overheat on card{s} {}, temp{s} {}"
                    .format(worker, ', '.join(overheat_cards), ', '.join(temps),
                            s=s),
                    'temp_err', True)
            elif not overheat_cards and thresh and thresh.temp_err:
                thresh.report_condition(
                    "Worker {} overheat condition relieved".format(worker),
                    'temp_err', False)
        elif typ == 'hashrate':
            for i, value in enumerate(payload):
                # multiply by a million to turn megahashes to hashes
                inject_device_stat(OneMinuteHashrate, i, value * 1000000)

            # do threshold checking if they have one set
            if thresh:
                # payload values are in MH/s; the sum is converted to KH/s
                hr = sum(payload) * 1000
                if int(hr) == 0:
                    current_app.logger.warn("Entry with 0 hashrate. Worker {}; User {}".format(worker, address))
                else:
                    low_hash = hr <= thresh.hashrate_thresh
                    if low_hash and not thresh.hashrate_err:
                        thresh.report_condition(
                            "Worker {} low hashrate condition, hashrate {} KH/s"
                            .format(worker, hr), 'hashrate_err', True)
                    elif not low_hash and thresh.hashrate_err:
                        thresh.report_condition(
                            "Worker {} low hashrate condition resolved, hashrate {} KH/s"
                            .format(worker, hr), 'hashrate_err', False)
        else:
            logger.warning("Powerpool sent an unknown agent message of type {}"
                           .format(typ))

        db.session.commit()
    except Exception:
        logger.error("Unhandled exception in agent_receive", exc_info=True)
        db.session.rollback()


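# Shapes of the ppagent payloads handled above, as implied by the code (the
# concrete values are illustrative): 'thresholds' is a dict such as
# {'overheat': 85, 'lowhashrate': 500, 'offline': 10, 'emails': [...]};
# 'temp' and 'hashrate' are per-device lists such as [72, 68] (degrees) and
# [2.5, 2.4] (MH/s); 'status' is an arbitrary JSON-serializable blob.

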
@celery.task(bind=True)
def check_down(self):
    """
    Checks the latest Status report from each worker that has a Threshold
    defined for downtime, and reports offline/back-online transitions.
    """
    try:
        for thresh in Threshold.query.filter(Threshold.offline_thresh != None):
            last = Status.query.filter_by(worker=thresh.worker, user=thresh.user).first()
            if not last:
                continue
            diff = int((datetime.datetime.utcnow() - last.time).total_seconds() / 60)
            if not thresh.offline_err and diff > thresh.offline_thresh:
                thresh.report_condition("Worker {} offline for {} minutes"
                                        .format(thresh.worker, diff),
                                        'offline_err',
                                        True)
            # if there's an error registered but the worker is reporting again
            elif thresh.offline_err and diff <= thresh.offline_thresh:
                thresh.report_condition("Worker {} now back online"
                                        .format(thresh.worker),
                                        'offline_err',
                                        False)
        db.session.commit()
    except Exception:
        logger.error("Unhandled exception in check_down", exc_info=True)
        db.session.rollback()


@celery.task(bind=True)
def server_status(self):
    """
    Periodically polls the backend to get the number of workers and caches it
    """
    try:
        total_workers = 0
        for i, pp_config in enumerate(current_app.config['monitor_addrs']):
            mon_addr = pp_config['mon_address']
            try:
                req = requests.get(mon_addr)
                data = req.json()
            except Exception:
                logger.warn("Couldn't connect to internal monitor at {}"
                            .format(mon_addr))
                continue
            else:
                cache.set('stratum_workers_' + str(i), data['stratum_clients'])
                total_workers += data['stratum_clients']

        cache.set('total_workers', total_workers)
    except Exception:
        logger.error("Unhandled exception in server_status", exc_info=True)
        db.session.rollback()


@celery.task(bind=True)
def difficulty_avg(self):
    """
    Caches the average network difficulty over the last 500 blocks
    """
    try:
        diff_list = cache.cache._client.lrange('block_cache', 0, 500)
        if not diff_list:
            # avoid a ZeroDivisionError when no blocks have been cached yet
            logger.warn("block_cache is empty, can't compute difficulty average")
            return
        total_diffs = sum([bits_to_difficulty(diff) for diff in diff_list])
        cache.set('difficulty_avg', total_diffs / len(diff_list))
    except Exception as exc:
        logger.warn("Unknown failure in difficulty_avg", exc_info=True)
        raise self.retry(exc=exc)


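# difficulty_avg writes the 'difficulty_avg' cache key consumed by
# update_pplns_est above; until this task has run at least once, the PPLNS
# estimate is skipped because no average is available.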