Merge pull request #45 from Koed00/dev
Adds `qinfo` management command
Koed00 committed Aug 12, 2015
2 parents d8fd93c + 2043772 · commit 1017757
Showing 11 changed files with 170 additions and 17 deletions.
2 changes: 1 addition & 1 deletion django_q/__init__.py
@@ -9,6 +9,6 @@
from .cluster import Cluster
from .monitor import Stat

VERSION = (0, 5, 0)
VERSION = (0, 5, 1)

default_app_config = 'django_q.apps.DjangoQConfig'
12 changes: 12 additions & 0 deletions django_q/management/commands/qinfo.py
@@ -0,0 +1,12 @@
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _

from django_q.monitor import info


class Command(BaseCommand):
# Translators: help text for qinfo management command
help = _('General information over all clusters.')

def handle(self, *args, **options):
info()
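
Once the app is installed, the new command is invoked like any other management command (this matches the docs change further down in this commit):

$ python manage.py qinfo
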
1 change: 0 additions & 1 deletion django_q/management/commands/qmonitor.py
@@ -1,4 +1,3 @@
# Django
from optparse import make_option

from django.core.management.base import BaseCommand
108 changes: 98 additions & 10 deletions django_q/monitor.py
@@ -1,26 +1,24 @@
from datetime import timedelta
import socket

# external
from blessed import Terminal

# django
from django.db import connection
from django.db.models import Sum, F
from django.utils import timezone
from django.utils.translation import ugettext as _

# local
import signing
from django_q.conf import Conf, redis_client, logger
from django_q import models


def monitor(run_once=False):
def monitor(run_once=False, r=redis_client):
term = Terminal()
r = redis_client
try:
redis_client.ping()
except Exception as e:
print(term.red('Can not connect to Redis server.'))
logger.exception(e)
raise e
ping_redis(r)
with term.fullscreen(), term.hidden_cursor(), term.cbreak():
val = None
start_width = int(term.width / 8)
@@ -88,7 +86,6 @@ def monitor(run_once=False):


class Status(object):

"""Cluster status base class."""

def __init__(self, pid):
@@ -107,7 +104,6 @@ def __init__(self, pid):


class Stat(Status):

"""Status object for Cluster monitoring."""

def __init__(self, sentinel):
@@ -194,3 +190,95 @@ def __getstate__(self):
state = dict(self.__dict__)
del state['r']
return state


def info(r=redis_client):
term = Terminal()
ping_redis(r)
stat = Stat.get_all(r)
# general stats
clusters = len(stat)
workers = 0
reincarnations = 0
for cluster in stat:
workers += len(cluster.workers)
reincarnations += cluster.reincarnations
# calculate tasks pm and avg exec time
tasks_per = 0
per = _('day')
exec_time = 0
last_tasks = models.Success.objects.filter(stopped__gte=timezone.now() - timedelta(hours=24))
tasks_per_day = last_tasks.count()
if tasks_per_day > 0:
# average execution time over the last 24 hours
if not connection.vendor == 'sqlite':
exec_time = last_tasks.aggregate(time_taken=Sum(F('stopped') - F('started')))
exec_time = exec_time['time_taken'].total_seconds() / tasks_per_day
else:
# can't sum timedeltas on sqlite
for t in last_tasks:
exec_time += t.time_taken()
exec_time = exec_time / tasks_per_day
# tasks per second/minute/hour/day in the last 24 hours
if tasks_per_day > 24 * 60 * 60:
tasks_per = tasks_per_day / (24 * 60 * 60)
per = _('second')
elif tasks_per_day > 24 * 60:
tasks_per = tasks_per_day / (24 * 60)
per = _('minute')
elif tasks_per_day > 24:
tasks_per = tasks_per_day / 24
per = _('hour')
else:
tasks_per = tasks_per_day
# print to terminal
term.clear_eos()
col_width = int(term.width / 6)
print(term.black_on_green(term.center(_('-- {} summary --').format(Conf.PREFIX))))
print(term.cyan(_('Clusters')) +
term.move_x(1 * col_width) +
term.white(str(clusters)) +
term.move_x(2 * col_width) +
term.cyan(_('Workers')) +
term.move_x(3 * col_width) +
term.white(str(workers)) +
term.move_x(4 * col_width) +
term.cyan(_('Restarts')) +
term.move_x(5 * col_width) +
term.white(str(reincarnations))
)
print(term.cyan(_('Queued')) +
term.move_x(1 * col_width) +
term.white(str(r.llen(Conf.Q_LIST))) +
term.move_x(2 * col_width) +
term.cyan(_('Successes')) +
term.move_x(3 * col_width) +
term.white(str(models.Success.objects.count())) +
term.move_x(4 * col_width) +
term.cyan(_('Failures')) +
term.move_x(5 * col_width) +
term.white(str(models.Failure.objects.count()))
)
print(term.cyan(_('Schedules')) +
term.move_x(1 * col_width) +
term.white(str(models.Schedule.objects.count())) +
term.move_x(2 * col_width) +
term.cyan(_('Tasks/{}'.format(per))) +
term.move_x(3 * col_width) +
term.white('{0:.2f}'.format(tasks_per)) +
term.move_x(4 * col_width) +
term.cyan(_('Avg time')) +
term.move_x(5 * col_width) +
term.white('{0:.4f}'.format(exec_time))
)
return True


def ping_redis(r):
try:
r.ping()
except Exception as e:
term = Terminal()
print(term.red('Can not connect to Redis server.'))
logger.exception(e)
raise e
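
A minimal sketch (not part of the commit) of calling the new helpers directly from Python; the Redis connection settings here are assumptions, adjust them for your setup:

import redis
from django_q.monitor import info, ping_redis

# Assumed local Redis instance.
r = redis.StrictRedis(host='localhost', port=6379, db=0)

ping_redis(r)  # logs and re-raises if the Redis server is unreachable
info(r)        # prints the one-off cluster summary and returns True
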
6 changes: 6 additions & 0 deletions django_q/tests/test_commands.py
@@ -7,5 +7,11 @@ def test_qcluster():
call_command('qcluster', run_once=True)


@pytest.mark.django_db
def test_qmonitor():
call_command('qmonitor', run_once=True)


@pytest.mark.django_db
def test_qinfo():
call_command('qinfo')
28 changes: 27 additions & 1 deletion django_q/tests/test_monitor.py
@@ -1,7 +1,12 @@
import pytest
import redis

from django_q import async
from django_q.cluster import Cluster
from django_q.monitor import monitor, Stat
from django_q.monitor import monitor, Stat, ping_redis, info


@pytest.mark.django_db
def test_monitor():
assert Stat.get(0).sentinel == 0
c = Cluster()
@@ -17,3 +22,24 @@ def test_monitor():
assert stat.empty_queues() is True
break
assert found_c is True


@pytest.mark.django_db
def test_info():
info()
do_sync()
info()
for _ in range(24):
do_sync()
info()


def do_sync():
async('django_q.tests.tasks.countdown', 1, sync=True, save=True)


@pytest.mark.django_db
def test_ping_redis():
r = redis.StrictRedis(port=6388)
with pytest.raises(Exception):
ping_redis(r)
6 changes: 6 additions & 0 deletions django_q/tests/test_scheduler.py
@@ -90,6 +90,12 @@ def test_scheduler(r):
assert schedule is not None
assert schedule.last_run() is None
scheduler(list_key=list_key)
# via model
Schedule.objects.create(func='django_q.tests.tasks.word_multiply',
args='2',
kwargs='word="django"',
schedule_type=Schedule.DAILY
)
scheduler(list_key=list_key)
# ONCE schedule should be deleted
assert Schedule.objects.filter(pk=once_schedule.pk).exists() is False
Binary file added docs/_static/info.png
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -72,7 +72,7 @@
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5.0'
release = '0.5.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
20 changes: 18 additions & 2 deletions docs/monitor.rst
@@ -2,7 +2,7 @@ Monitor
=======
.. py:currentmodule::django_q.monitor
The cluster monitor shows information about all the Q clusters connected to your project.
The cluster monitor shows live information about all the Q clusters connected to your project.

Start the monitor with Django's `manage.py` command::

@@ -71,11 +71,27 @@ Up

.. centered:: Press `q` to quit the monitor and return to your terminal.

Info
----

If you just want to see a one-off summary of your cluster stats you can use the `qinfo` management command::

$ python manage.py qinfo


.. image:: _static/info.png

All stats are summed over all available clusters.

Task rate is calculated over the last 24 hours and will show the number of tasks per second, minute, hour or day depending on the amount.
Average execution time (`Avg time`) is calculated in seconds over the last 24 hours.
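
As an illustrative sketch (assumed numbers, mirroring the thresholds used in info() above): 10,000 tasks completed in the last 24 hours exceeds 24 * 60 = 1,440 but not 24 * 60 * 60 = 86,400, so the rate is reported per minute:

tasks_per_day = 10000  # assumed example count
if tasks_per_day > 24 * 60 * 60:
    rate, per = tasks_per_day / (24 * 60 * 60), 'second'
elif tasks_per_day > 24 * 60:
    rate, per = tasks_per_day / (24 * 60), 'minute'
elif tasks_per_day > 24:
    rate, per = tasks_per_day / 24, 'hour'
else:
    rate, per = tasks_per_day, 'day'
print('{0:.2f} tasks/{1}'.format(rate, per))  # 6.94 tasks/minute with Python 3 division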

Since some of these numbers are based on what is available in your tasks database, limiting or disabling the result backend will skew them.

Status
------

You can check the status of your clusters straight from your code with :class:`Stat`:
You can check the status of your clusters straight from your code with the :class:`Stat` class:

.. code:: python
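
The docs code block above is collapsed in this diff. Purely as an illustrative sketch of the Stat API, based on how info() uses it in this commit (not the actual docs snippet):

from django_q.conf import redis_client
from django_q.monitor import Stat

# Summarise every cluster the monitor knows about.
for stat in Stat.get_all(redis_client):
    print('{} workers, {} reincarnations'.format(len(stat.workers), stat.reincarnations))
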
2 changes: 1 addition & 1 deletion setup.py
@@ -26,7 +26,7 @@ def run(self):

setup(
name='django-q',
version='0.5.0',
version='0.5.1',
author='Ilan Steemers',
author_email='koed00@gmail.com',
keywords='django task queue worker redis multiprocessing',
