Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add randomly generated slug to container names to prevent collisions #6140

Merged
merged 2 commits into from Sep 19, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
1 change: 1 addition & 0 deletions compose/const.py
Expand Up @@ -15,6 +15,7 @@
LABEL_SERVICE = 'com.docker.compose.service'
LABEL_NETWORK = 'com.docker.compose.network'
LABEL_VERSION = 'com.docker.compose.version'
LABEL_SLUG = 'com.docker.compose.slug'
LABEL_VOLUME = 'com.docker.compose.volume'
LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
NANOCPUS_SCALE = 1000000000
Expand Down
12 changes: 11 additions & 1 deletion compose/container.py
Expand Up @@ -9,7 +9,9 @@
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .const import LABEL_SLUG
from .const import LABEL_VERSION
from .utils import truncate_id
from .version import ComposeVersion


Expand Down Expand Up @@ -80,7 +82,7 @@ def service(self):
@property
def name_without_project(self):
    """Container name with the leading '<project>_' prefix removed.

    For a compose-managed name (project_service_...), rebuilds the short
    form as '<service>_<number>' plus '_<slug>' when a slug label is
    present; otherwise returns the name unchanged (e.g. a custom
    container_name).
    """
    if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
        # slug may be falsy for containers created before slugs existed.
        return '{0}_{1}{2}'.format(
            self.service, self.number, '_' + self.slug if self.slug else ''
        )
    else:
        return self.name

Expand All @@ -92,6 +94,14 @@ def number(self):
self.short_id, LABEL_CONTAINER_NUMBER))
return int(number)

@property
def slug(self):
    """Truncated (12-char) form of the container's random slug.

    Returns None when the container has no slug label (e.g. containers
    created by an older Compose version) — without this guard,
    truncate_id(None) would raise TypeError on `':' in None`.
    """
    if not self.full_slug:
        return None
    return truncate_id(self.full_slug)

@property
def full_slug(self):
    """Full (untruncated) slug stored in the container's labels.

    Read from the LABEL_SLUG label; returns None when the label is absent.
    """
    return self.labels.get(LABEL_SLUG)

@property
def ports(self):
self.inspect_if_not_inspected()
Expand Down
22 changes: 0 additions & 22 deletions compose/project.py
Expand Up @@ -31,7 +31,6 @@
from .service import NetworkMode
from .service import PidMode
from .service import Service
from .service import ServiceName
from .service import ServiceNetworkMode
from .service import ServicePidMode
from .utils import microseconds_from_time_nano
Expand Down Expand Up @@ -198,25 +197,6 @@ def get_services_without_duplicate(self, service_names=None, include_deps=False)
service.remove_duplicate_containers()
return services

def get_scaled_services(self, services, scale_override):
    """
    Returns a list of this project's services as scaled ServiceName objects.

    services: a list of Service objects
    scale_override: a dict with the scale to apply to each service (k: service_name, v: scale)
    """
    scaled = []
    for svc in services:
        # An overridden scale wins; otherwise fall back to the service's own.
        scale = scale_override.get(svc.name, svc.scale_num)
        scaled.extend(
            ServiceName(self.name, svc.name, num)
            for num in range(1, scale + 1)
        )
    return scaled

def get_links(self, service_dict):
links = []
if 'links' in service_dict:
Expand Down Expand Up @@ -494,7 +474,6 @@ def up(self,
svc.ensure_image_exists(do_build=do_build, silent=silent)
plans = self._get_convergence_plans(
services, strategy, always_recreate_deps=always_recreate_deps)
scaled_services = self.get_scaled_services(services, scale_override)

def do(service):

Expand All @@ -505,7 +484,6 @@ def do(service):
scale_override=scale_override.get(service.name),
rescale=rescale,
start=start,
project_services=scaled_services,
reset_container_image=reset_container_image,
renew_anonymous_volumes=renew_anonymous_volumes,
)
Expand Down
78 changes: 45 additions & 33 deletions compose/service.py
Expand Up @@ -40,6 +40,7 @@
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .const import LABEL_SLUG
from .const import LABEL_VERSION
from .const import NANOCPUS_SCALE
from .container import Container
Expand All @@ -49,9 +50,11 @@
from .parallel import parallel_execute
from .progress_stream import stream_output
from .progress_stream import StreamOutputError
from .utils import generate_random_id
from .utils import json_hash
from .utils import parse_bytes
from .utils import parse_seconds_float
from .utils import truncate_id


log = logging.getLogger(__name__)
Expand Down Expand Up @@ -122,7 +125,7 @@ class NoSuchImageError(Exception):
pass


ServiceName = namedtuple('ServiceName', 'project service number')
ServiceName = namedtuple('ServiceName', 'project service number slug')


ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
Expand Down Expand Up @@ -219,7 +222,6 @@ def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""

for container in self.containers(labels=['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]):
return container

Expand Down Expand Up @@ -425,27 +427,33 @@ def _containers_have_diverged(self, containers):

return has_diverged

def _execute_convergence_create(self, scale, detached, start, project_services=None):
i = self._next_container_number()
def _execute_convergence_create(self, scale, detached, start):

def create_and_start(service, n):
container = service.create_container(number=n, quiet=True)
if not detached:
container.attach_log_stream()
if start:
self.start_container(container)
return container
i = self._next_container_number()

containers, errors = parallel_execute(
[ServiceName(self.project, self.name, index) for index in range(i, i + scale)],
lambda service_name: create_and_start(self, service_name.number),
lambda service_name: self.get_container_name(service_name.service, service_name.number),
"Creating"
)
for error in errors.values():
raise OperationFailedError(error)
def create_and_start(service, n):
container = service.create_container(number=n, quiet=True)
if not detached:
container.attach_log_stream()
if start:
self.start_container(container)
return container

return containers
containers, errors = parallel_execute(
[
ServiceName(self.project, self.name, index, generate_random_id())
for index in range(i, i + scale)
],
lambda service_name: create_and_start(self, service_name.number),
lambda service_name: self.get_container_name(
service_name.service, service_name.number, service_name.slug
),
"Creating"
)
for error in errors.values():
raise OperationFailedError(error)

return containers

def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
renew_anonymous_volumes):
Expand Down Expand Up @@ -508,8 +516,8 @@ def stop_and_remove(container):

def execute_convergence_plan(self, plan, timeout=None, detached=False,
start=True, scale_override=None,
rescale=True, project_services=None,
reset_container_image=False, renew_anonymous_volumes=False):
rescale=True, reset_container_image=False,
renew_anonymous_volumes=False):
(action, containers) = plan
scale = scale_override if scale_override is not None else self.scale_num
containers = sorted(containers, key=attrgetter('number'))
Expand All @@ -518,7 +526,7 @@ def execute_convergence_plan(self, plan, timeout=None, detached=False,

if action == 'create':
return self._execute_convergence_create(
scale, detached, start, project_services
scale, detached, start
)

# The create action always needs an initial scale, but otherwise,
Expand Down Expand Up @@ -568,7 +576,7 @@ def recreate_container(self, container, timeout=None, attach_logs=False, start_n
container.rename_to_tmp_name()
new_container = self.create_container(
previous_container=container if not renew_anonymous_volumes else None,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
number=container.number,
quiet=True,
)
if attach_logs:
Expand Down Expand Up @@ -723,8 +731,6 @@ def get_link_names(self):
def get_volumes_from_names(self):
return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]

# TODO: this would benefit from github.com/docker/docker/pull/14699
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
containers = itertools.chain(
self._fetch_containers(
Expand Down Expand Up @@ -813,6 +819,7 @@ def _get_container_create_options(
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
slug = generate_random_id() if previous_container is None else previous_container.full_slug

container_options = dict(
(k, self.options[k])
Expand All @@ -821,7 +828,7 @@ def _get_container_create_options(
container_options.update(override_options)

if not container_options.get('name'):
container_options['name'] = self.get_container_name(self.name, number, one_off)
container_options['name'] = self.get_container_name(self.name, number, slug, one_off)

container_options.setdefault('detach', True)

Expand Down Expand Up @@ -873,7 +880,9 @@ def _get_container_create_options(
container_options.get('labels', {}),
self.labels(one_off=one_off),
number,
self.config_hash if add_config_hash else None)
self.config_hash if add_config_hash else None,
slug
)

# Delete options which are only used in HostConfig
for key in HOST_CONFIG_KEYS:
Expand Down Expand Up @@ -1111,12 +1120,12 @@ def labels(self, one_off=False, legacy=False):
def custom_container_name(self):
return self.options.get('container_name')

def get_container_name(self, service_name, number, one_off=False):
def get_container_name(self, service_name, number, slug, one_off=False):
if self.custom_container_name and not one_off:
return self.custom_container_name

container_name = build_container_name(
self.project, service_name, number, one_off,
self.project, service_name, number, slug, one_off,
)
ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
if container_name in ext_links_origins:
Expand Down Expand Up @@ -1373,11 +1382,13 @@ def mode(self):
# Names


def build_container_name(project, service, number, one_off=False):
def build_container_name(project, service, number, slug, one_off=False):
bits = [project.lstrip('-_'), service]
if one_off:
bits.append('run')
return '_'.join(bits + [str(number)])
return '_'.join(
bits + ([str(number), truncate_id(slug)] if slug else [str(number)])
)


# Images
Expand Down Expand Up @@ -1558,10 +1569,11 @@ def build_mount(mount_spec):
# Labels


def build_container_labels(label_options, service_labels, number, config_hash):
def build_container_labels(label_options, service_labels, number, config_hash, slug):
labels = dict(label_options or {})
labels.update(label.split('=', 1) for label in service_labels)
labels[LABEL_CONTAINER_NUMBER] = str(number)
labels[LABEL_SLUG] = slug
labels[LABEL_VERSION] = __version__

if config_hash:
Expand Down
19 changes: 19 additions & 0 deletions compose/utils.py
Expand Up @@ -7,6 +7,7 @@
import json.decoder
import logging
import ntpath
import random

import six
from docker.errors import DockerException
Expand Down Expand Up @@ -151,3 +152,21 @@ def unquote_path(s):
if s[0] == '"' and s[-1] == '"':
return s[1:-1]
return s


def generate_random_id():
    """Return a random hex id (up to 64 chars, matching Docker's id length).

    Transposition of moby/moby's pkg/stringid: ids whose 12-character
    truncation is fully numeric are rejected and regenerated, so short ids
    are never mistaken for numbers.
    """
    while True:
        # NOTE: '%x' works on both Python 2 and 3. The previous
        # hex(...)[2:-1] form stripped the trailing 'L' on Python 2 longs,
        # but on Python 3 it chopped off a real hex digit.
        val = '%x' % random.getrandbits(32 * 8)
        try:
            int(truncate_id(val))
            continue  # fully-numeric short id: try again
        except ValueError:
            return val


def truncate_id(value):
    """Truncate an id to 12 characters, dropping any 'algo:' prefix
    (e.g. 'sha256:...')."""
    if ':' in value:
        value = value[value.index(':') + 1:]
    if len(value) > 12:
        return value[:12]
    return value
1 change: 0 additions & 1 deletion script/test/versions.py
Expand Up @@ -50,7 +50,6 @@ def parse(cls, version):
stage = None
elif '-' in stage:
edition, stage = stage.split('-')

major, minor, patch = version.split('.', 3)
return cls(major, minor, patch, stage, edition)

Expand Down