Update nodepool config syntax

This implements the changes described in:

http://lists.openstack.org/pipermail/openstack-infra/2017-January/005018.html

It also removes some, but not all, extraneous keys from test config files.
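As a minimal before/after sketch of the new layout (label, image, and
provider names below are placeholders, not taken verbatim from this
change), a provider that used to carry its capacity and image settings
directly:

  labels:
    - name: trusty
      image: trusty
      min-ready: 1
      providers:
        - name: cloud1
  providers:
    - name: cloud1
      max-servers: 96
      images:
        - name: trusty
          min-ram: 8192

now groups them into named pools, with pool labels referencing
diskimages:

  labels:
    - name: trusty
      min-ready: 1
  providers:
    - name: cloud1
      diskimages:
        - name: trusty
      pools:
        - name: main
          max-servers: 96
          labels:
            - name: trusty
              diskimage: trusty
              min-ram: 8192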

Change-Id: Iebc941b4505d6ad46c882799b6230eb23545e5c0
James E. Blair 2017-03-22 15:53:54 -07:00
parent c5c5be30f9
commit dcc3b5e071
38 changed files with 665 additions and 659 deletions


@@ -172,30 +172,15 @@ cron:
labels:
- name: centos-7
image: centos-7
min-ready: 1
providers:
- name: devstack
- name: fedora-25
image: fedora-25
min-ready: 1
providers:
- name: devstack
- name: ubuntu-precise
image: ubuntu-precise
min-ready: 1
providers:
- name: devstack
- name: ubuntu-trusty
image: ubuntu-trusty
min-ready: 1
providers:
- name: devstack
- name: ubuntu-xenial
image: ubuntu-xenial
min-ready: 1
providers:
- name: devstack
providers:
- name: devstack
@@ -205,29 +190,42 @@ providers:
# Long boot timeout to deal with potentially nested virt.
boot-timeout: 600
launch-timeout: 900
max-servers: 5
rate: 0.25
images:
diskimages:
- name: centos-7
min-ram: 1024
name-filter: 'nodepool'
config-drive: true
- name: fedora-25
min-ram: 1024
name-filter: 'nodepool'
config-drive: true
- name: ubuntu-precise
min-ram: 512
name-filter: 'nodepool'
config-drive: true
- name: ubuntu-trusty
min-ram: 512
name-filter: 'nodepool'
config-drive: true
- name: ubuntu-xenial
min-ram: 512
name-filter: 'nodepool'
config-drive: true
pools:
- name: main
max-servers: 5
labels:
- name: centos-7
diskimage: centos-7
min-ram: 1024
name-filter: 'nodepool'
- name: fedora-25
diskimage: fedora-25
min-ram: 1024
name-filter: 'nodepool'
- name: ubuntu-precise
diskimage: ubuntu-precise
min-ram: 512
name-filter: 'nodepool'
- name: ubuntu-trusty
diskimage: ubuntu-trusty
min-ram: 512
name-filter: 'nodepool'
- name: ubuntu-xenial
diskimage: ubuntu-xenial
min-ram: 512
name-filter: 'nodepool'
diskimages:
- name: centos-7


@@ -271,7 +271,7 @@ class CleanupWorker(BaseWorker):
self._deleteUpload(upload)
def _cleanupObsoleteProviderUploads(self, provider, image, build_id):
image_names_for_provider = provider.images.keys()
image_names_for_provider = provider.diskimages.keys()
if image in image_names_for_provider:
# This image is in use for this provider
return
@@ -849,7 +849,7 @@ class UploadWorker(BaseWorker):
(build_id, filename, provider.name))
manager = self._config.provider_managers[provider.name]
provider_image = provider.images.get(image_name)
provider_image = provider.diskimages.get(image_name)
if provider_image is None:
raise exceptions.BuilderInvalidCommandError(
"Could not find matching provider image for %s" % image_name
@@ -899,7 +899,7 @@ class UploadWorker(BaseWorker):
to providers, do the upload if they are available on the local disk.
'''
for provider in self._config.providers.values():
for image in provider.images.values():
for image in provider.diskimages.values():
uploaded = False
# Check if we've been told to shutdown
@@ -931,7 +931,7 @@ class UploadWorker(BaseWorker):
:returns: True if an upload was attempted, False otherwise.
'''
# Check if image uploads are paused.
if provider.images.get(image.name).pause:
if provider.diskimages.get(image.name).pause:
return False
# Search for the most recent 'ready' image build


@@ -29,16 +29,6 @@ class ConfigValidator:
'cleanup': str,
}
images = {
'name': str,
'pause': bool,
'min-ram': int,
'name-filter': str,
'diskimage': str,
'meta': dict,
'config-drive': bool,
}
old_network = {
'net-id': str,
'net-label': str,
@@ -49,38 +39,53 @@ class ConfigValidator:
'public': bool, # Ignored, but kept for backwards compat
}
providers = {
pool_label = {
v.Required('name'): str,
v.Required('diskimage'): str,
'min-ram': int,
'name-filter': str,
}
pool = {
'name': str,
'networks': [v.Any(old_network, network)],
'max-servers': int,
'labels': [pool_label],
'availability-zones': [str],
}
provider_diskimage = {
'name': str,
'pause': bool,
'meta': dict,
'config-drive': bool,
}
provider = {
'name': str,
'region-name': str,
'availability-zones': [str],
'cloud': str,
'max-servers': int,
'max-concurrency': int,
'pool': str, # Ignored, but kept for backwards compat
'image-type': str,
'networks': [v.Any(old_network, network)],
'ipv6-preferred': bool,
'boot-timeout': int,
'api-timeout': int,
'launch-timeout': int,
'launch-retries': int,
'rate': float,
'images': [images],
'hostname-format': str,
'image-name-format': str,
'clean-floating-ips': bool,
'pools': [pool],
'diskimages': [provider_diskimage],
}
labels = {
label = {
'name': str,
'image': str,
'min-ready': int,
'providers': [{
'name': str,
}],
}
diskimages = {
diskimage = {
'name': str,
'pause': bool,
'elements': [str],
@@ -99,9 +104,9 @@ class ConfigValidator:
'chroot': str,
}],
'cron': cron,
'providers': [providers],
'labels': [labels],
'diskimages': [diskimages],
'providers': [provider],
'labels': [label],
'diskimages': [diskimage],
}
log.info("validating %s" % self.config_file)
@@ -110,12 +115,3 @@ class ConfigValidator:
# validate the overall schema
schema = v.Schema(top_level)
schema(config)
# labels must list valid providers
all_providers = [p['name'] for p in config['providers']]
for label in config['labels']:
for provider in label['providers']:
if not provider['name'] in all_providers:
raise AssertionError('label %s requests '
'non-existent provider %s'
% (label['name'], provider['name']))
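
As a rough, standalone sketch of how the voluptuous schemas above
behave (only the keys shown are assumed; the snippet is illustrative,
not part of the change):

import voluptuous as v

# Mirrors the pool_label schema defined above: a pool label must name
# itself and the diskimage it boots from.
pool_label_schema = v.Schema({
    v.Required('name'): str,
    v.Required('diskimage'): str,
    'min-ram': int,
    'name-filter': str,
})

pool_label_schema({'name': 'trusty', 'diskimage': 'trusty',
                   'min-ram': 8192})   # validates and returns the dict
# pool_label_schema({'name': 'trusty'}) raises v.MultipleInvalid,
# since 'diskimage' is a required key.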


@@ -221,7 +221,7 @@ class NodePoolCmd(NodepoolApp):
alien_ids = []
uploads = []
for image in provider.images:
for image in provider.diskimages:
# Build list of provider images as recorded in ZK
for bnum in self.zk.getBuildNumbers(image):
uploads.extend(


@@ -40,31 +40,18 @@ class Config(ConfigValue):
class Provider(ConfigValue):
def __eq__(self, other):
if (other.cloud_config != self.cloud_config or
other.max_servers != self.max_servers or
other.pool != self.pool or
other.pools != self.pools or
other.image_type != self.image_type or
other.rate != self.rate or
other.api_timeout != self.api_timeout or
other.boot_timeout != self.boot_timeout or
other.launch_timeout != self.launch_timeout or
other.networks != self.networks or
other.ipv6_preferred != self.ipv6_preferred or
other.clean_floating_ips != self.clean_floating_ips or
other.max_concurrency != self.max_concurrency or
other.azs != self.azs):
other.diskimages != self.diskimages):
return False
new_images = other.images
old_images = self.images
# Check if images have been added or removed
if set(new_images.keys()) != set(old_images.keys()):
return False
# check if existing images have been updated
for k in new_images:
if (new_images[k].min_ram != old_images[k].min_ram or
new_images[k].name_filter != old_images[k].name_filter or
new_images[k].meta != old_images[k].meta or
new_images[k].config_drive != old_images[k].config_drive):
return False
return True
def __ne__(self, other):
@@ -74,9 +61,25 @@ class Provider(ConfigValue):
return "<Provider %s>" % self.name
class ProviderImage(ConfigValue):
class ProviderPool(ConfigValue):
def __eq__(self, other):
if (other.labels != self.labels or
other.max_servers != self.max_servers or
other.azs != self.azs or
other.networks != self.networks):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<ProviderImage %s>" % self.name
return "<ProviderPool %s>" % self.name
class ProviderDiskImage(ConfigValue):
def __repr__(self):
return "<ProviderDiskImage %s>" % self.name
class Label(ConfigValue):
@@ -84,9 +87,19 @@ class Label(ConfigValue):
return "<Label %s>" % self.name
class LabelProvider(ConfigValue):
class ProviderLabel(ConfigValue):
def __eq__(self, other):
if (other.diskimage != self.diskimage or
other.min_ram != self.min_ram or
other.name_filter != self.name_filter):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<LabelProvider %s>" % self.name
return "<ProviderLabel %s>" % self.name
class Cron(ConfigValue):
@@ -95,6 +108,20 @@ class Cron(ConfigValue):
class DiskImage(ConfigValue):
def __eq__(self, other):
if (other.name != self.name or
other.elements != self.elements or
other.release != self.release or
other.rebuild_age != self.rebuild_age or
other.env_vars != self.env_vars or
other.image_types != self.image_types or
other.pause != self.pause):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<DiskImage %s>" % self.name
@@ -154,6 +181,34 @@ def loadConfig(config_path):
name = z.host + '_' + str(z.port)
newconfig.zookeeper_servers[name] = z
for diskimage in config.get('diskimages', []):
d = DiskImage()
d.name = diskimage['name']
newconfig.diskimages[d.name] = d
if 'elements' in diskimage:
d.elements = u' '.join(diskimage['elements'])
else:
d.elements = ''
# must be a string, as it's passed as env-var to
# d-i-b, but might be untyped in the yaml and
# interpreted as a number (e.g. "21" for fedora)
d.release = str(diskimage.get('release', ''))
d.rebuild_age = int(diskimage.get('rebuild-age', 86400))
d.env_vars = diskimage.get('env-vars', {})
if not isinstance(d.env_vars, dict):
#self.log.error("%s: ignoring env-vars; "
# "should be a dict" % d.name)
d.env_vars = {}
d.image_types = set(diskimage.get('formats', []))
d.pause = bool(diskimage.get('pause', False))
for label in config.get('labels', []):
l = Label()
l.name = label['name']
newconfig.labels[l.name] = l
l.min_ready = label.get('min-ready', 2)
l.pools = []
for provider in config.get('providers', []):
p = Provider()
p.name = provider['name']
@@ -162,30 +217,14 @@ def loadConfig(config_path):
cloud_kwargs = _cloudKwargsFromProvider(provider)
p.cloud_config = _get_one_cloud(cloud_config, cloud_kwargs)
p.region_name = provider.get('region-name')
p.max_servers = provider['max-servers']
p.max_concurrency = provider.get('max-concurrency', -1)
p.pool = provider.get('pool', None)
p.rate = provider.get('rate', 1.0)
p.api_timeout = provider.get('api-timeout')
p.boot_timeout = provider.get('boot-timeout', 60)
p.launch_timeout = provider.get('launch-timeout', 3600)
p.launch_retries = provider.get('launch-retries', 3)
p.networks = []
for network in provider.get('networks', []):
n = Network()
p.networks.append(n)
if 'net-id' in network:
n.id = network['net-id']
n.name = None
elif 'net-label' in network:
n.name = network['net-label']
n.id = None
else:
n.name = network.get('name')
n.id = None
p.ipv6_preferred = provider.get('ipv6-preferred')
p.clean_floating_ips = provider.get('clean-floating-ips')
p.azs = provider.get('availability-zones')
p.hostname_format = provider.get(
'hostname-format',
'{label.name}-{provider.name}-{node.id}'
@@ -196,13 +235,15 @@ def loadConfig(config_path):
)
p.image_type = provider.get(
'image-type', p.cloud_config.config['image_format'])
p.images = {}
for image in provider['images']:
i = ProviderImage()
p.diskimages = {}
for image in provider.get('diskimages', []):
i = ProviderDiskImage()
i.name = image['name']
p.images[i.name] = i
i.min_ram = image['min-ram']
i.name_filter = image.get('name-filter', None)
p.diskimages[i.name] = i
diskimage = newconfig.diskimages[i.name]
diskimage.image_types.add(p.image_type)
#i.min_ram = image['min-ram']
#i.name_filter = image.get('name-filter', None)
i.pause = bool(image.get('pause', False))
i.config_drive = image.get('config-drive', None)
@@ -219,45 +260,39 @@ def loadConfig(config_path):
#self.log.error("Invalid metadata for %s; ignored"
# % i.name)
i.meta = {}
p.pools = {}
for pool in provider.get('pools', []):
pp = ProviderPool()
pp.name = pool['name']
pp.provider = p
p.pools[pp.name] = pp
pp.max_servers = pool['max-servers']
pp.azs = pool.get('availability-zones')
pp.networks = []
for network in pool.get('networks', []):
n = Network()
pp.networks.append(n)
if 'net-id' in network:
n.id = network['net-id']
n.name = None
elif 'net-label' in network:
n.name = network['net-label']
n.id = None
else:
n.name = network.get('name')
n.id = None
pp.labels = {}
for label in pool.get('labels', []):
pl = ProviderLabel()
pl.name = label['name']
pl.pool = pp
pp.labels[pl.name] = pl
pl.diskimage = newconfig.diskimages[label['diskimage']]
pl.min_ram = label['min-ram']
pl.name_filter = label.get('name-filter', None)
if 'diskimages' in config:
for diskimage in config['diskimages']:
d = DiskImage()
d.name = diskimage['name']
newconfig.diskimages[d.name] = d
if 'elements' in diskimage:
d.elements = u' '.join(diskimage['elements'])
else:
d.elements = ''
# must be a string, as it's passed as env-var to
# d-i-b, but might be untyped in the yaml and
# interpreted as a number (e.g. "21" for fedora)
d.release = str(diskimage.get('release', ''))
d.rebuild_age = int(diskimage.get('rebuild-age', 86400))
d.env_vars = diskimage.get('env-vars', {})
if not isinstance(d.env_vars, dict):
#self.log.error("%s: ignoring env-vars; "
# "should be a dict" % d.name)
d.env_vars = {}
d.image_types = set(diskimage.get('formats', []))
d.pause = bool(diskimage.get('pause', False))
# Do this after providers to build the image-types
for provider in newconfig.providers.values():
for image in provider.images.values():
diskimage = newconfig.diskimages[image.name]
diskimage.image_types.add(provider.image_type)
for label in config.get('labels', []):
l = Label()
l.name = label['name']
newconfig.labels[l.name] = l
l.image = label['image']
l.min_ready = label.get('min-ready', 2)
l.providers = {}
for provider in label['providers']:
p = LabelProvider()
p.name = provider['name']
l.providers[p.name] = p
top_label = newconfig.labels[pl.name]
top_label.pools.append(pp)
return newconfig
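
A hedged sketch of walking the object graph loadConfig now returns
(attribute and key names come from the code above; the loop itself is
illustrative):

# Assumes the loadConfig defined above and an example config path.
config = loadConfig('/etc/nodepool/nodepool.yaml')
for provider in config.providers.values():
    for pool in provider.pools.values():        # ProviderPool, keyed by name
        for label in pool.labels.values():      # ProviderLabel objects
            print(provider.name, pool.name, label.name,
                  label.diskimage.name, label.min_ram)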


@@ -65,7 +65,8 @@ class Dummy(object):
def fake_get_one_cloud(cloud_config, cloud_kwargs):
cloud_kwargs['validate'] = False
cloud_kwargs['image_format'] = 'qcow2'
if 'image_format' not in cloud_kwargs:
cloud_kwargs['image_format'] = 'qcow2'
return cloud_config.get_one_cloud(**cloud_kwargs)


@@ -229,14 +229,13 @@ class InstanceDeleter(threading.Thread, StatsReporter):
class NodeLauncher(threading.Thread, StatsReporter):
def __init__(self, zk, provider, label, provider_manager, requestor,
def __init__(self, zk, provider_label, provider_manager, requestor,
node, retries):
'''
Initialize the launcher.
:param ZooKeeper zk: A ZooKeeper object.
:param Provider provider: A config Provider object.
:param Label label: The Label object for this node type.
:param ProviderLabel provider: A config ProviderLabel object.
:param ProviderManager provider_manager: The manager object used to
interact with the selected provider.
:param str requestor: Identifier for the request originator.
@@ -247,26 +246,24 @@ class NodeLauncher(threading.Thread, StatsReporter):
StatsReporter.__init__(self)
self.log = logging.getLogger("nodepool.NodeLauncher-%s" % node.id)
self._zk = zk
self._provider = provider
self._label = label
self._label = provider_label
self._manager = provider_manager
self._node = node
self._retries = retries
self._image_name = None
self._requestor = requestor
self._pool = self._label.pool
self._provider = self._pool.provider
self._diskimage = self._provider.diskimages[self._label.diskimage.name]
def _launchNode(self):
config_image = self._provider.images[self._label.image]
# Stored for statsd reporting
self._image_name = config_image.name
cloud_image = self._zk.getMostRecentImageUpload(
config_image.name, self._provider.name)
self._diskimage.name, self._provider.name)
if not cloud_image:
raise LaunchNodepoolException(
"Unable to find current cloud image %s in %s" %
(config_image.name, self._provider.name)
(self._diskimage.name, self._provider.name)
)
hostname = self._provider.hostname_format.format(
@@ -275,7 +272,8 @@ class NodeLauncher(threading.Thread, StatsReporter):
self.log.info("Creating server with hostname %s in %s from image %s "
"for node id: %s" % (hostname, self._provider.name,
config_image.name, self._node.id))
self._diskimage.name,
self._node.id))
# NOTE: We store the node ID in the server metadata to use for leaked
# instance detection. We cannot use the external server ID for this
@@ -284,13 +282,14 @@ class NodeLauncher(threading.Thread, StatsReporter):
server = self._manager.createServer(
hostname,
config_image.min_ram,
self._label.min_ram,
cloud_image.external_id,
name_filter=config_image.name_filter,
name_filter=self._label.name_filter,
az=self._node.az,
config_drive=config_image.config_drive,
config_drive=self._diskimage.config_drive,
nodepool_node_id=self._node.id,
nodepool_image_name=config_image.name)
nodepool_image_name=self._diskimage.name,
networks=self._pool.networks)
self._node.external_id = server.id
self._node.hostname = hostname
@@ -417,14 +416,13 @@ class NodeLaunchManager(object):
'''
Handle launching multiple nodes in parallel.
'''
def __init__(self, zk, provider, labels, provider_manager,
def __init__(self, zk, pool, provider_manager,
requestor, retries):
'''
Initialize the launch manager.
:param ZooKeeper zk: A ZooKeeper object.
:param Provider provider: A config Provider object.
:param dict labels: A dict of config Label objects.
:param ProviderPool pool: A config ProviderPool object.
:param ProviderManager provider_manager: The manager object used to
interact with the selected provider.
:param str requestor: Identifier for the request originator.
@@ -436,8 +434,7 @@ class NodeLaunchManager(object):
self._ready_nodes = []
self._threads = []
self._zk = zk
self._provider = provider
self._labels = labels
self._pool = pool
self._manager = provider_manager
self._requestor = requestor
@@ -468,8 +465,8 @@ class NodeLaunchManager(object):
:param Node node: The node object.
'''
self._nodes.append(node)
label = self._labels[node.type]
t = NodeLauncher(self._zk, self._provider, label, self._manager,
provider_label = self._pool.labels[node.type]
t = NodeLauncher(self._zk, provider_label, self._manager,
self._requestor, node, self._retries)
t.start()
self._threads.append(t)
@@ -508,13 +505,13 @@ class NodeRequestHandler(object):
'''
Class to process a single node request.
The ProviderWorker thread will instantiate a class of this type for each
The PoolWorker thread will instantiate a class of this type for each
node request that it pulls from ZooKeeper.
'''
def __init__(self, pw, request):
'''
:param ProviderWorker pw: The parent ProviderWorker object.
:param PoolWorker pw: The parent PoolWorker object.
:param NodeRequest request: The request to handle.
'''
self.log = logging.getLogger("nodepool.NodeRequestHandler")
@@ -526,16 +523,16 @@ class NodeRequestHandler(object):
self.chosen_az = None
self.paused = False
def _setFromProviderWorker(self):
def _setFromPoolWorker(self):
'''
Set values that we pull from the parent ProviderWorker.
Set values that we pull from the parent PoolWorker.
We don't do this in __init__ because this class is re-entrant and we
want the updated values.
'''
self.provider = self.pw.getProviderConfig()
self.pool = self.pw.getPoolConfig()
self.zk = self.pw.getZK()
self.labels = self.pw.getLabelsConfig()
self.manager = self.pw.getProviderManager()
self.launcher_id = self.pw.launcher_id
@@ -549,11 +546,7 @@ class NodeRequestHandler(object):
:returns: True if it is available, False otherwise.
'''
for label in self.request.node_types:
try:
img = self.labels[label].image
except KeyError:
self.log.error("Node type %s not a defined label", label)
return False
img = self.pool.labels[label].diskimage.name
if not self.zk.getMostRecentImageUpload(img, self.provider.name):
return False
@@ -568,12 +561,8 @@ class NodeRequestHandler(object):
'''
invalid = []
for ntype in self.request.node_types:
if ntype not in self.labels:
if ntype not in self.pool.labels:
invalid.append(ntype)
else:
label = self.labels[ntype]
if self.provider.name not in label.providers.keys():
invalid.append(ntype)
return invalid
def _countNodes(self):
@@ -584,7 +573,8 @@ class NodeRequestHandler(object):
'''
count = 0
for node in self.zk.nodeIterator():
if node.provider == self.provider.name:
if (node.provider == self.provider.name and
node.pool == self.pool.name):
count += 1
return count
@@ -614,7 +604,7 @@ class NodeRequestHandler(object):
'''
if not self.launch_manager:
self.launch_manager = NodeLaunchManager(
self.zk, self.provider, self.labels, self.manager,
self.zk, self.pool, self.manager,
self.request.requestor, retries=self.provider.launch_retries)
# Since this code can be called more than once for the same request,
@@ -633,10 +623,12 @@ class NodeRequestHandler(object):
got_a_node = False
if self.request.reuse and ntype in ready_nodes:
for node in ready_nodes[ntype]:
# Only interested in nodes from this provider and within
# the selected AZ.
# Only interested in nodes from this provider and
# pool, and within the selected AZ.
if node.provider != self.provider.name:
continue
if node.pool != self.pool.name:
continue
if self.chosen_az and node.az != self.chosen_az:
continue
@@ -669,12 +661,12 @@ class NodeRequestHandler(object):
if not got_a_node:
# Select grouping AZ if we didn't set AZ from a selected,
# pre-existing node
if not self.chosen_az and self.provider.azs:
self.chosen_az = random.choice(self.provider.azs)
if not self.chosen_az and self.pool.azs:
self.chosen_az = random.choice(self.pool.azs)
# If we calculate that we're at capacity, pause until nodes
# are released by Zuul and removed by the DeletedNodeWorker.
if self._countNodes() >= self.provider.max_servers:
if self._countNodes() >= self.pool.max_servers:
if not self.paused:
self.log.debug(
"Pausing request handling to satisfy request %s",
@@ -690,6 +682,7 @@ class NodeRequestHandler(object):
node.state = zk.INIT
node.type = ntype
node.provider = self.provider.name
node.pool = self.pool.name
node.az = self.chosen_az
node.launcher = self.launcher_id
node.allocated_to = self.request.id
@@ -714,17 +707,17 @@ class NodeRequestHandler(object):
'''
Main body for the NodeRequestHandler.
'''
self._setFromProviderWorker()
self._setFromPoolWorker()
declined_reasons = []
if not self._imagesAvailable():
declined_reasons.append('images are not available')
if len(self.request.node_types) > self.provider.max_servers:
declined_reasons.append('it would exceed quota')
invalid_types = self._invalidNodeTypes()
if invalid_types:
declined_reasons.append('node type(s) [%s] not available' %
','.join(invalid_types))
elif not self._imagesAvailable():
declined_reasons.append('images are not available')
if len(self.request.node_types) > self.pool.max_servers:
declined_reasons.append('it would exceed quota')
if declined_reasons:
self.log.debug("Declining node request %s because %s",
@@ -753,6 +746,8 @@ class NodeRequestHandler(object):
@property
def alive_thread_count(self):
if not self.launch_manager:
return 0
return self.launch_manager.alive_thread_count
#----------------------------------------------------------------
@@ -858,23 +853,25 @@ class NodeRequestHandler(object):
return True
class ProviderWorker(threading.Thread):
class PoolWorker(threading.Thread):
'''
Class that manages node requests for a single provider.
Class that manages node requests for a single provider pool.
The NodePool thread will instantiate a class of this type for each
provider found in the nodepool configuration file. If the provider to
which this thread is assigned is removed from the configuration file, then
that will be recognized and this thread will shut itself down.
provider pool found in the nodepool configuration file. If the
pool or provider to which this thread is assigned is removed from
the configuration file, then that will be recognized and this
thread will shut itself down.
'''
def __init__(self, nodepool, provider_name):
def __init__(self, nodepool, provider_name, pool_name):
threading.Thread.__init__(
self, name='ProviderWorker.%s' % provider_name
self, name='PoolWorker.%s-%s' % (provider_name, pool_name)
)
self.log = logging.getLogger("nodepool.%s" % self.name)
self.nodepool = nodepool
self.provider_name = provider_name
self.pool_name = pool_name
self.running = False
self.paused_handler = None
self.request_handlers = []
@@ -888,19 +885,6 @@ class ProviderWorker(threading.Thread):
# Private methods
#----------------------------------------------------------------
def _activeThreads(self):
'''
Return the number of alive threads in use by this provider.
This is an approximate, top-end number for alive threads, since some
threads obviously may have finished by the time we finish the
calculation.
'''
total = 0
for r in self.request_handlers:
total += r.alive_thread_count
return total
def _assignHandlers(self):
'''
For each request we can grab, create a NodeRequestHandler for it.
@@ -917,9 +901,15 @@ class ProviderWorker(threading.Thread):
if self.paused_handler:
return
# Get active threads for all pools for this provider
active_threads = sum([
w.activeThreads() for
w in self.nodepool.getPoolWorkers(self.provider_name)
])
# Short-circuit for limited request handling
if (provider.max_concurrency > 0
and self._activeThreads() >= provider.max_concurrency
if (provider.max_concurrency > 0 and
active_threads >= provider.max_concurrency
):
return
@@ -968,18 +958,31 @@ class ProviderWorker(threading.Thread):
# Public methods
#----------------------------------------------------------------
def activeThreads(self):
'''
Return the number of alive threads in use by this provider.
This is an approximate, top-end number for alive threads, since some
threads obviously may have finished by the time we finish the
calculation.
'''
total = 0
for r in self.request_handlers:
total += r.alive_thread_count
return total
def getZK(self):
return self.nodepool.getZK()
def getProviderConfig(self):
return self.nodepool.config.providers[self.provider_name]
def getPoolConfig(self):
return self.getProviderConfig().pools[self.pool_name]
def getProviderManager(self):
return self.nodepool.getProviderManager(self.provider_name)
def getLabelsConfig(self):
return self.nodepool.config.labels
def run(self):
self.running = True
@@ -1005,7 +1008,7 @@ class ProviderWorker(threading.Thread):
self._removeCompletedHandlers()
except Exception:
self.log.exception("Error in ProviderWorker:")
self.log.exception("Error in PoolWorker:")
time.sleep(self.watermark_sleep)
# Cleanup on exit
@@ -1014,7 +1017,7 @@ class ProviderWorker(threading.Thread):
def stop(self):
'''
Shutdown the ProviderWorker thread.
Shutdown the PoolWorker thread.
Do not wait for the request handlers to finish. Any nodes
that are in the process of launching will be cleaned up on a
@@ -1293,7 +1296,7 @@ class NodePool(threading.Thread):
self.config = None
self.zk = None
self.statsd = stats.get_client()
self._provider_threads = {}
self._pool_threads = {}
self._cleanup_thread = None
self._delete_thread = None
self._wake_condition = threading.Condition()
@@ -1315,10 +1318,10 @@ class NodePool(threading.Thread):
self._delete_thread.stop()
self._delete_thread.join()
# Don't let stop() return until all provider threads have been
# Don't let stop() return until all pool threads have been
# terminated.
self.log.debug("Stopping provider threads")
for thd in self._provider_threads.values():
self.log.debug("Stopping pool threads")
for thd in self._pool_threads.values():
if thd.isAlive():
thd.stop()
self.log.debug("Waiting for %s" % thd.name)
@@ -1361,6 +1364,10 @@ class NodePool(threading.Thread):
def getProviderManager(self, provider_name):
return self.config.provider_managers[provider_name]
def getPoolWorkers(self, provider_name):
return [t for t in self._pool_threads.values() if
t.provider_name == provider_name]
def updateConfig(self):
config = self.loadConfig()
provider_manager.ProviderManager.reconfigure(self.config, config)
@@ -1416,6 +1423,13 @@ class NodePool(threading.Thread):
:returns: True if image associated with the label is uploaded and
ready in at least one provider. False otherwise.
'''
for pool in label.pools:
for pool_label in pool.labels.values():
if self.zk.getMostRecentImageUpload(pool_label.diskimage.name,
pool.provider.name):
return True
return False
for provider_name in label.providers.keys():
if self.zk.getMostRecentImageUpload(label.image, provider_name):
return True
@@ -1500,27 +1514,34 @@ class NodePool(threading.Thread):
self, self.delete_interval)
self._delete_thread.start()
# Stop any ProviderWorker threads if the provider was removed
# Stop any PoolWorker threads if the pool was removed
# from the config.
for provider_name in self._provider_threads.keys():
if provider_name not in self.config.providers.keys():
self._provider_threads[provider_name].stop()
pool_keys = set()
for provider in self.config.providers.values():
for pool in provider.pools.values():
pool_keys.add(provider.name + '-' + pool.name)
for key in self._pool_threads.keys():
if key not in pool_keys:
self._pool_threads[key].stop()
# Start (or restart) provider threads for each provider in
# the config. Removing a provider from the config and then
# adding it back would cause a restart.
for p in self.config.providers.values():
if p.name not in self._provider_threads.keys():
t = ProviderWorker(self, p.name)
self.log.info( "Starting %s" % t.name)
t.start()
self._provider_threads[p.name] = t
elif not self._provider_threads[p.name].isAlive():
self._provider_threads[p.name].join()
t = ProviderWorker(self, p.name)
self.log.info( "Restarting %s" % t.name)
t.start()
self._provider_threads[p.name] = t
for provider in self.config.providers.values():
for pool in provider.pools.values():
key = provider.name + '-' + pool.name
if key not in self._pool_threads.keys():
t = PoolWorker(self, provider.name, pool.name)
self.log.info( "Starting %s" % t.name)
t.start()
self._pool_threads[key] = t
elif not self._pool_threads[key].isAlive():
self._pool_threads[key].join()
t = PoolWorker(self, provider.name, pool.name)
self.log.info( "Restarting %s" % t.name)
t.start()
self._pool_threads[key] = t
except Exception:
self.log.exception("Exception in main loop:")


@@ -44,7 +44,7 @@ class NotFound(Exception):
def get_provider_manager(provider, use_taskmanager):
if (provider.cloud_config.name == 'fake'):
if provider.name.startswith('fake'):
return FakeProviderManager(provider, use_taskmanager)
else:
return ProviderManager(provider, use_taskmanager)
@@ -168,7 +168,9 @@ class ProviderManager(object):
def createServer(self, name, min_ram, image_id=None, image_name=None,
az=None, key_name=None, name_filter=None,
config_drive=None, nodepool_node_id=None,
nodepool_image_name=None):
nodepool_image_name=None, networks=None):
if not networks:
networks = []
if image_name:
image = self.findImage(image_name)
else:
@@ -183,7 +185,7 @@ class ProviderManager(object):
if az:
create_args['availability_zone'] = az
nics = []
for network in self.provider.networks:
for network in networks:
if network.id:
nics.append({'net-id': network.id})
elif network.name:


@@ -126,6 +126,9 @@ class BaseTestCase(testtools.TestCase):
l = logging.getLogger('kazoo')
l.setLevel(logging.INFO)
l.propagate=False
l = logging.getLogger('stevedore')
l.setLevel(logging.INFO)
l.propagate=False
self.useFixture(fixtures.NestedTempfile())
self.subprocesses = []
@@ -187,7 +190,7 @@ class BaseTestCase(testtools.TestCase):
continue
if t.name.startswith("CleanupWorker"):
continue
if t.name.startswith("ProviderWorker"):
if t.name.startswith("PoolWorker"):
continue
if t.name not in whitelist:
done = False


@@ -1,7 +1,7 @@
clouds:
fake:
auth:
usernmae: 'fake'
username: 'fake'
password: 'fake'
project_id: 'fake'
auth_url: 'fake'


@@ -12,38 +12,47 @@ zookeeper-servers:
labels:
- name: trusty
image: trusty
min-ready: 1
providers:
- name: cloud1
- name: cloud2
- name: trusty-2-node
image: trusty
min-ready: 0
providers:
- name: cloud1
- name: cloud2
providers:
- name: cloud1
region-name: 'vanilla'
boot-timeout: 120
max-servers: 184
max-concurrency: 10
launch-retries: 3
rate: 0.001
images:
diskimages:
- name: trusty
min-ram: 8192
pools:
- name: main
max-servers: 184
labels:
- name: trusty
diskimage: trusty
min-ram: 8192
- name: trusty-2-node
diskimage: trusty
min-ram: 8192
- name: cloud2
region-name: 'chocolate'
boot-timeout: 120
max-servers: 184
rate: 0.001
images:
diskimages:
- name: trusty
pause: False
min-ram: 8192
pools:
- name: main
max-servers: 184
labels:
- name: trusty
diskimage: trusty
min-ram: 8192
- name: trusty-2-node
diskimage: trusty
min-ram: 8192
diskimages:
- name: trusty


@@ -9,26 +9,25 @@ zookeeper-servers:
labels:
- name: real-label
image: fake-image
min-ready: 1
providers:
- name: real-provider
providers:
- name: real-provider
region-name: real-region
max-servers: 96
pool: 'real'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Real'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: real-label
diskimage: fake-image
min-ram: 8192
name-filter: 'Real'
diskimages:
- name: fake-image


@@ -9,26 +9,25 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: real-provider
providers:
- name: real-provider
cloud: real-cloud
max-servers: 96
pool: 'real'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Real'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
name-filter: 'Real'
diskimages:
- name: fake-image


@@ -12,26 +12,22 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,29 +12,30 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
availability-zones:
- az1
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
availability-zones:
- az1
networks:
- net-id: 'some-uuid'
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
name-filter: 'Fake'
diskimages:
- name: fake-image


@@ -12,28 +12,30 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
availability-zones:
- az1
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- net-id: 'some-uuid'
availability-zones:
- az1
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
name-filter: 'Fake'
diskimages:
- name: fake-image


@@ -11,43 +11,47 @@ zookeeper-servers:
labels:
- name: fake-label1
image: fake-image1
min-ready: 1
providers:
- name: fake-provider1
- name: fake-label2
image: fake-image2
min-ready: 1
providers:
- name: fake-provider2
providers:
- name: fake-provider1
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
cloud: fake
rate: 0.0001
images:
diskimages:
- name: fake-image1
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- net-id: 'some-uuid'
labels:
- name: fake-label1
diskimage: fake-image1
min-ram: 8192
name-filter: 'fake'
- name: fake-provider2
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image2
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- net-id: 'some-uuid'
labels:
- name: fake-label2
diskimage: fake-image2
min-ram: 8192
name-filter: 'fake'
diskimages:
- name: fake-image1


@@ -12,26 +12,28 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 0
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- net-id: 'some-uuid'
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
name-filter: 'fake'
diskimages:
- name: fake-image


@@ -12,26 +12,28 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- net-id: 'some-uuid'
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
name-filter: 'fake'
diskimages:
- name: fake-image


@@ -12,33 +12,33 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
- name: fake-label2
image: fake-image2
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
- name: fake-image2
min-ram: 8192
pools:
- name: main
max-servers: 96
networks:
- net-id: 'some-uuid'
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
- name: fake-label2
diskimage: fake-image2
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,34 +12,32 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
- name: fake-label2
image: fake-image2
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
pause: True
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
- name: fake-image2
min-ram: 8192
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
min-ram: 8192
diskimage: fake-image
- name: fake-label2
diskimage: fake-image2
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,70 +12,64 @@ zookeeper-servers:
labels:
- name: fake-label1
image: fake-image
min-ready: 1
providers:
- name: fake-provider1
- name: fake-label2
image: fake-image
min-ready: 1
providers:
- name: fake-provider2
- name: fake-label3
image: fake-image
min-ready: 1
providers:
- name: fake-provider3
providers:
- name: fake-provider1
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'ipv6-uuid'
ipv6-preferred: True
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- net-id: 'ipv6-uuid'
labels:
- name: fake-label1
diskimage: fake-image
min-ram: 8192
- name: fake-provider2
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'ipv6-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- net-id: 'ipv6-uuid'
labels:
- name: fake-label2
diskimage: fake-image
min-ram: 8192
- name: fake-provider3
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
ipv6-preferred: True
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- net-id: 'some-uuid'
labels:
- name: fake-label3
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,40 +12,31 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider2
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
- name: fake-provider2
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,28 +12,25 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 0
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: 'fake'
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
launch-retries: 2
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
networks:
- net-id: 'some-uuid'
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,28 +12,24 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 0
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
availability-zones:
- az1
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
availability-zones:
- az1
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,28 +12,26 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- name: 'fake-public-network-name'
public: true
- name: 'fake-private-network-name'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
networks:
- name: 'fake-public-network-name'
public: true
- name: 'fake-private-network-name'
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,28 +12,22 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 0
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
availability-zones:
- az1
max-servers: 2
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 2
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,33 +12,28 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
- name: fake-label2
image: fake-image2
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
- name: fake-image2
min-ram: 8192
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
- name: fake-label2
diskimage: fake-image2
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,26 +12,22 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,41 +12,35 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
- name: fake-provider2
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
- name: fake-provider2
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,34 +12,27 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
- name: fake-provider2
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images: []
diskimages:
- name: fake-image


@@ -12,42 +12,38 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 2
providers:
- name: fake-provider1
- name: fake-provider2
providers:
- name: fake-provider1
cloud: fake
region-name: fake-region
max-servers: 2
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
SHOULD_FAIL: 'true'
pools:
- name: main
max-servers: 2
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
- name: fake-provider2
cloud: fake
region-name: fake-region
max-servers: 2
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 2
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,27 +12,23 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 1
providers:
- name: fake-provider
providers:
- name: fake-provider
cloud: fake
region-name: fake-region
max-servers: 96
pool: 'fake'
image-type: vhd
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 96
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -12,43 +12,38 @@ zookeeper-servers:
labels:
- name: fake-label
image: fake-image
min-ready: 2
providers:
- name: fake-provider1
- name: fake-provider2
providers:
- name: fake-provider1
cloud: fake
region-name: fake-region
max-servers: 2
pool: 'fake'
image-type: vhd
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 2
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
- name: fake-provider2
cloud: fake
region-name: fake-region
max-servers: 2
pool: 'fake'
image-type: qcow2
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
diskimages:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
pools:
- name: main
max-servers: 2
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
diskimages:
- name: fake-image


@@ -40,7 +40,7 @@ class TestNodeLaunchManager(tests.DBTestCase):
self.waitForImage('fake-provider', 'fake-image')
self.provider = b._config.providers['fake-provider']
self.labels = b._config.labels
self.provider_pool = self.provider.pools['main']
# The builder config does not have a provider manager, so create one.
self.pmanager = provider_manager.ProviderManager(self.provider, False)
@@ -53,7 +53,7 @@ class TestNodeLaunchManager(tests.DBTestCase):
n1 = zk.Node()
n1.state = zk.BUILDING
n1.type = 'fake-label'
mgr = NodeLaunchManager(self.zk, self.provider, self.labels,
mgr = NodeLaunchManager(self.zk, self.provider_pool,
self.pmanager, 'zuul', 1)
mgr.launch(n1)
while not mgr.poll():
@@ -70,7 +70,7 @@ class TestNodeLaunchManager(tests.DBTestCase):
n1 = zk.Node()
n1.state = zk.BUILDING
n1.type = 'fake-label'
mgr = NodeLaunchManager(self.zk, self.provider, self.labels,
mgr = NodeLaunchManager(self.zk, self.provider_pool,
self.pmanager, 'zuul', 1)
mgr.launch(n1)
while not mgr.poll():
@@ -90,7 +90,7 @@ class TestNodeLaunchManager(tests.DBTestCase):
n2 = zk.Node()
n2.state = zk.BUILDING
n2.type = 'fake-label'
mgr = NodeLaunchManager(self.zk, self.provider, self.labels,
mgr = NodeLaunchManager(self.zk, self.provider_pool,
self.pmanager, 'zuul', 1)
mgr.launch(n1)
mgr.launch(n2)


@@ -469,6 +469,7 @@ class TestNodepool(tests.DBTestCase):
node.type = 'fake-label'
node.public_ipv4 = 'fake'
node.provider = 'fake-provider'
node.pool = 'main'
node.allocated_to = req.id
self.zk.storeNode(node)


@@ -404,6 +404,7 @@ class Node(BaseModel):
super(Node, self).__init__(id)
self.lock = None
self.provider = None
self.pool = None
self.type = None
self.allocated_to = None
self.az = None
@@ -430,6 +431,7 @@ class Node(BaseModel):
self.state == other.state and
self.state_time == other.state_time and
self.provider == other.provider and
self.pool == other.pool and
self.type == other.type and
self.allocated_to == other.allocated_to and
self.az == other.az and
@@ -452,6 +454,7 @@ class Node(BaseModel):
'''
d = super(Node, self).toDict()
d['provider'] = self.provider
d['pool'] = self.pool
d['type'] = self.type
d['allocated_to'] = self.allocated_to
d['az'] = self.az
@@ -480,6 +483,7 @@ class Node(BaseModel):
o = Node(o_id)
super(Node, o).fromDict(d)
o.provider = d.get('provider')
o.pool = d.get('pool')
o.type = d.get('type')
o.allocated_to = d.get('allocated_to')
o.az = d.get('az')
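
A small sketch of the new field round-tripping through serialization
(assumes the Node class above; the id value is an example and the
fromDict call shape is inferred from this hunk):

n = Node('0000000123')               # example node id
n.provider = 'fake-provider'
n.pool = 'main'                      # the new attribute
d = n.toDict()                       # d['pool'] == 'main'
restored = Node.fromDict(d, '0000000123')
assert restored.pool == n.pool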


@@ -20,23 +20,21 @@ diskimages:
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2
labels:
- name: single-fake
image: fake-nodepool
- name: small-fake
min-ready: 2
providers:
- name: fake-provider
- name: multi-fake
image: fake-nodepool
- name: big-fake
min-ready: 2
providers:
- name: fake-provider
providers:
- name: fake-provider
region-name: 'fake-region'
max-servers: 96
images:
diskimages:
- name: fake-nodepool
min-ram: 8192
name-filter: 'Fake'
diskimage: fake-nodepool
pools:
- name: main
max-servers: 96
labels:
- name: big-fake
diskimage: fake-nodepool
min-ram: 8192
name-filter: 'Fake'