Remove subnodes from nodepool

As we move closer to zuulv3, the concept of subnodes no longer
applies.  As a result, we can start removing the legacy code, making
it easier to re-enable our unit tests in the future.

Change-Id: If964e082bb56b32c8fbc0f3539b83629976fe041
Signed-off-by: Paul Belanger <pabelanger@redhat.com>
Paul Belanger 2017-02-10 12:37:21 -05:00
parent dd6de5b506
commit b27b4798a4
12 changed files with 27 additions and 496 deletions

View File

@@ -147,7 +147,6 @@ providers or images are used to create them). Example::
- name: provider2
- name: multi-precise
image: precise
subnodes: 2
min-ready: 2
ready-script: setup_multinode.sh
providers:
@@ -172,17 +171,6 @@ providers or images are used to create them). Example::
label considered disabled. ``min-ready`` is best-effort based on available
capacity and is not a guaranteed allocation.
``subnodes``
Used to configure multi-node support. If a `subnodes` key is supplied to
an image, it indicates that the specified number of additional nodes of the
same image type should be created and associated with each node for that
image.
Only one node from each such group will be added to the target, the
subnodes are expected to communicate directly with each other. In the
example above, for each Precise node added to the target system, two
additional nodes will be created and associated with it.
``ready-script``
A script to be used to perform any last minute changes to a node after it
has been launched but before it is put in the READY state to receive jobs.
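For reference, the arithmetic being removed here: each ready node of a
label also consumed one provider server per subnode, so capacity planning
had to multiply. A quick sketch (hypothetical helper, not nodepool code)::

    def servers_consumed(min_ready, subnodes=0):
        """One server per ready node, plus one per attached subnode."""
        return min_ready * (1 + subnodes)

    # The multi-precise label above (min-ready: 2, subnodes: 2) used to
    # consume 2 * (1 + 2) = 6 servers; with subnodes gone it consumes 2.
    assert servers_consumed(min_ready=2, subnodes=2) == 6
    assert servers_consumed(min_ready=2) == 2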

View File

@@ -236,11 +236,11 @@ class AllocationRequest(object):
art = AllocationRequestTarget(self, target, current)
self.request_targets[target] = art
def addProvider(self, provider, target, subnodes):
def addProvider(self, provider, target):
# Handle being called multiple times with different targets.
s = self.sub_requests.get(provider)
if not s:
s = AllocationSubRequest(self, provider, subnodes)
s = AllocationSubRequest(self, provider)
agt = s.addTarget(self.request_targets[target])
self.sub_requests[provider] = s
if s not in provider.sub_requests:
@@ -263,11 +263,10 @@ class AllocationRequest(object):
class AllocationSubRequest(object):
"""A request for a number of images from a specific provider."""
def __init__(self, request, provider, subnodes):
def __init__(self, request, provider):
self.request = request
self.provider = provider
self.amount = 0.0
self.subnodes = subnodes
self.targets = []
def __repr__(self):
@@ -313,8 +312,7 @@ class AllocationSubRequest(object):
self.amount = amount
# Adjust provider and request values accordingly.
self.request.amount -= amount
subnode_factor = 1 + self.subnodes
self.provider.available -= (amount * subnode_factor)
self.provider.available -= (amount)
# Adjust the requested values for related sub-requests.
self.request.makeRequests()
# Allocate these granted nodes to targets.
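The allocator change above is the same simplification: a grant now debits
the provider one-for-one instead of by a subnode factor. A minimal sketch
with stand-in classes (not the real AllocationSubRequest)::

    class Provider(object):
        def __init__(self, available):
            self.available = available

    class SubRequest(object):
        """Stripped-down model of AllocationSubRequest's grant step."""
        def __init__(self, provider):
            self.provider = provider

        def grant(self, amount):
            # Each granted node now consumes exactly one unit of
            # capacity; previously this was amount * (1 + subnodes).
            self.provider.available -= amount

    p = Provider(available=10)
    SubRequest(p).grant(4)
    assert p.available == 6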

View File

@@ -86,7 +86,6 @@ class ConfigValidator:
'image': str,
'min-ready': int,
'ready-script': str,
'subnodes': int,
'providers': [{
'name': str,
}],
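The dict-style schema suggests this validator is built on voluptuous;
assuming so, a label that still carries the removed key now fails
validation outright, since voluptuous rejects unknown keys by default.
A rough sketch::

    from voluptuous import MultipleInvalid, Schema

    label_schema = Schema({
        'name': str,
        'image': str,
        'min-ready': int,
        'ready-script': str,
        'providers': [{'name': str}],
    })

    try:
        label_schema({'name': 'multi-precise', 'image': 'precise',
                      'subnodes': 2})
    except MultipleInvalid as e:
        print(e)  # extra keys not allowed @ data['subnodes']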

View File

@@ -269,7 +269,6 @@ def loadConfig(config_path):
newconfig.labels[l.name] = l
l.image = label['image']
l.min_ready = label.get('min-ready', 2)
l.subnodes = label.get('subnodes', 0)
l.ready_script = label.get('ready-script')
l.providers = {}
for provider in label['providers']:
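After this hunk the loader keeps only the min-ready and ready-script
knobs. A minimal sketch of the surviving .get() defaults (hypothetical
Label stand-in)::

    class Label(object):
        pass

    def load_label(label):
        l = Label()
        l.name = label['name']
        l.image = label['image']
        l.min_ready = label.get('min-ready', 2)     # defaults to 2
        l.ready_script = label.get('ready-script')  # defaults to None
        return l

    l = load_label({'name': 'fake-label', 'image': 'fake-image'})
    assert l.min_ready == 2 and l.ready_script is None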

View File

@@ -43,7 +43,7 @@ STATE_NAMES = {
from sqlalchemy import Table, Column, Integer, String, \
MetaData, create_engine
from sqlalchemy.orm import scoped_session, mapper, relationship, foreign
from sqlalchemy.orm import scoped_session, mapper
from sqlalchemy.orm.session import Session, sessionmaker
metadata = MetaData()
@@ -75,24 +75,6 @@ node_table = Table(
Column('comment', String(255)),
mysql_engine='InnoDB',
)
subnode_table = Table(
'subnode', metadata,
Column('id', Integer, primary_key=True),
Column('node_id', Integer, index=True, nullable=False),
# Machine name
Column('hostname', String(255), index=True),
# Provider assigned id for this machine
Column('external_id', String(255)),
# Primary IP address
Column('ip', String(255)),
# Internal/fixed IP address
Column('ip_private', String(255)),
# One of the above values
Column('state', Integer),
# Time of last state change
Column('state_time', Integer),
mysql_engine='InnoDB',
)
job_table = Table(
'job', metadata,
Column('id', Integer, primary_key=True),
@@ -138,38 +120,6 @@ class Node(object):
session.commit()
class SubNode(object):
def __init__(self, node,
hostname=None, external_id=None, ip=None, ip_private=None,
state=BUILDING):
self.node_id = node.id
self.provider_name = node.provider_name
self.label_name = node.label_name
self.target_name = node.target_name
self.external_id = external_id
self.ip = ip
self.ip_private = ip_private
self.hostname = hostname
self.state = state
def delete(self):
session = Session.object_session(self)
session.delete(self)
session.commit()
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state
self.state_time = int(time.time())
session = Session.object_session(self)
if session:
session.commit()
class Job(object):
def __init__(self, name=None, hold_on_failure=0):
self.name = name
@@ -184,19 +134,9 @@ class Job(object):
mapper(Job, job_table)
mapper(SubNode, subnode_table,
properties=dict(_state=subnode_table.c.state))
mapper(Node, node_table,
properties=dict(
_state=node_table.c.state,
subnodes=relationship(
SubNode,
cascade='all, delete-orphan',
uselist=True,
primaryjoin=foreign(subnode_table.c.node_id) == node_table.c.id,
backref='node')))
_state=node_table.c.state))
class NodeDatabase(object):
@@ -259,24 +199,12 @@ class NodeDatabaseSession(object):
self.commit()
return new
def createSubNode(self, *args, **kwargs):
new = SubNode(*args, **kwargs)
self.session().add(new)
self.commit()
return new
def getNode(self, id):
nodes = self.session().query(Node).filter_by(id=id).all()
if not nodes:
return None
return nodes[0]
def getSubNode(self, id):
nodes = self.session().query(SubNode).filter_by(id=id).all()
if not nodes:
return None
return nodes[0]
def getNodeByHostname(self, hostname):
nodes = self.session().query(Node).filter_by(hostname=hostname).all()
if not nodes:
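With the SubNode mapper and its relationship() gone, Node maps with a
single renamed column property. A self-contained sketch in the same
classical-mapper style (SQLAlchemy 1.x API; node_table trimmed to a few
columns, not the full schema)::

    import time

    from sqlalchemy import Column, Integer, MetaData, Table, create_engine
    from sqlalchemy.orm import mapper

    metadata = MetaData()
    node_table = Table(
        'node', metadata,
        Column('id', Integer, primary_key=True),
        Column('state', Integer),
        Column('state_time', Integer),
    )

    class Node(object):
        @property
        def state(self):
            return self._state

        @state.setter
        def state(self, state):
            self._state = state
            self.state_time = int(time.time())

    # '_state' is now the only explicit mapped property; the subnodes
    # relationship() and its delete-orphan cascade are gone.
    mapper(Node, node_table,
           properties=dict(_state=node_table.c.state))

    engine = create_engine('sqlite://')
    metadata.create_all(engine)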

View File

@@ -364,20 +364,7 @@ class OLDNodeLauncher(threading.Thread):
# Save the elapsed time for statsd
dt = int((time.time() - start_time) * 1000)
if self.label.subnodes:
self.log.info("Node id: %s is waiting on subnodes" % self.node.id)
while ((time.time() - start_time) < (NODE_CLEANUP - 60)):
session.commit()
ready_subnodes = [n for n in self.node.subnodes
if n.state == nodedb.READY]
if len(ready_subnodes) == self.label.subnodes:
break
time.sleep(5)
nodelist = []
for subnode in self.node.subnodes:
nodelist.append(('sub', subnode))
nodelist.append(('primary', self.node))
self.writeNodepoolInfo(nodelist)
@@ -460,16 +447,6 @@ class OLDNodeLauncher(threading.Thread):
f = ftp.open('/etc/nodepool/primary_node_private', 'w')
f.write(self.node.ip_private + '\n')
f.close()
# The IPs of all sub nodes in this node set
f = ftp.open('/etc/nodepool/sub_nodes', 'w')
for subnode in self.node.subnodes:
f.write(subnode.ip + '\n')
f.close()
# The private IPs of all sub nodes in this node set
f = ftp.open('/etc/nodepool/sub_nodes_private', 'w')
for subnode in self.node.subnodes:
f.write(subnode.ip_private + '\n')
f.close()
# The SSH key for this node set
f = ftp.open('/etc/nodepool/id_rsa', 'w')
key.write_private_key(f)
@@ -511,166 +488,6 @@ class OLDNodeLauncher(threading.Thread):
output=True)
class SubNodeLauncher(threading.Thread):
log = logging.getLogger("nodepool.SubNodeLauncher")
def __init__(self, nodepool, provider, label, subnode_id,
node_id, node_target_name, timeout, launch_timeout, node_az,
manager_name):
threading.Thread.__init__(self, name='SubNodeLauncher for %s'
% subnode_id)
self.provider = provider
self.label = label
self.image = provider.images[label.image]
self.node_target_name = node_target_name
self.subnode_id = subnode_id
self.node_id = node_id
self.timeout = timeout
self.nodepool = nodepool
self.launch_timeout = launch_timeout
self.node_az = node_az
self.manager_name = manager_name
def run(self):
try:
self._run()
except Exception:
self.log.exception("Exception in run method:")
def _run(self):
with self.nodepool.getDB().getSession() as session:
self.log.debug("Launching subnode id: %s for node id: %s" %
(self.subnode_id, self.node_id))
try:
self.subnode = session.getSubNode(self.subnode_id)
self.manager = self.nodepool.getProviderManager(self.provider)
except Exception:
self.log.exception("Exception preparing to launch subnode "
"id: %s for node id: %s:"
% (self.subnode_id, self.node_id))
return
try:
start_time = time.time()
dt = self.launchSubNode(session)
failed = False
statsd_key = 'ready'
except Exception as e:
self.log.exception("%s launching subnode id: %s "
"for node id: %s in provider: %s error:" %
(e.__class__.__name__, self.subnode_id,
self.node_id, self.provider.name))
dt = int((time.time() - start_time) * 1000)
failed = True
if hasattr(e, 'statsd_key'):
statsd_key = e.statsd_key
else:
statsd_key = 'error.unknown'
try:
self.nodepool.launchStats(statsd_key, dt, self.image.name,
self.provider.name,
self.node_target_name,
self.node_az,
self.manager_name)
except Exception:
self.log.exception("Exception reporting launch stats:")
if failed:
try:
self.nodepool.deleteSubNode(self.subnode, self.manager)
except Exception:
self.log.exception("Exception deleting subnode id: %s: "
"for node id: %s:" %
(self.subnode_id, self.node_id))
return
def launchSubNode(self, session):
start_time = time.time()
timestamp = int(start_time)
target = self.nodepool.config.targets[self.node_target_name]
hostname = target.subnode_hostname.format(
label=self.label, provider=self.provider, node_id=self.node_id,
subnode_id=self.subnode_id, timestamp=str(timestamp))
self.subnode.hostname = hostname
self.subnode.nodename = hostname.split('.')[0]
cloud_image = self.nodepool.zk.getMostRecentImageUpload(
self.image.name, self.provider.name)
if not cloud_image:
raise LaunchNodepoolException("Unable to find current cloud "
"image %s in %s" %
(self.image.name,
self.provider.name))
self.log.info("Creating server with hostname %s in %s from image %s "
"for subnode id: %s for node id: %s"
% (hostname, self.provider.name,
self.image.name, self.subnode_id, self.node_id))
server = self.manager.createServer(
hostname, self.image.min_ram, cloud_image.external_id,
name_filter=self.image.name_filter, az=self.node_az,
config_drive=self.image.config_drive,
nodepool_node_id=self.node_id,
nodepool_image_name=self.image.name)
server_id = server['id']
self.subnode.external_id = server_id
session.commit()
self.log.debug("Waiting for server %s for subnode id: %s for "
"node id: %s" %
(server_id, self.subnode_id, self.node_id))
server = self.manager.waitForServer(server, self.launch_timeout)
if server['status'] != 'ACTIVE':
raise LaunchStatusException("Server %s for subnode id: "
"%s for node id: %s "
"status: %s" %
(server_id, self.subnode_id,
self.node_id, server['status']))
ip = server.get('public_v4')
ip_v6 = server.get('public_v6')
if self.provider.ipv6_preferred:
if ip_v6:
ip = ip_v6
else:
self.log.warning('Preferred ipv6 not available, '
'falling back to ipv4.')
if not ip:
raise LaunchNetworkException("Unable to find public IP of server")
self.subnode.ip_private = server.get('private_v4')
# devstack-gate multi-node depends on private_v4 being populated
# with something. On clouds that don't have a private address, use
# the public.
if not self.subnode.ip_private:
self.subnode.ip_private = server.get('public_v4')
self.subnode.ip = ip
self.log.debug("Subnode id: %s for node id: %s is running, "
"ipv4: %s, ipv6: %s" %
(self.subnode_id, self.node_id, server.get('public_v4'),
server.get('public_v6')))
self.log.debug("Subnode id: %s for node id: %s testing ssh at ip: %s" %
(self.subnode_id, self.node_id, ip))
connect_kwargs = dict(key_filename=self.image.private_key)
if not utils.ssh_connect(ip, self.image.username,
connect_kwargs=connect_kwargs,
timeout=self.timeout):
raise LaunchAuthException("Unable to connect via ssh")
# Save the elapsed time for statsd
dt = int((time.time() - start_time) * 1000)
self.subnode.state = nodedb.READY
self.log.info("Subnode id: %s for node id: %s is ready"
% (self.subnode_id, self.node_id))
self.nodepool.updateStats(session, self.provider.name)
return dt
class NodeLauncher(threading.Thread):
def __init__(self, zk, node, retries):
threading.Thread.__init__(self)
@@ -1340,12 +1157,12 @@ class NodePool(threading.Thread):
n.label_name == label_name and
n.state == state)])
def count_nodes_and_subnodes(provider_name):
def count_provider_nodes(provider_name):
count = 0
for n in nodes:
if n.provider_name != provider_name:
continue
count += 1 + len(n.subnodes)
count += 1
return count
# Add a provider for each node provider, along with current
@@ -1353,7 +1170,7 @@ class NodePool(threading.Thread):
allocation_providers = {}
for provider in self.config.providers.values():
provider_max = provider.max_servers
n_provider = count_nodes_and_subnodes(provider.name)
n_provider = count_provider_nodes(provider.name)
available = provider_max - n_provider
if available < 0:
self.log.warning("Provider %s over-allocated: "
@@ -1436,7 +1253,7 @@ class NodePool(threading.Thread):
# request should be distributed to this target).
sr, agt = ar.addProvider(
allocation_providers[provider.name],
at, label.subnodes)
at, 0)
tlps[agt] = (target, label,
self.config.providers[provider.name])
else:
@@ -1470,19 +1287,6 @@ class NodePool(threading.Thread):
self.log.debug("Finished node launch calculation")
return nodes_to_launch
def getNeededSubNodes(self, session):
nodes_to_launch = []
for node in session.getNodes():
if node.label_name in self.config.labels:
expected_subnodes = \
self.config.labels[node.label_name].subnodes
active_subnodes = len([n for n in node.subnodes
if n.state != nodedb.DELETE])
deficit = max(expected_subnodes - active_subnodes, 0)
if deficit:
nodes_to_launch.append((node, deficit))
return nodes_to_launch
def updateConfig(self):
config = self.loadConfig()
self.reconfigureZooKeeper(config)
@@ -1527,16 +1331,6 @@ class NodePool(threading.Thread):
self._wake_condition.release()
def _run(self, session, allocation_history):
# Make up the subnode deficit first to make sure that an
# already allocated node has priority in filling its subnodes
# ahead of new nodes.
subnodes_to_launch = self.getNeededSubNodes(session)
for (node, num_to_launch) in subnodes_to_launch:
self.log.info("Need to launch %s subnodes for node id: %s" %
(num_to_launch, node.id))
for i in range(num_to_launch):
self.launchSubNode(session, node)
nodes_to_launch = self.getNeededNodes(session, allocation_history)
for (tlp, num_to_launch) in nodes_to_launch:
@@ -1575,39 +1369,6 @@ class NodePool(threading.Thread):
launch_timeout)
t.start()
def launchSubNode(self, session, node):
try:
self._launchSubNode(session, node)
except Exception:
self.log.exception(
"Could not launch subnode for node id: %s", node.id)
def _launchSubNode(self, session, node):
provider = self.config.providers[node.provider_name]
label = self.config.labels[node.label_name]
timeout = provider.boot_timeout
launch_timeout = provider.launch_timeout
subnode = session.createSubNode(node)
t = SubNodeLauncher(self, provider, label, subnode.id,
node.id, node.target_name, timeout, launch_timeout,
node_az=node.az, manager_name=node.manager_name)
t.start()
def deleteSubNode(self, subnode, manager):
# Don't try too hard here, the actual node deletion will make
# sure this is cleaned up.
if subnode.external_id:
try:
self.log.debug('Deleting server %s for subnode id: '
'%s of node id: %s' %
(subnode.external_id, subnode.id,
subnode.node.id))
manager.cleanupServer(subnode.external_id)
manager.waitForServerDeletion(subnode.external_id)
except provider_manager.NotFound:
pass
subnode.delete()
def deleteNode(self, node_id):
try:
self._delete_threads_lock.acquire()
@@ -1654,16 +1415,6 @@ class NodePool(threading.Thread):
self.log.exception("Exception revoking node id: %s" %
node.id)
for subnode in node.subnodes:
if subnode.external_id:
try:
self.log.debug('Deleting server %s for subnode id: '
'%s of node id: %s' %
(subnode.external_id, subnode.id, node.id))
manager.cleanupServer(subnode.external_id)
except provider_manager.NotFound:
pass
if node.external_id:
try:
self.log.debug('Deleting server %s for node id: %s' %
@@ -1674,11 +1425,6 @@ class NodePool(threading.Thread):
pass
node.external_id = None
for subnode in node.subnodes:
if subnode.external_id:
manager.waitForServerDeletion(subnode.external_id)
subnode.delete()
node.delete()
self.log.info("Deleted node id: %s" % node.id)
@@ -1886,7 +1632,7 @@ class NodePool(threading.Thread):
continue
state = nodedb.STATE_NAMES[node.state]
key = 'nodepool.nodes.%s' % state
total_nodes = self.config.labels[node.label_name].subnodes + 1
total_nodes = 1
states[key] += total_nodes
# NOTE(pabelanger): Check if we assign nodes via Gearman if so, use
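With subnodes gone, provider accounting collapses to plain counting: one
node, one server. A condensed sketch of the availability step above
(plain dicts standing in for DB rows)::

    def count_provider_nodes(nodes, provider_name):
        # Each node counts once; there is no longer a subnode multiplier.
        return sum(1 for n in nodes if n['provider'] == provider_name)

    def available_capacity(provider, nodes):
        used = count_provider_nodes(nodes, provider['name'])
        available = provider['max_servers'] - used
        if available < 0:
            print("Provider %s over-allocated" % provider['name'])
        return available

    nodes = [{'provider': 'cloud1'}, {'provider': 'cloud1'},
             {'provider': 'cloud2'}]
    print(available_capacity({'name': 'cloud1', 'max_servers': 8}, nodes))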

View File

@@ -21,7 +21,6 @@ labels:
- name: trusty-2-node
image: trusty
ready-script: multinode_setup.sh
subnodes: 1
min-ready: 0
providers:
- name: cloud1

View File

@@ -21,7 +21,6 @@ labels:
- name: trusty-2-node
image: trusty
ready-script: multinode_setup.sh
subnodes: 1
min-ready: 0
providers:
- name: cloud1

View File

@@ -1,61 +0,0 @@
elements-dir: .
images-dir: '{images_dir}'
cron:
check: '*/15 * * * *'
cleanup: '*/1 * * * *'
zookeeper-servers:
- host: {zookeeper_host}
port: {zookeeper_port}
chroot: {zookeeper_chroot}
labels:
- name: fake-label
image: fake-image
min-ready: 2
providers:
- name: fake-provider
- name: multi-fake
image: fake-image
ready-script: multinode_setup.sh
subnodes: 2
min-ready: 2
providers:
- name: fake-provider
providers:
- name: fake-provider
region-name: fake-region
keypair: 'if-present-use-this-keypair'
username: 'fake'
password: 'fake'
auth-url: 'fake'
project-id: 'fake'
max-servers: 96
pool: 'fake'
networks:
- net-id: 'some-uuid'
rate: 0.0001
images:
- name: fake-image
min-ram: 8192
name-filter: 'Fake'
meta:
key: value
key2: value
targets:
- name: fake-target
diskimages:
- name: fake-image
elements:
- fedora
- vm
release: 21
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2

View File

@@ -40,7 +40,7 @@ class OneLabel(tests.AllocatorTestCase, tests.BaseTestCase):
at1 = allocation.AllocationTarget('target1')
ar1 = allocation.AllocationRequest('label1', self.label1)
ar1.addTarget(at1, 0)
self.agt.append(ar1.addProvider(ap1, at1, 0)[1])
self.agt.append(ar1.addProvider(ap1, at1)[1])
ap1.makeGrants()
@@ -67,8 +67,8 @@ class TwoLabels(tests.AllocatorTestCase, tests.BaseTestCase):
ar2 = allocation.AllocationRequest('label2', self.label2)
ar1.addTarget(at1, 0)
ar2.addTarget(at1, 0)
self.agt.append(ar1.addProvider(ap1, at1, 0)[1])
self.agt.append(ar2.addProvider(ap1, at1, 0)[1])
self.agt.append(ar1.addProvider(ap1, at1)[1])
self.agt.append(ar2.addProvider(ap1, at1)[1])
ap1.makeGrants()
@@ -115,10 +115,10 @@ class TwoProvidersTwoLabels(tests.AllocatorTestCase, tests.BaseTestCase):
ar2 = allocation.AllocationRequest('label2', self.label2)
ar1.addTarget(at1, 0)
ar2.addTarget(at1, 0)
self.agt.append(ar1.addProvider(ap1, at1, 0)[1])
self.agt.append(ar2.addProvider(ap1, at1, 0)[1])
self.agt.append(ar1.addProvider(ap2, at1, 0)[1])
self.agt.append(ar2.addProvider(ap2, at1, 0)[1])
self.agt.append(ar1.addProvider(ap1, at1)[1])
self.agt.append(ar2.addProvider(ap1, at1)[1])
self.agt.append(ar1.addProvider(ap2, at1)[1])
self.agt.append(ar2.addProvider(ap2, at1)[1])
ap1.makeGrants()
ap2.makeGrants()
@@ -170,9 +170,9 @@ class TwoProvidersTwoLabelsOneShared(tests.AllocatorTestCase,
ar2 = allocation.AllocationRequest('label2', self.label2)
ar1.addTarget(at1, 0)
ar2.addTarget(at1, 0)
self.agt.append(ar1.addProvider(ap1, at1, 0)[1])
self.agt.append(ar2.addProvider(ap1, at1, 0)[1])
self.agt.append(ar2.addProvider(ap2, at1, 0)[1])
self.agt.append(ar1.addProvider(ap1, at1)[1])
self.agt.append(ar2.addProvider(ap1, at1)[1])
self.agt.append(ar2.addProvider(ap2, at1)[1])
ap1.makeGrants()
ap2.makeGrants()
@@ -293,8 +293,8 @@ class RoundRobinAllocation(tests.RoundRobinTestCase, tests.BaseTestCase):
# providers
for ar in ars:
ar.addTarget(at1, 0)
ar.addProvider(ap1, at1, 0)
ar.addProvider(ap2, at1, 0)
ar.addProvider(ap1, at1)
ar.addProvider(ap2, at1)
ap1.makeGrants()
for g in ap1.grants:
@@ -414,15 +414,15 @@ class RoundRobinFixedProvider(tests.RoundRobinTestCase, tests.BaseTestCase):
# first ar can only go to provider1, the last only to
# provider2
ars[0].addTarget(at1, 0)
ars[0].addProvider(ap1, at1, 0)
ars[0].addProvider(ap1, at1)
ars[-1].addTarget(at1, 0)
ars[-1].addProvider(ap2, at1, 0)
ars[-1].addProvider(ap2, at1)
# the rest can go anywhere
for ar in ars[1:-1]:
ar.addTarget(at1, 0)
ar.addProvider(ap1, at1, 0)
ar.addProvider(ap2, at1, 0)
ar.addProvider(ap1, at1)
ar.addProvider(ap2, at1)
ap1.makeGrants()
for g in ap1.grants:

View File

@@ -214,69 +214,6 @@ class TestNodepool(tests.DBTestCase):
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
@skip("Disabled for early v3 development")
def test_subnodes(self):
"""Test that an image and node are created"""
configfile = self.setup_config('subnodes.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
nodes = session.getNodes(provider_name='fake-provider',
label_name='multi-fake',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
for node in nodes:
self.assertEqual(len(node.subnodes), 2)
for subnode in node.subnodes:
self.assertEqual(subnode.state, nodedb.READY)
@skip("Disabled for early v3 development")
def test_subnode_deletion_success(self):
"""Test that subnodes are deleted with parent node"""
configfile = self.setup_config('subnodes.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
subnode_ids = []
node_ids = []
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='multi-fake',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
for node in nodes:
self.assertEqual(len(node.subnodes), 2)
for subnode in node.subnodes:
self.assertEqual(subnode.state, nodedb.READY)
subnode_ids.append(subnode.id)
node_ids.append(node.id)
for node_id in node_ids:
pool.deleteNode(node_id)
self.wait_for_threads()
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
for subnode_id in subnode_ids:
s = session.getSubNode(subnode_id)
self.assertIsNone(s)
@skip("Disabled for early v3 development")
def test_node_az(self):
"""Test that an image and node are created with az specified"""

View File

@@ -28,7 +28,6 @@ labels:
- name: multi-fake
image: fake-nodepool
ready-script: multinode_setup.sh
subnodes: 2
min-ready: 2
providers:
- name: fake-provider