Merge "Support userdata for instances in openstack"

This commit is contained in:
Zuul 2019-01-23 14:36:19 +00:00 committed by Gerrit Code Review
commit 44ae87c310
7 changed files with 135 additions and 3 deletions

View File

@@ -328,6 +328,12 @@ providers:
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: debian-stretch
diskimage: debian-stretch
min-ram: 512
@@ -336,6 +342,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: fedora-29
diskimage: fedora-29
min-ram: 1024
@@ -344,6 +356,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: ubuntu-bionic
diskimage: ubuntu-bionic
min-ram: 512
@@ -352,6 +370,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: ubuntu-trusty
diskimage: ubuntu-trusty
min-ram: 512
@@ -360,6 +384,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: ubuntu-xenial
diskimage: ubuntu-xenial
min-ram: 512
@@ -368,6 +398,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: opensuse-423
diskimage: opensuse-423
min-ram: 512
@@ -376,6 +412,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: opensuse-150
diskimage: opensuse-150
min-ram: 512
@@ -384,6 +426,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: opensuse-tumbleweed
diskimage: opensuse-tumbleweed
min-ram: 512
@@ -392,6 +440,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
- name: gentoo-17-0-systemd
diskimage: gentoo-17-0-systemd
min-ram: 512
@@ -400,6 +454,12 @@
key-name: $NODEPOOL_KEY_NAME
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
diskimages:
- name: centos-7

View File

@@ -921,6 +921,15 @@ Selecting the OpenStack driver adds the following options to the
``meta-data`` on the active server (e.g. within
``config-drive:openstack/latest/meta_data.json``)
.. attr:: userdata
:type: str
:default: None
A string of userdata for a node. A typical use is to supply a
cloud-config document; the cloud-init package must be installed in the
image for the userdata to be applied. Additional information about
cloud-config options:
https://cloudinit.readthedocs.io/en/latest/topics/examples.html
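For reference, a minimal pool label using the new option could look like the sketch below (the pool, label, and diskimage names are placeholders, the image is assumed to have cloud-init installed, and the cloud-config payload mirrors the devstack test configuration added in this change):

pools:
  - name: main
    labels:
      - name: example-label
        diskimage: example-image
        min-ram: 512
        userdata: |
          #cloud-config
          write_files:
            - content: |
                testpassed
              path: /etc/testfile_nodepool_userdata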
Static Driver
-------------

View File

@@ -87,6 +87,7 @@ class ProviderLabel(ConfigValue):
self.boot_from_volume = False
self.volume_size = None
self.instance_properties = None
self.userdata = None
# The ProviderPool object that owns this label.
self.pool = None
@@ -103,7 +104,8 @@
other.console_log == self.console_log and
other.boot_from_volume == self.boot_from_volume and
other.volume_size == self.volume_size and
other.instance_properties == self.instance_properties)
other.instance_properties == self.instance_properties and
other.userdata == self.userdata)
return False
def __repr__(self):
@@ -203,6 +205,7 @@ class ProviderPool(ConfigPool):
pl.volume_size = label.get('volume-size', 50)
pl.instance_properties = label.get('instance-properties',
None)
pl.userdata = label.get('userdata', None)
top_label = full_config.labels[pl.name]
top_label.pools.append(self)
@@ -355,6 +358,7 @@ class OpenStackProviderConfig(ProviderConfig):
'boot-from-volume': bool,
'volume-size': int,
'instance-properties': dict,
'userdata': str,
}
label_min_ram = v.Schema({v.Required('min-ram'): int}, extra=True)

View File

@@ -139,7 +139,8 @@ class OpenStackNodeLauncher(NodeLauncher):
security_groups=self.pool.security_groups,
boot_from_volume=self.label.boot_from_volume,
volume_size=self.label.volume_size,
instance_properties=self.label.instance_properties)
instance_properties=self.label.instance_properties,
userdata=self.label.userdata)
except openstack.cloud.exc.OpenStackCloudCreateException as e:
if e.resource_id:
self.node.external_id = e.resource_id

View File

@@ -280,7 +280,7 @@ class OpenStackProvider(Provider):
nodepool_image_name=None,
networks=None, security_groups=None,
boot_from_volume=False, volume_size=50,
instance_properties=None):
instance_properties=None, userdata=None):
if not networks:
networks = []
if not isinstance(image, dict):
@@ -303,6 +303,8 @@
create_args['availability_zone'] = az
if security_groups:
create_args['security_groups'] = security_groups
if userdata:
create_args['userdata'] = userdata
nics = []
for network in networks:
net_id = self.findNetwork(network)['id']
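The userdata string set here is passed through to the openstacksdk create_server() call and, on the instance, applied by cloud-init if the image has it installed. A minimal standalone sketch of the same call, assuming a clouds.yaml entry named 'devstack' and placeholder image/flavor names:

import openstack

# Assumptions: a clouds.yaml entry named 'devstack'; 'centos-7' and
# 'nodepool' are placeholder image/flavor names, not guaranteed to exist.
conn = openstack.connect(cloud='devstack')

userdata = """#cloud-config
write_files:
  - content: |
      testpassed
    path: /etc/testfile_nodepool_userdata
"""

create_args = dict(name='userdata-example', image='centos-7', flavor='nodepool')
# Mirror the driver's behaviour: only pass userdata when it is set.
if userdata:
    create_args['userdata'] = userdata

# openstacksdk forwards userdata to Nova, which exposes it to the guest.
server = conn.create_server(wait=True, **create_args)
print(server.id)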

View File

@@ -56,6 +56,12 @@ providers:
instance-properties:
a_key: a_value
b_key: b_value
userdata: |
#cloud-config
password: password
chpasswd: { expire: False }
ssh_pwauth: True
hostname: test
- name: cloud2
driver: openstack

View File

@@ -61,6 +61,36 @@ function sshintonode {
fi
}
function showserver {
name=$1
state='ready'
node_id=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f5 | tr -d ' '`
EXPECTED=$(mktemp)
RESULT=$(mktemp)
source /opt/stack/devstack/openrc admin admin
nova show $node_id | grep -Eo "user_data[ ]+.*|[ ]*$" | awk {'print $3'} |\
base64 --decode > $RESULT
cat <<EOF >$EXPECTED
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
EOF
diff $EXPECTED $RESULT
if [[ $? -ne 0 ]]; then
echo "*** Failed to find userdata on server!"
FAILURE_REASON="Failed to find userdata on server for $name"
echo "Expected userdata:"
cat $EXPECTED
echo "Found userdata:"
cat $RESULT
RETURN=1
fi
}
function checknm {
name=$1
state='ready'
@@ -116,6 +146,8 @@ if [ ${NODEPOOL_PAUSE_CENTOS_7_DIB,,} = 'false' ]; then
sshintonode centos-7
# networkmanager check
checknm centos-7
# userdata check
showserver centos-7
fi
if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then
@@ -125,6 +157,8 @@ if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then
waitfornode debian-stretch
# check ssh for root user
sshintonode debian-stretch
# userdata check
showserver debian-stretch
fi
if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then
@@ -136,6 +170,8 @@ if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then
sshintonode fedora-29
# networkmanager check
checknm fedora-29
# userdata check
showserver fedora-29
fi
if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then
@@ -145,6 +181,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then
waitfornode ubuntu-bionic
# check ssh for root user
sshintonode ubuntu-bionic
# userdata check
showserver ubuntu-bionic
fi
if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then
@@ -154,6 +192,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then
waitfornode ubuntu-trusty
# check ssh for root user
sshintonode ubuntu-trusty
# userdata check
showserver ubuntu-trusty
fi
if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then
@@ -163,6 +203,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then
waitfornode ubuntu-xenial
# check ssh for root user
sshintonode ubuntu-xenial
# userdata check
showserver ubuntu-xenial
fi
if [ ${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then
@@ -172,6 +214,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then
waitfornode opensuse-423
# check ssh for root user
sshintonode opensuse-423
# userdata check
showserver opensuse-423
fi
if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then
# check that image built
@@ -180,6 +224,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then
waitfornode opensuse-150
# check ssh for root user
sshintonode opensuse-150
# userdata check
showserver opensuse-150
fi
if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then
# check that image built
@@ -188,6 +234,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then
waitfornode opensuse-tumbleweed
# check ssh for root user
sshintonode opensuse-tumbleweed
# userdata check
showserver opensuse-tumbleweed
fi
if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then
# check that image built
@@ -196,6 +244,8 @@ if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then
waitfornode gentoo-17-0-systemd
# check ssh for root user
sshintonode gentoo-17-0-systemd
# userdata check
showserver gentoo-17-0-systemd
fi
set -o errexit